aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorSage Weil <sage@inktank.com>2012-06-15 15:32:04 -0400
committerSage Weil <sage@inktank.com>2012-06-15 15:32:04 -0400
commit9a64e8e0ace51b309fdcff4b4754b3649250382a (patch)
tree1f0d75c196c5ab0408c55ed6cf3a152f1f921e15 /net
parentf3dea7edd3d449fe7a6d402c1ce56a294b985261 (diff)
parentf8f5701bdaf9134b1f90e5044a82c66324d2073f (diff)
Merge tag 'v3.5-rc1'
Linux 3.5-rc1 Conflicts: net/ceph/messenger.c
Diffstat (limited to 'net')
-rw-r--r--net/802/Makefile1
-rw-r--r--net/802/fc.c3
-rw-r--r--net/802/fddi.c3
-rw-r--r--net/802/garp.c30
-rw-r--r--net/802/hippi.c3
-rw-r--r--net/802/p8022.c3
-rw-r--r--net/802/stp.c2
-rw-r--r--net/802/tr.c677
-rw-r--r--net/8021q/vlan.c10
-rw-r--r--net/8021q/vlan_core.c3
-rw-r--r--net/8021q/vlan_dev.c12
-rw-r--r--net/8021q/vlan_netlink.c16
-rw-r--r--net/9p/client.c32
-rw-r--r--net/9p/trans_fd.c2
-rw-r--r--net/9p/trans_virtio.c3
-rw-r--r--net/Kconfig11
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/appletalk/sysctl_net_atalk.c10
-rw-r--r--net/atm/br2684.c2
-rw-r--r--net/atm/clip.c18
-rw-r--r--net/atm/ioctl.c8
-rw-r--r--net/atm/lec.c144
-rw-r--r--net/atm/lec.h5
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/atm/mpoa_proc.c2
-rw-r--r--net/atm/pppoatm.c97
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/ax25/af_ax25.c10
-rw-r--r--net/ax25/ax25_addr.c1
-rw-r--r--net/ax25/ax25_dev.c11
-rw-r--r--net/ax25/ax25_ds_in.c1
-rw-r--r--net/ax25/ax25_ds_subr.c1
-rw-r--r--net/ax25/ax25_ds_timer.c1
-rw-r--r--net/ax25/ax25_iface.c1
-rw-r--r--net/ax25/ax25_in.c1
-rw-r--r--net/ax25/ax25_ip.c5
-rw-r--r--net/ax25/ax25_out.c1
-rw-r--r--net/ax25/ax25_route.c1
-rw-r--r--net/ax25/ax25_std_in.c1
-rw-r--r--net/ax25/ax25_std_subr.c1
-rw-r--r--net/ax25/ax25_std_timer.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/ax25/ax25_timer.c1
-rw-r--r--net/ax25/ax25_uid.c1
-rw-r--r--net/ax25/sysctl_net_ax25.c82
-rw-r--r--net/batman-adv/Kconfig27
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_algo.h (renamed from net/batman-adv/bat_ogm.h)20
-rw-r--r--net/batman-adv/bat_debugfs.c47
-rw-r--r--net/batman-adv/bat_debugfs.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c511
-rw-r--r--net/batman-adv/bat_sysfs.c141
-rw-r--r--net/batman-adv/bat_sysfs.h2
-rw-r--r--net/batman-adv/bitarray.c128
-rw-r--r--net/batman-adv/bitarray.h28
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c1580
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h98
-rw-r--r--net/batman-adv/gateway_client.c43
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c14
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/hard-interface.c206
-rw-r--r--net/batman-adv/hard-interface.h2
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h2
-rw-r--r--net/batman-adv/icmp_socket.c24
-rw-r--r--net/batman-adv/icmp_socket.h2
-rw-r--r--net/batman-adv/main.c241
-rw-r--r--net/batman-adv/main.h62
-rw-r--r--net/batman-adv/originator.c82
-rw-r--r--net/batman-adv/originator.h9
-rw-r--r--net/batman-adv/packet.h90
-rw-r--r--net/batman-adv/ring_buffer.c2
-rw-r--r--net/batman-adv/ring_buffer.h2
-rw-r--r--net/batman-adv/routing.c143
-rw-r--r--net/batman-adv/routing.h7
-rw-r--r--net/batman-adv/send.c31
-rw-r--r--net/batman-adv/send.h2
-rw-r--r--net/batman-adv/soft-interface.c518
-rw-r--r--net/batman-adv/soft-interface.h4
-rw-r--r--net/batman-adv/translation-table.c665
-rw-r--r--net/batman-adv/translation-table.h14
-rw-r--r--net/batman-adv/types.h107
-rw-r--r--net/batman-adv/unicast.c30
-rw-r--r--net/batman-adv/unicast.h2
-rw-r--r--net/batman-adv/vis.c27
-rw-r--r--net/batman-adv/vis.h5
-rw-r--r--net/bluetooth/Kconfig1
-rw-r--r--net/bluetooth/af_bluetooth.c8
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/sock.c7
-rw-r--r--net/bluetooth/cmtp/sock.c7
-rw-r--r--net/bluetooth/hci_conn.c130
-rw-r--r--net/bluetooth/hci_core.c855
-rw-r--r--net/bluetooth/hci_event.c701
-rw-r--r--net/bluetooth/hci_sock.c473
-rw-r--r--net/bluetooth/hci_sysfs.c58
-rw-r--r--net/bluetooth/hidp/core.c27
-rw-r--r--net/bluetooth/hidp/sock.c6
-rw-r--r--net/bluetooth/l2cap_core.c1342
-rw-r--r--net/bluetooth/l2cap_sock.c133
-rw-r--r--net/bluetooth/lib.c27
-rw-r--r--net/bluetooth/mgmt.c2768
-rw-r--r--net/bluetooth/rfcomm/sock.c15
-rw-r--r--net/bluetooth/rfcomm/tty.c140
-rw-r--r--net/bluetooth/sco.c76
-rw-r--r--net/bluetooth/smp.c110
-rw-r--r--net/bridge/br_device.c10
-rw-r--r--net/bridge/br_fdb.c150
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_multicast.c158
-rw-r--r--net/bridge/br_netfilter.c46
-rw-r--r--net/bridge/br_netlink.c39
-rw-r--r--net/bridge/br_private.h21
-rw-r--r--net/bridge/br_private_stp.h7
-rw-r--r--net/bridge/br_stp.c4
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_if.c11
-rw-r--r--net/bridge/br_stp_timer.c6
-rw-r--r--net/bridge/br_sysfs_br.c20
-rw-r--r--net/bridge/netfilter/ebt_stp.c4
-rw-r--r--net/caif/Kconfig2
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_socket.c141
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/caif/cfdbgl.c4
-rw-r--r--net/caif/cfdgml.c9
-rw-r--r--net/caif/cfpkt_skbuff.c7
-rw-r--r--net/caif/cfrfml.c25
-rw-r--r--net/caif/cfsrvl.c9
-rw-r--r--net/caif/cfutill.c5
-rw-r--r--net/caif/cfvidl.c6
-rw-r--r--net/caif/chnl_net.c47
-rw-r--r--net/can/gw.c2
-rw-r--r--net/ceph/auth_x.h6
-rw-r--r--net/ceph/ceph_common.c4
-rw-r--r--net/ceph/ceph_hash.c6
-rw-r--r--net/ceph/crush/mapper.c9
-rw-r--r--net/ceph/debugfs.c6
-rw-r--r--net/ceph/messenger.c16
-rw-r--r--net/ceph/mon_client.c10
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/ceph/osdmap.c14
-rw-r--r--net/compat.c85
-rw-r--r--net/core/datagram.c33
-rw-r--r--net/core/dev.c296
-rw-r--r--net/core/dev_addr_lists.c100
-rw-r--r--net/core/drop_monitor.c146
-rw-r--r--net/core/ethtool.c132
-rw-r--r--net/core/fib_rules.c32
-rw-r--r--net/core/filter.c88
-rw-r--r--net/core/gen_estimator.c1
-rw-r--r--net/core/gen_stats.c3
-rw-r--r--net/core/iovec.c2
-rw-r--r--net/core/kmap_skb.h19
-rw-r--r--net/core/neighbour.c216
-rw-r--r--net/core/net-sysfs.c17
-rw-r--r--net/core/net_namespace.c39
-rw-r--r--net/core/netpoll.c71
-rw-r--r--net/core/netprio_cgroup.c42
-rw-r--r--net/core/pktgen.c78
-rw-r--r--net/core/rtnetlink.c371
-rw-r--r--net/core/scm.c1
-rw-r--r--net/core/skbuff.c461
-rw-r--r--net/core/sock.c140
-rw-r--r--net/core/sock_diag.c12
-rw-r--r--net/core/sysctl_net_core.c19
-rw-r--r--net/core/utils.c10
-rw-r--r--net/dcb/dcbnl.c94
-rw-r--r--net/dccp/ccids/ccid3.c15
-rw-r--r--net/dccp/dccp.h8
-rw-r--r--net/dccp/input.c10
-rw-r--r--net/dccp/ipv4.c14
-rw-r--r--net/dccp/ipv6.c11
-rw-r--r--net/dccp/minisocks.c18
-rw-r--r--net/dccp/output.c10
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dccp/sysctl.c11
-rw-r--r--net/decnet/af_decnet.c7
-rw-r--r--net/decnet/dn_dev.c36
-rw-r--r--net/decnet/dn_fib.c10
-rw-r--r--net/decnet/dn_neigh.c46
-rw-r--r--net/decnet/dn_nsp_in.c14
-rw-r--r--net/decnet/dn_nsp_out.c10
-rw-r--r--net/decnet/dn_route.c36
-rw-r--r--net/decnet/dn_rules.c14
-rw-r--r--net/decnet/dn_table.c4
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c3
-rw-r--r--net/decnet/sysctl_net_decnet.c10
-rw-r--r--net/dns_resolver/dns_key.c8
-rw-r--r--net/dns_resolver/internal.h2
-rw-r--r--net/dsa/slave.c10
-rw-r--r--net/econet/Kconfig36
-rw-r--r--net/econet/Makefile7
-rw-r--r--net/econet/af_econet.c1173
-rw-r--r--net/ethernet/eth.c10
-rw-r--r--net/ieee802154/6lowpan.c143
-rw-r--r--net/ieee802154/6lowpan.h3
-rw-r--r--net/ieee802154/dgram.c6
-rw-r--r--net/ieee802154/nl-mac.c146
-rw-r--r--net/ieee802154/nl-phy.c38
-rw-r--r--net/ieee802154/raw.c2
-rw-r--r--net/ipv4/Kconfig8
-rw-r--r--net/ipv4/af_inet.c33
-rw-r--r--net/ipv4/ah4.c23
-rw-r--r--net/ipv4/arp.c29
-rw-r--r--net/ipv4/cipso_ipv4.c11
-rw-r--r--net/ipv4/devinet.c68
-rw-r--r--net/ipv4/esp4.c34
-rw-r--r--net/ipv4/fib_frontend.c17
-rw-r--r--net/ipv4/fib_rules.c16
-rw-r--r--net/ipv4/fib_semantics.c62
-rw-r--r--net/ipv4/fib_trie.c8
-rw-r--r--net/ipv4/gre.c6
-rw-r--r--net/ipv4/icmp.c30
-rw-r--r--net/ipv4/igmp.c19
-rw-r--r--net/ipv4/inet_connection_sock.c32
-rw-r--r--net/ipv4/inet_diag.c24
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c6
-rw-r--r--net/ipv4/ip_forward.c4
-rw-r--r--net/ipv4/ip_fragment.c54
-rw-r--r--net/ipv4/ip_gre.c131
-rw-r--r--net/ipv4/ip_input.c30
-rw-r--r--net/ipv4/ip_options.c34
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c63
-rw-r--r--net/ipv4/ipcomp.c8
-rw-r--r--net/ipv4/ipconfig.c124
-rw-r--r--net/ipv4/ipip.c64
-rw-r--r--net/ipv4/ipmr.c15
-rw-r--r--net/ipv4/netfilter.c12
-rw-r--r--net/ipv4/netfilter/Kconfig9
-rw-r--r--net/ipv4/netfilter/Makefile4
-rw-r--r--net/ipv4/netfilter/arp_tables.c7
-rw-r--r--net/ipv4/netfilter/ip_queue.c639
-rw-r--r--net/ipv4/netfilter/ip_tables.c5
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c3
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c516
-rw-r--r--net/ipv4/netfilter/iptable_filter.c9
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c19
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c68
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c40
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c9
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c8
-rw-r--r--net/ipv4/ping.c43
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c12
-rw-r--r--net/ipv4/route.c194
-rw-r--r--net/ipv4/sysctl_net_ipv4.c26
-rw-r--r--net/ipv4/tcp.c352
-rw-r--r--net/ipv4/tcp_cong.c15
-rw-r--r--net/ipv4/tcp_hybla.c10
-rw-r--r--net/ipv4/tcp_input.c843
-rw-r--r--net/ipv4/tcp_ipv4.c431
-rw-r--r--net/ipv4/tcp_memcontrol.c111
-rw-r--r--net/ipv4/tcp_minisocks.c37
-rw-r--r--net/ipv4/tcp_output.c158
-rw-r--r--net/ipv4/tcp_probe.c8
-rw-r--r--net/ipv4/tcp_timer.c19
-rw-r--r--net/ipv4/tunnel4.c8
-rw-r--r--net/ipv4/udp.c100
-rw-r--r--net/ipv4/udp_diag.c9
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udplite.c7
-rw-r--r--net/ipv4/xfrm4_policy.c6
-rw-r--r--net/ipv4/xfrm4_tunnel.c16
-rw-r--r--net/ipv6/Kconfig4
-rw-r--r--net/ipv6/addrconf.c213
-rw-r--r--net/ipv6/addrconf_core.c4
-rw-r--r--net/ipv6/addrlabel.c26
-rw-r--r--net/ipv6/af_inet6.c72
-rw-r--r--net/ipv6/ah6.c30
-rw-r--r--net/ipv6/anycast.c41
-rw-r--r--net/ipv6/datagram.c22
-rw-r--r--net/ipv6/esp6.c32
-rw-r--r--net/ipv6/exthdrs.c101
-rw-r--r--net/ipv6/exthdrs_core.c5
-rw-r--r--net/ipv6/fib6_rules.c18
-rw-r--r--net/ipv6/icmp.c32
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_fib.c53
-rw-r--r--net/ipv6/ip6_flowlabel.c29
-rw-r--r--net/ipv6/ip6_input.c9
-rw-r--r--net/ipv6/ip6_output.c112
-rw-r--r--net/ipv6/ip6_tunnel.c62
-rw-r--r--net/ipv6/ip6mr.c15
-rw-r--r--net/ipv6/ipcomp6.c15
-rw-r--r--net/ipv6/ipv6_sockglue.c41
-rw-r--r--net/ipv6/mcast.c73
-rw-r--r--net/ipv6/mip6.c32
-rw-r--r--net/ipv6/ndisc.c275
-rw-r--r--net/ipv6/netfilter/Kconfig31
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/ip6_queue.c641
-rw-r--r--net/ipv6/netfilter/ip6_tables.c55
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c527
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c6
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c4
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c4
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c4
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c4
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c9
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c3
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c12
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c68
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c13
-rw-r--r--net/ipv6/raw.c12
-rw-r--r--net/ipv6/reassembly.c54
-rw-r--r--net/ipv6/route.c198
-rw-r--r--net/ipv6/sit.c90
-rw-r--r--net/ipv6/sysctl_net_ipv6.c83
-rw-r--r--net/ipv6/tcp_ipv6.c321
-rw-r--r--net/ipv6/tunnel6.c10
-rw-r--r--net/ipv6/udp.c187
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c6
-rw-r--r--net/ipx/af_ipx.c14
-rw-r--r--net/ipx/sysctl_net_ipx.c11
-rw-r--r--net/irda/ircomm/ircomm_tty.c7
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c2
-rw-r--r--net/irda/irlan/irlan_client.c1
-rw-r--r--net/irda/irlan/irlan_common.c1
-rw-r--r--net/irda/irlan/irlan_provider.c1
-rw-r--r--net/irda/irnet/irnet.h2
-rw-r--r--net/irda/irsysctl.c10
-rw-r--r--net/irda/timer.c1
-rw-r--r--net/iucv/af_iucv.c381
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l2tp/Makefile3
-rw-r--r--net/l2tp/l2tp_core.c525
-rw-r--r--net/l2tp/l2tp_core.h57
-rw-r--r--net/l2tp/l2tp_debugfs.c14
-rw-r--r--net/l2tp/l2tp_eth.c17
-rw-r--r--net/l2tp/l2tp_ip.c133
-rw-r--r--net/l2tp/l2tp_ip6.c803
-rw-r--r--net/l2tp/l2tp_netlink.c197
-rw-r--r--net/l2tp/l2tp_ppp.c211
-rw-r--r--net/lapb/lapb_iface.c23
-rw-r--r--net/lapb/lapb_in.c321
-rw-r--r--net/lapb/lapb_out.c39
-rw-r--r--net/lapb/lapb_subr.c29
-rw-r--r--net/lapb/lapb_timer.c33
-rw-r--r--net/llc/af_llc.c16
-rw-r--r--net/llc/llc_conn.c2
-rw-r--r--net/llc/llc_output.c3
-rw-r--r--net/llc/llc_sap.c4
-rw-r--r--net/llc/sysctl_net_llc.c52
-rw-r--r--net/mac80211/Kconfig11
-rw-r--r--net/mac80211/Makefile7
-rw-r--r--net/mac80211/agg-rx.c34
-rw-r--r--net/mac80211/agg-tx.c63
-rw-r--r--net/mac80211/cfg.c508
-rw-r--r--net/mac80211/chan.c29
-rw-r--r--net/mac80211/debugfs.c99
-rw-r--r--net/mac80211/debugfs.h1
-rw-r--r--net/mac80211/debugfs_key.c4
-rw-r--r--net/mac80211/debugfs_netdev.c178
-rw-r--r--net/mac80211/debugfs_sta.c10
-rw-r--r--net/mac80211/driver-ops.h146
-rw-r--r--net/mac80211/driver-trace.h147
-rw-r--r--net/mac80211/ht.c17
-rw-r--r--net/mac80211/ibss.c154
-rw-r--r--net/mac80211/ieee80211_i.h245
-rw-r--r--net/mac80211/iface.c198
-rw-r--r--net/mac80211/key.c39
-rw-r--r--net/mac80211/main.c49
-rw-r--r--net/mac80211/mesh.c76
-rw-r--r--net/mac80211/mesh.h42
-rw-r--r--net/mac80211/mesh_hwmp.c89
-rw-r--r--net/mac80211/mesh_pathtbl.c50
-rw-r--r--net/mac80211/mesh_plink.c257
-rw-r--r--net/mac80211/mesh_sync.c316
-rw-r--r--net/mac80211/mlme.c1963
-rw-r--r--net/mac80211/pm.c15
-rw-r--r--net/mac80211/rate.c153
-rw-r--r--net/mac80211/rate.h11
-rw-r--r--net/mac80211/rc80211_minstrel.c13
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c34
-rw-r--r--net/mac80211/rx.c181
-rw-r--r--net/mac80211/scan.c214
-rw-r--r--net/mac80211/sta_info.c371
-rw-r--r--net/mac80211/sta_info.h71
-rw-r--r--net/mac80211/status.c18
-rw-r--r--net/mac80211/tx.c163
-rw-r--r--net/mac80211/util.c341
-rw-r--r--net/mac80211/wep.c36
-rw-r--r--net/mac80211/wep.h1
-rw-r--r--net/mac80211/wme.c46
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/work.c829
-rw-r--r--net/mac80211/wpa.c32
-rw-r--r--net/mac802154/Kconfig16
-rw-r--r--net/mac802154/Makefile2
-rw-r--r--net/mac802154/ieee802154_dev.c294
-rw-r--r--net/mac802154/mac802154.h109
-rw-r--r--net/mac802154/mac_cmd.c45
-rw-r--r--net/mac802154/mib.c93
-rw-r--r--net/mac802154/monitor.c116
-rw-r--r--net/mac802154/rx.c114
-rw-r--r--net/mac802154/tx.c116
-rw-r--r--net/netfilter/Kconfig45
-rw-r--r--net/netfilter/Makefile4
-rw-r--r--net/netfilter/core.c15
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c37
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c51
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c33
-rw-r--r--net/netfilter/ipset/ip_set_core.c75
-rw-r--r--net/netfilter/ipset/ip_set_getport.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c48
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c57
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c65
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c206
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c132
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c138
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c201
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c25
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c104
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c43
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c270
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c57
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c668
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c2
-rw-r--r--net/netfilter/nf_conntrack_acct.c4
-rw-r--r--net/netfilter/nf_conntrack_amanda.c3
-rw-r--r--net/netfilter/nf_conntrack_core.c57
-rw-r--r--net/netfilter/nf_conntrack_ecache.c68
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c15
-rw-r--r--net/netfilter/nf_conntrack_helper.c176
-rw-r--r--net/netfilter/nf_conntrack_irc.c8
-rw-r--r--net/netfilter/nf_conntrack_netlink.c301
-rw-r--r--net/netfilter/nf_conntrack_proto.c31
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c102
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c78
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c83
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c99
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c199
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c107
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c104
-rw-r--r--net/netfilter/nf_conntrack_standalone.c14
-rw-r--r--net/netfilter/nf_conntrack_timeout.c60
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c4
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nfnetlink.c3
-rw-r--r--net/netfilter/nfnetlink_acct.c18
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c431
-rw-r--r--net/netfilter/nfnetlink_log.c100
-rw-r--r--net/netfilter/nfnetlink_queue.c68
-rw-r--r--net/netfilter/xt_CT.c250
-rw-r--r--net/netfilter/xt_HMARK.c362
-rw-r--r--net/netfilter/xt_LOG.c925
-rw-r--r--net/netfilter/xt_TCPMSS.c10
-rw-r--r--net/netfilter/xt_TEE.c12
-rw-r--r--net/netfilter/xt_TPROXY.c4
-rw-r--r--net/netfilter/xt_hashlimit.c132
-rw-r--r--net/netfilter/xt_limit.c5
-rw-r--r--net/netfilter/xt_mac.c2
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netfilter/xt_set.c15
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/netlabel/netlabel_kapi.c2
-rw-r--r--net/netlink/af_netlink.c123
-rw-r--r--net/netlink/genetlink.c77
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_dev.c3
-rw-r--r--net/netrom/nr_in.c1
-rw-r--r--net/netrom/nr_out.c1
-rw-r--r--net/netrom/nr_route.c1
-rw-r--r--net/netrom/nr_subr.c1
-rw-r--r--net/netrom/nr_timer.c1
-rw-r--r--net/netrom/sysctl_net_netrom.c10
-rw-r--r--net/nfc/Kconfig1
-rw-r--r--net/nfc/Makefile1
-rw-r--r--net/nfc/af_nfc.c2
-rw-r--r--net/nfc/core.c263
-rw-r--r--net/nfc/hci/Kconfig17
-rw-r--r--net/nfc/hci/Makefile8
-rw-r--r--net/nfc/hci/command.c354
-rw-r--r--net/nfc/hci/core.c798
-rw-r--r--net/nfc/hci/hci.h139
-rw-r--r--net/nfc/hci/hcp.c156
-rw-r--r--net/nfc/hci/shdlc.c957
-rw-r--r--net/nfc/llcp/commands.c169
-rw-r--r--net/nfc/llcp/llcp.c227
-rw-r--r--net/nfc/llcp/llcp.h12
-rw-r--r--net/nfc/llcp/sock.c177
-rw-r--r--net/nfc/nci/core.c222
-rw-r--r--net/nfc/nci/data.c40
-rw-r--r--net/nfc/nci/lib.c1
-rw-r--r--net/nfc/nci/ntf.c363
-rw-r--r--net/nfc/nci/rsp.c41
-rw-r--r--net/nfc/netlink.c160
-rw-r--r--net/nfc/nfc.h23
-rw-r--r--net/nfc/rawsock.c26
-rw-r--r--net/openvswitch/datapath.c64
-rw-r--r--net/openvswitch/flow.c21
-rw-r--r--net/openvswitch/vport-internal_dev.c3
-rw-r--r--net/openvswitch/vport-netdev.c10
-rw-r--r--net/packet/af_packet.c83
-rw-r--r--net/phonet/af_phonet.c2
-rw-r--r--net/phonet/pep.c11
-rw-r--r--net/phonet/pn_dev.c25
-rw-r--r--net/phonet/pn_netlink.c8
-rw-r--r--net/phonet/socket.c12
-rw-r--r--net/phonet/sysctl.c17
-rw-r--r--net/rds/ib.h3
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/ib_recv.c9
-rw-r--r--net/rds/ib_sysctl.c11
-rw-r--r--net/rds/info.c6
-rw-r--r--net/rds/iw_cm.c2
-rw-r--r--net/rds/iw_recv.c9
-rw-r--r--net/rds/iw_sysctl.c11
-rw-r--r--net/rds/loop.c4
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c1
-rw-r--r--net/rds/sysctl.c11
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rds/tcp_recv.c11
-rw-r--r--net/rfkill/core.c1
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/rose/rose_dev.c7
-rw-r--r--net/rose/rose_in.c1
-rw-r--r--net/rose/rose_link.c1
-rw-r--r--net/rose/rose_out.c1
-rw-r--r--net/rose/rose_route.c1
-rw-r--r--net/rose/rose_subr.c3
-rw-r--r--net/rose/rose_timer.c1
-rw-r--r--net/rose/sysctl_net_rose.c10
-rw-r--r--net/rxrpc/af_rxrpc.c8
-rw-r--r--net/rxrpc/ar-ack.c6
-rw-r--r--net/rxrpc/ar-call.c4
-rw-r--r--net/rxrpc/ar-input.c2
-rw-r--r--net/rxrpc/ar-internal.h16
-rw-r--r--net/rxrpc/ar-key.c22
-rw-r--r--net/rxrpc/rxkad.c6
-rw-r--r--net/sched/Kconfig48
-rw-r--r--net/sched/Makefile3
-rw-r--r--net/sched/act_api.c9
-rw-r--r--net/sched/act_csum.c8
-rw-r--r--net/sched/act_gact.c9
-rw-r--r--net/sched/act_ipt.c21
-rw-r--r--net/sched/act_mirred.c11
-rw-r--r--net/sched/act_nat.c6
-rw-r--r--net/sched/act_pedit.c6
-rw-r--r--net/sched/act_police.c13
-rw-r--r--net/sched/act_simple.c8
-rw-r--r--net/sched/act_skbedit.c27
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_basic.c5
-rw-r--r--net/sched/cls_cgroup.c37
-rw-r--r--net/sched/cls_flow.c35
-rw-r--r--net/sched/cls_fw.c15
-rw-r--r--net/sched/cls_route.c16
-rw-r--r--net/sched/cls_rsvp.h16
-rw-r--r--net/sched/cls_tcindex.c14
-rw-r--r--net/sched/cls_u32.c43
-rw-r--r--net/sched/em_meta.c19
-rw-r--r--net/sched/ematch.c10
-rw-r--r--net/sched/sch_api.c19
-rw-r--r--net/sched/sch_atm.c27
-rw-r--r--net/sched/sch_cbq.c18
-rw-r--r--net/sched/sch_choke.c13
-rw-r--r--net/sched/sch_codel.c276
-rw-r--r--net/sched/sch_drr.c7
-rw-r--r--net/sched/sch_dsmark.c21
-rw-r--r--net/sched/sch_fifo.c3
-rw-r--r--net/sched/sch_fq_codel.c626
-rw-r--r--net/sched/sch_generic.c14
-rw-r--r--net/sched/sch_gred.c25
-rw-r--r--net/sched/sch_hfsc.c8
-rw-r--r--net/sched/sch_htb.c12
-rw-r--r--net/sched/sch_mqprio.c3
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c45
-rw-r--r--net/sched/sch_plug.c233
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c5
-rw-r--r--net/sched/sch_red.c5
-rw-r--r--net/sched/sch_sfb.c3
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sched/sch_teql.c4
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/sm_sideeffect.c9
-rw-r--r--net/sctp/sm_statefuns.c22
-rw-r--r--net/sctp/socket.c35
-rw-r--r--net/sctp/sysctl.c10
-rw-r--r--net/sctp/transport.c17
-rw-r--r--net/socket.c128
-rw-r--r--net/sunrpc/Kconfig13
-rw-r--r--net/sunrpc/addr.c26
-rw-r--r--net/sunrpc/auth_generic.c4
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c216
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c7
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c65
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c7
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c291
-rw-r--r--net/sunrpc/auth_unix.c15
-rw-r--r--net/sunrpc/backchannel_rqst.c1
-rw-r--r--net/sunrpc/cache.c48
-rw-r--r--net/sunrpc/clnt.c591
-rw-r--r--net/sunrpc/netns.h14
-rw-r--r--net/sunrpc/rpc_pipe.c518
-rw-r--r--net/sunrpc/rpcb_clnt.c198
-rw-r--r--net/sunrpc/sched.c73
-rw-r--r--net/sunrpc/socklib.c4
-rw-r--r--net/sunrpc/stats.c35
-rw-r--r--net/sunrpc/sunrpc.h2
-rw-r--r--net/sunrpc/sunrpc_syms.c34
-rw-r--r--net/sunrpc/svc.c129
-rw-r--r--net/sunrpc/svc_xprt.c68
-rw-r--r--net/sunrpc/svcauth_unix.c165
-rw-r--r--net/sunrpc/svcsock.c37
-rw-r--r--net/sunrpc/sysctl.c4
-rw-r--r--net/sunrpc/timer.c6
-rw-r--r--net/sunrpc/xdr.c22
-rw-r--r--net/sunrpc/xprt.c89
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c17
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c66
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c20
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c26
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c10
-rw-r--r--net/sunrpc/xprtrdma/verbs.c17
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h7
-rw-r--r--net/sunrpc/xprtsock.c34
-rw-r--r--net/sysctl_net.c59
-rw-r--r--net/tipc/Makefile2
-rw-r--r--net/tipc/addr.c3
-rw-r--r--net/tipc/addr.h19
-rw-r--r--net/tipc/bcast.c356
-rw-r--r--net/tipc/bcast.h5
-rw-r--r--net/tipc/bearer.c29
-rw-r--r--net/tipc/bearer.h4
-rw-r--r--net/tipc/config.c51
-rw-r--r--net/tipc/config.h1
-rw-r--r--net/tipc/core.c21
-rw-r--r--net/tipc/core.h56
-rw-r--r--net/tipc/discover.c91
-rw-r--r--net/tipc/eth_media.c19
-rw-r--r--net/tipc/handler.c1
-rw-r--r--net/tipc/link.c409
-rw-r--r--net/tipc/link.h6
-rw-r--r--net/tipc/log.c16
-rw-r--r--net/tipc/log.h1
-rw-r--r--net/tipc/msg.c5
-rw-r--r--net/tipc/msg.h36
-rw-r--r--net/tipc/name_distr.c138
-rw-r--r--net/tipc/name_table.c146
-rw-r--r--net/tipc/name_table.h5
-rw-r--r--net/tipc/net.c20
-rw-r--r--net/tipc/node.c99
-rw-r--r--net/tipc/node.h39
-rw-r--r--net/tipc/node_subscr.c5
-rw-r--r--net/tipc/node_subscr.h1
-rw-r--r--net/tipc/port.c176
-rw-r--r--net/tipc/port.h56
-rw-r--r--net/tipc/ref.c13
-rw-r--r--net/tipc/socket.c114
-rw-r--r--net/tipc/subscr.c47
-rw-r--r--net/tipc/subscr.h2
-rw-r--r--net/unix/af_unix.c127
-rw-r--r--net/unix/diag.c14
-rw-r--r--net/unix/sysctl_net_unix.c10
-rw-r--r--net/wanrouter/Kconfig2
-rw-r--r--net/wimax/stack.c5
-rw-r--r--net/wireless/chan.c2
-rw-r--r--net/wireless/core.c13
-rw-r--r--net/wireless/core.h16
-rw-r--r--net/wireless/debugfs.c10
-rw-r--r--net/wireless/ethtool.c29
-rw-r--r--net/wireless/ibss.c2
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c33
-rw-r--r--net/wireless/lib80211_crypt_tkip.c50
-rw-r--r--net/wireless/mesh.c8
-rw-r--r--net/wireless/mlme.c389
-rw-r--r--net/wireless/nl80211.c1782
-rw-r--r--net/wireless/nl80211.h7
-rw-r--r--net/wireless/reg.c29
-rw-r--r--net/wireless/scan.c29
-rw-r--r--net/wireless/sme.c41
-rw-r--r--net/wireless/util.c26
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/wireless/wext-core.c15
-rw-r--r--net/wireless/wext-sme.c5
-rw-r--r--net/wireless/wext-spy.c2
-rw-r--r--net/x25/sysctl_net_x25.c10
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/x25/x25_facilities.c4
-rw-r--r--net/xfrm/Kconfig13
-rw-r--r--net/xfrm/Makefile3
-rw-r--r--net/xfrm/xfrm_algo.c5
-rw-r--r--net/xfrm/xfrm_hash.h8
-rw-r--r--net/xfrm/xfrm_output.c4
-rw-r--r--net/xfrm/xfrm_policy.c31
-rw-r--r--net/xfrm/xfrm_replay.c6
-rw-r--r--net/xfrm/xfrm_sysctl.c2
-rw-r--r--net/xfrm/xfrm_user.c114
720 files changed, 35664 insertions, 22882 deletions
diff --git a/net/802/Makefile b/net/802/Makefile
index 7893d679910c..a30d6e385aed 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -4,7 +4,6 @@
4 4
5# Check the p8022 selections against net/core/Makefile. 5# Check the p8022 selections against net/core/Makefile.
6obj-$(CONFIG_LLC) += p8022.o psnap.o 6obj-$(CONFIG_LLC) += p8022.o psnap.o
7obj-$(CONFIG_TR) += p8022.o psnap.o tr.o
8obj-$(CONFIG_NET_FC) += fc.o 7obj-$(CONFIG_NET_FC) += fc.o
9obj-$(CONFIG_FDDI) += fddi.o 8obj-$(CONFIG_FDDI) += fddi.o
10obj-$(CONFIG_HIPPI) += hippi.o 9obj-$(CONFIG_HIPPI) += hippi.o
diff --git a/net/802/fc.c b/net/802/fc.c
index bd345f3d29f8..05eea6b98bb8 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/types.h> 14#include <linux/types.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/string.h> 16#include <linux/string.h>
@@ -36,7 +35,7 @@
36 35
37static int fc_header(struct sk_buff *skb, struct net_device *dev, 36static int fc_header(struct sk_buff *skb, struct net_device *dev,
38 unsigned short type, 37 unsigned short type,
39 const void *daddr, const void *saddr, unsigned len) 38 const void *daddr, const void *saddr, unsigned int len)
40{ 39{
41 struct fch_hdr *fch; 40 struct fch_hdr *fch;
42 int hdr_len; 41 int hdr_len;
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 94b3ad08f39a..9cda40661e0d 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <asm/system.h>
31#include <linux/types.h> 30#include <linux/types.h>
32#include <linux/kernel.h> 31#include <linux/kernel.h>
33#include <linux/string.h> 32#include <linux/string.h>
@@ -52,7 +51,7 @@
52 51
53static int fddi_header(struct sk_buff *skb, struct net_device *dev, 52static int fddi_header(struct sk_buff *skb, struct net_device *dev,
54 unsigned short type, 53 unsigned short type,
55 const void *daddr, const void *saddr, unsigned len) 54 const void *daddr, const void *saddr, unsigned int len)
56{ 55{
57 int hl = FDDI_K_SNAP_HLEN; 56 int hl = FDDI_K_SNAP_HLEN;
58 struct fddihdr *fddi; 57 struct fddihdr *fddi;
diff --git a/net/802/garp.c b/net/802/garp.c
index 8e21b6db3981..8456f5d98b85 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
157 while (parent) { 157 while (parent) {
158 attr = rb_entry(parent, struct garp_attr, node); 158 attr = rb_entry(parent, struct garp_attr, node);
159 d = garp_attr_cmp(attr, data, len, type); 159 d = garp_attr_cmp(attr, data, len, type);
160 if (d < 0) 160 if (d > 0)
161 parent = parent->rb_left; 161 parent = parent->rb_left;
162 else if (d > 0) 162 else if (d < 0)
163 parent = parent->rb_right; 163 parent = parent->rb_right;
164 else 164 else
165 return attr; 165 return attr;
@@ -167,7 +167,8 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
167 return NULL; 167 return NULL;
168} 168}
169 169
170static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new) 170static struct garp_attr *garp_attr_create(struct garp_applicant *app,
171 const void *data, u8 len, u8 type)
171{ 172{
172 struct rb_node *parent = NULL, **p = &app->gid.rb_node; 173 struct rb_node *parent = NULL, **p = &app->gid.rb_node;
173 struct garp_attr *attr; 174 struct garp_attr *attr;
@@ -176,21 +177,16 @@ static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
176 while (*p) { 177 while (*p) {
177 parent = *p; 178 parent = *p;
178 attr = rb_entry(parent, struct garp_attr, node); 179 attr = rb_entry(parent, struct garp_attr, node);
179 d = garp_attr_cmp(attr, new->data, new->dlen, new->type); 180 d = garp_attr_cmp(attr, data, len, type);
180 if (d < 0) 181 if (d > 0)
181 p = &parent->rb_left; 182 p = &parent->rb_left;
182 else if (d > 0) 183 else if (d < 0)
183 p = &parent->rb_right; 184 p = &parent->rb_right;
185 else {
186 /* The attribute already exists; re-use it. */
187 return attr;
188 }
184 } 189 }
185 rb_link_node(&new->node, parent, p);
186 rb_insert_color(&new->node, &app->gid);
187}
188
189static struct garp_attr *garp_attr_create(struct garp_applicant *app,
190 const void *data, u8 len, u8 type)
191{
192 struct garp_attr *attr;
193
194 attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); 190 attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
195 if (!attr) 191 if (!attr)
196 return attr; 192 return attr;
@@ -198,7 +194,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app,
198 attr->type = type; 194 attr->type = type;
199 attr->dlen = len; 195 attr->dlen = len;
200 memcpy(attr->data, data, len); 196 memcpy(attr->data, data, len);
201 garp_attr_insert(app, attr); 197
198 rb_link_node(&attr->node, parent, p);
199 rb_insert_color(&attr->node, &app->gid);
202 return attr; 200 return attr;
203} 201}
204 202
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 91aca8780fd0..51a1f530417d 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -35,7 +35,6 @@
35#include <net/arp.h> 35#include <net/arp.h>
36#include <net/sock.h> 36#include <net/sock.h>
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38#include <asm/system.h>
39 38
40/* 39/*
41 * Create the HIPPI MAC header for an arbitrary protocol layer 40 * Create the HIPPI MAC header for an arbitrary protocol layer
@@ -46,7 +45,7 @@
46 45
47static int hippi_header(struct sk_buff *skb, struct net_device *dev, 46static int hippi_header(struct sk_buff *skb, struct net_device *dev,
48 unsigned short type, 47 unsigned short type,
49 const void *daddr, const void *saddr, unsigned len) 48 const void *daddr, const void *saddr, unsigned int len)
50{ 49{
51 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); 50 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
52 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; 51 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 7f353c4f437a..0bda8de7df51 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -1,6 +1,5 @@
1/* 1/*
2 * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring 2 * NET3: Support for 802.2 demultiplexing off Ethernet
3 * is kept separate see p8022tr.c)
4 * This program is free software; you can redistribute it and/or 3 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 4 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 5 * as published by the Free Software Foundation; either version
diff --git a/net/802/stp.c b/net/802/stp.c
index 15540b7323cd..2c40ba0ec116 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
46 proto = rcu_dereference(garp_protos[eh->h_dest[5] - 46 proto = rcu_dereference(garp_protos[eh->h_dest[5] -
47 GARP_ADDR_MIN]); 47 GARP_ADDR_MIN]);
48 if (proto && 48 if (proto &&
49 compare_ether_addr(eh->h_dest, proto->group_address)) 49 !ether_addr_equal(eh->h_dest, proto->group_address))
50 goto err; 50 goto err;
51 } else 51 } else
52 proto = rcu_dereference(stp_proto); 52 proto = rcu_dereference(stp_proto);
diff --git a/net/802/tr.c b/net/802/tr.c
deleted file mode 100644
index 5e20cf8a074b..000000000000
--- a/net/802/tr.c
+++ /dev/null
@@ -1,677 +0,0 @@
1/*
2 * NET3: Token ring device handling subroutines
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
10 * Added rif table to /proc/net/tr_rif and rif timeout to
11 * /proc/sys/net/token-ring/rif_timeout.
12 * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
13 * tr_header and tr_type_trans to handle passing IPX SNAP and
14 * 802.2 through the correct layers. Eliminated tr_reformat.
15 *
16 */
17
18#include <asm/uaccess.h>
19#include <asm/system.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/jiffies.h>
24#include <linux/string.h>
25#include <linux/mm.h>
26#include <linux/socket.h>
27#include <linux/in.h>
28#include <linux/inet.h>
29#include <linux/netdevice.h>
30#include <linux/trdevice.h>
31#include <linux/skbuff.h>
32#include <linux/errno.h>
33#include <linux/timer.h>
34#include <linux/net.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/init.h>
38#include <linux/sysctl.h>
39#include <linux/slab.h>
40#include <net/arp.h>
41#include <net/net_namespace.h>
42
43static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
44static void rif_check_expire(unsigned long dummy);
45
46#define TR_SR_DEBUG 0
47
48/*
49 * Each RIF entry we learn is kept this way
50 */
51
52struct rif_cache {
53 unsigned char addr[TR_ALEN];
54 int iface;
55 __be16 rcf;
56 __be16 rseg[8];
57 struct rif_cache *next;
58 unsigned long last_used;
59 unsigned char local_ring;
60};
61
62#define RIF_TABLE_SIZE 32
63
64/*
65 * We hash the RIF cache 32 ways. We do after all have to look it
66 * up a lot.
67 */
68
69static struct rif_cache *rif_table[RIF_TABLE_SIZE];
70
71static DEFINE_SPINLOCK(rif_lock);
72
73
74/*
75 * Garbage disposal timer.
76 */
77
78static struct timer_list rif_timer;
79
80static int sysctl_tr_rif_timeout = 60*10*HZ;
81
82static inline unsigned long rif_hash(const unsigned char *addr)
83{
84 unsigned long x;
85
86 x = addr[0];
87 x = (x << 2) ^ addr[1];
88 x = (x << 2) ^ addr[2];
89 x = (x << 2) ^ addr[3];
90 x = (x << 2) ^ addr[4];
91 x = (x << 2) ^ addr[5];
92
93 x ^= x >> 8;
94
95 return x & (RIF_TABLE_SIZE - 1);
96}
97
98/*
99 * Put the headers on a token ring packet. Token ring source routing
100 * makes this a little more exciting than on ethernet.
101 */
102
103static int tr_header(struct sk_buff *skb, struct net_device *dev,
104 unsigned short type,
105 const void *daddr, const void *saddr, unsigned len)
106{
107 struct trh_hdr *trh;
108 int hdr_len;
109
110 /*
111 * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
112 * dev->hard_header directly.
113 */
114 if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
115 {
116 struct trllc *trllc;
117
118 hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
119 trh = (struct trh_hdr *)skb_push(skb, hdr_len);
120 trllc = (struct trllc *)(trh+1);
121 trllc->dsap = trllc->ssap = EXTENDED_SAP;
122 trllc->llc = UI_CMD;
123 trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
124 trllc->ethertype = htons(type);
125 }
126 else
127 {
128 hdr_len = sizeof(struct trh_hdr);
129 trh = (struct trh_hdr *)skb_push(skb, hdr_len);
130 }
131
132 trh->ac=AC;
133 trh->fc=LLC_FRAME;
134
135 if(saddr)
136 memcpy(trh->saddr,saddr,dev->addr_len);
137 else
138 memcpy(trh->saddr,dev->dev_addr,dev->addr_len);
139
140 /*
141 * Build the destination and then source route the frame
142 */
143
144 if(daddr)
145 {
146 memcpy(trh->daddr,daddr,dev->addr_len);
147 tr_source_route(skb, trh, dev);
148 return hdr_len;
149 }
150
151 return -hdr_len;
152}
153
154/*
155 * A neighbour discovery of some species (eg arp) has completed. We
156 * can now send the packet.
157 */
158
159static int tr_rebuild_header(struct sk_buff *skb)
160{
161 struct trh_hdr *trh=(struct trh_hdr *)skb->data;
162 struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
163 struct net_device *dev = skb->dev;
164
165 /*
166 * FIXME: We don't yet support IPv6 over token rings
167 */
168
169 if(trllc->ethertype != htons(ETH_P_IP)) {
170 printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
171 return 0;
172 }
173
174#ifdef CONFIG_INET
175 if(arp_find(trh->daddr, skb)) {
176 return 1;
177 }
178 else
179#endif
180 {
181 tr_source_route(skb,trh,dev);
182 return 0;
183 }
184}
185
186/*
187 * Some of this is a bit hackish. We intercept RIF information
188 * used for source routing. We also grab IP directly and don't feed
189 * it via SNAP.
190 */
191
192__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
193{
194
195 struct trh_hdr *trh;
196 struct trllc *trllc;
197 unsigned riflen=0;
198
199 skb->dev = dev;
200 skb_reset_mac_header(skb);
201 trh = tr_hdr(skb);
202
203 if(trh->saddr[0] & TR_RII)
204 riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
205
206 trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
207
208 skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
209
210 if(*trh->daddr & 0x80)
211 {
212 if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
213 skb->pkt_type=PACKET_BROADCAST;
214 else
215 skb->pkt_type=PACKET_MULTICAST;
216 }
217 else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
218 {
219 skb->pkt_type=PACKET_MULTICAST;
220 }
221 else if(dev->flags & IFF_PROMISC)
222 {
223 if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
224 skb->pkt_type=PACKET_OTHERHOST;
225 }
226
227 if ((skb->pkt_type != PACKET_BROADCAST) &&
228 (skb->pkt_type != PACKET_MULTICAST))
229 tr_add_rif_info(trh,dev) ;
230
231 /*
232 * Strip the SNAP header from ARP packets since we don't
233 * pass them through to the 802.2/SNAP layers.
234 */
235
236 if (trllc->dsap == EXTENDED_SAP &&
237 (trllc->ethertype == htons(ETH_P_IP) ||
238 trllc->ethertype == htons(ETH_P_IPV6) ||
239 trllc->ethertype == htons(ETH_P_ARP)))
240 {
241 skb_pull(skb, sizeof(struct trllc));
242 return trllc->ethertype;
243 }
244
245 return htons(ETH_P_TR_802_2);
246}
247
248/*
249 * We try to do source routing...
250 */
251
252void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
253 struct net_device *dev)
254{
255 int slack;
256 unsigned int hash;
257 struct rif_cache *entry;
258 unsigned char *olddata;
259 unsigned long flags;
260 static const unsigned char mcast_func_addr[]
261 = {0xC0,0x00,0x00,0x04,0x00,0x00};
262
263 spin_lock_irqsave(&rif_lock, flags);
264
265 /*
266 * Broadcasts are single route as stated in RFC 1042
267 */
268 if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
269 (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
270 {
271 trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
272 | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
273 trh->saddr[0]|=TR_RII;
274 }
275 else
276 {
277 hash = rif_hash(trh->daddr);
278 /*
279 * Walk the hash table and look for an entry
280 */
281 for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);
282
283 /*
284 * If we found an entry we can route the frame.
285 */
286 if(entry)
287 {
288#if TR_SR_DEBUG
289printk("source routing for %pM\n", trh->daddr);
290#endif
291 if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
292 {
293 trh->rcf=entry->rcf;
294 memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
295 trh->rcf^=htons(TR_RCF_DIR_BIT);
296 trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */
297
298 trh->saddr[0]|=TR_RII;
299#if TR_SR_DEBUG
300 printk("entry found with rcf %04x\n", entry->rcf);
301 }
302 else
303 {
304 printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
305#endif
306 }
307 entry->last_used=jiffies;
308 }
309 else
310 {
311 /*
312 * Without the information we simply have to shout
313 * on the wire. The replies should rapidly clean this
314 * situation up.
315 */
316 trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
317 | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
318 trh->saddr[0]|=TR_RII;
319#if TR_SR_DEBUG
320 printk("no entry in rif table found - broadcasting frame\n");
321#endif
322 }
323 }
324
325 /* Compress the RIF here so we don't have to do it in the driver(s) */
326 if (!(trh->saddr[0] & 0x80))
327 slack = 18;
328 else
329 slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
330 olddata = skb->data;
331 spin_unlock_irqrestore(&rif_lock, flags);
332
333 skb_pull(skb, slack);
334 memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
335}
336
337/*
338 * We have learned some new RIF information for our source
339 * routing.
340 */
341
342static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
343{
344 unsigned int hash, rii_p = 0;
345 unsigned long flags;
346 struct rif_cache *entry;
347 unsigned char saddr0;
348
349 spin_lock_irqsave(&rif_lock, flags);
350 saddr0 = trh->saddr[0];
351
352 /*
353 * Firstly see if the entry exists
354 */
355
356 if(trh->saddr[0] & TR_RII)
357 {
358 trh->saddr[0]&=0x7f;
359 if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
360 {
361 rii_p = 1;
362 }
363 }
364
365 hash = rif_hash(trh->saddr);
366 for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);
367
368 if(entry==NULL)
369 {
370#if TR_SR_DEBUG
371 printk("adding rif_entry: addr:%pM rcf:%04X\n",
372 trh->saddr, ntohs(trh->rcf));
373#endif
374 /*
375 * Allocate our new entry. A failure to allocate loses
376 * use the information. This is harmless.
377 *
378 * FIXME: We ought to keep some kind of cache size
379 * limiting and adjust the timers to suit.
380 */
381 entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
382
383 if(!entry)
384 {
385 printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
386 spin_unlock_irqrestore(&rif_lock, flags);
387 return;
388 }
389
390 memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
391 entry->iface = dev->ifindex;
392 entry->next=rif_table[hash];
393 entry->last_used=jiffies;
394 rif_table[hash]=entry;
395
396 if (rii_p)
397 {
398 entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
399 memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
400 entry->local_ring = 0;
401 }
402 else
403 {
404 entry->local_ring = 1;
405 }
406 }
407 else /* Y. Tahara added */
408 {
409 /*
410 * Update existing entries
411 */
412 if (!entry->local_ring)
413 if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
414 !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
415 {
416#if TR_SR_DEBUG
417printk("updating rif_entry: addr:%pM rcf:%04X\n",
418 trh->saddr, ntohs(trh->rcf));
419#endif
420 entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
421 memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
422 }
423 entry->last_used=jiffies;
424 }
425 trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
426 spin_unlock_irqrestore(&rif_lock, flags);
427}
428
429/*
430 * Scan the cache with a timer and see what we need to throw out.
431 */
432
433static void rif_check_expire(unsigned long dummy)
434{
435 int i;
436 unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
437
438 spin_lock_irqsave(&rif_lock, flags);
439
440 for(i =0; i < RIF_TABLE_SIZE; i++) {
441 struct rif_cache *entry, **pentry;
442
443 pentry = rif_table+i;
444 while((entry=*pentry) != NULL) {
445 unsigned long expires
446 = entry->last_used + sysctl_tr_rif_timeout;
447
448 if (time_before_eq(expires, jiffies)) {
449 *pentry = entry->next;
450 kfree(entry);
451 } else {
452 pentry = &entry->next;
453
454 if (time_before(expires, next_interval))
455 next_interval = expires;
456 }
457 }
458 }
459
460 spin_unlock_irqrestore(&rif_lock, flags);
461
462 mod_timer(&rif_timer, next_interval);
463
464}
465
466/*
467 * Generate the /proc/net information for the token ring RIF
468 * routing.
469 */
470
471#ifdef CONFIG_PROC_FS
472
473static struct rif_cache *rif_get_idx(loff_t pos)
474{
475 int i;
476 struct rif_cache *entry;
477 loff_t off = 0;
478
479 for(i = 0; i < RIF_TABLE_SIZE; i++)
480 for(entry = rif_table[i]; entry; entry = entry->next) {
481 if (off == pos)
482 return entry;
483 ++off;
484 }
485
486 return NULL;
487}
488
489static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
490 __acquires(&rif_lock)
491{
492 spin_lock_irq(&rif_lock);
493
494 return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
495}
496
497static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
498{
499 int i;
500 struct rif_cache *ent = v;
501
502 ++*pos;
503
504 if (v == SEQ_START_TOKEN) {
505 i = -1;
506 goto scan;
507 }
508
509 if (ent->next)
510 return ent->next;
511
512 i = rif_hash(ent->addr);
513 scan:
514 while (++i < RIF_TABLE_SIZE) {
515 if ((ent = rif_table[i]) != NULL)
516 return ent;
517 }
518 return NULL;
519}
520
521static void rif_seq_stop(struct seq_file *seq, void *v)
522 __releases(&rif_lock)
523{
524 spin_unlock_irq(&rif_lock);
525}
526
527static int rif_seq_show(struct seq_file *seq, void *v)
528{
529 int j, rcf_len, segment, brdgnmb;
530 struct rif_cache *entry = v;
531
532 if (v == SEQ_START_TOKEN)
533 seq_puts(seq,
534 "if TR address TTL rcf routing segments\n");
535 else {
536 struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
537 long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
538 - (long) jiffies;
539
540 seq_printf(seq, "%s %pM %7li ",
541 dev?dev->name:"?",
542 entry->addr,
543 ttl/HZ);
544
545 if (entry->local_ring)
546 seq_puts(seq, "local\n");
547 else {
548
549 seq_printf(seq, "%04X", ntohs(entry->rcf));
550 rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
551 if (rcf_len)
552 rcf_len >>= 1;
553 for(j = 1; j < rcf_len; j++) {
554 if(j==1) {
555 segment=ntohs(entry->rseg[j-1])>>4;
556 seq_printf(seq," %03X",segment);
557 }
558
559 segment=ntohs(entry->rseg[j])>>4;
560 brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
561 seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
562 }
563 seq_putc(seq, '\n');
564 }
565
566 if (dev)
567 dev_put(dev);
568 }
569 return 0;
570}
571
572
573static const struct seq_operations rif_seq_ops = {
574 .start = rif_seq_start,
575 .next = rif_seq_next,
576 .stop = rif_seq_stop,
577 .show = rif_seq_show,
578};
579
580static int rif_seq_open(struct inode *inode, struct file *file)
581{
582 return seq_open(file, &rif_seq_ops);
583}
584
585static const struct file_operations rif_seq_fops = {
586 .owner = THIS_MODULE,
587 .open = rif_seq_open,
588 .read = seq_read,
589 .llseek = seq_lseek,
590 .release = seq_release,
591};
592
593#endif
594
595static const struct header_ops tr_header_ops = {
596 .create = tr_header,
597 .rebuild= tr_rebuild_header,
598};
599
600static void tr_setup(struct net_device *dev)
601{
602 /*
603 * Configure and register
604 */
605
606 dev->header_ops = &tr_header_ops;
607
608 dev->type = ARPHRD_IEEE802_TR;
609 dev->hard_header_len = TR_HLEN;
610 dev->mtu = 2000;
611 dev->addr_len = TR_ALEN;
612 dev->tx_queue_len = 100; /* Long queues on tr */
613
614 memset(dev->broadcast,0xFF, TR_ALEN);
615
616 /* New-style flags. */
617 dev->flags = IFF_BROADCAST | IFF_MULTICAST ;
618}
619
620/**
621 * alloc_trdev - Register token ring device
622 * @sizeof_priv: Size of additional driver-private structure to be allocated
623 * for this token ring device
624 *
625 * Fill in the fields of the device structure with token ring-generic values.
626 *
627 * Constructs a new net device, complete with a private data area of
628 * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
629 * this private data area.
630 */
631struct net_device *alloc_trdev(int sizeof_priv)
632{
633 return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
634}
635
636#ifdef CONFIG_SYSCTL
637static struct ctl_table tr_table[] = {
638 {
639 .procname = "rif_timeout",
640 .data = &sysctl_tr_rif_timeout,
641 .maxlen = sizeof(int),
642 .mode = 0644,
643 .proc_handler = proc_dointvec
644 },
645 { },
646};
647
648static __initdata struct ctl_path tr_path[] = {
649 { .procname = "net", },
650 { .procname = "token-ring", },
651 { }
652};
653#endif
654
655/*
656 * Called during bootup. We don't actually have to initialise
657 * too much for this.
658 */
659
660static int __init rif_init(void)
661{
662 rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
663 setup_timer(&rif_timer, rif_check_expire, 0);
664 add_timer(&rif_timer);
665#ifdef CONFIG_SYSCTL
666 register_sysctl_paths(tr_path, tr_table);
667#endif
668 proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
669 return 0;
670}
671
672module_init(rif_init);
673
674EXPORT_SYMBOL(tr_type_trans);
675EXPORT_SYMBOL(alloc_trdev);
676
677MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index efea35b02e7f..6089f0cf23b4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev,
266 struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); 266 struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
267 267
268 /* May be called without an actual change */ 268 /* May be called without an actual change */
269 if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) 269 if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
270 return; 270 return;
271 271
272 /* vlan address was different from the old address and is equal to 272 /* vlan address was different from the old address and is equal to
273 * the new address */ 273 * the new address */
274 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 274 if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
275 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 275 ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
276 dev_uc_del(dev, vlandev->dev_addr); 276 dev_uc_del(dev, vlandev->dev_addr);
277 277
278 /* vlan address was equal to the old address and is different from 278 /* vlan address was equal to the old address and is different from
279 * the new address */ 279 * the new address */
280 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 280 if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
281 compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 281 !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
282 dev_uc_add(dev, vlandev->dev_addr); 282 dev_uc_add(dev, vlandev->dev_addr);
283 283
284 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); 284 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4d39d802be2c..8ca533c95de0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
31 /* Our lower layer thinks this is not local, let's make sure. 31 /* Our lower layer thinks this is not local, let's make sure.
32 * This allows the VLAN to have a different MAC than the 32 * This allows the VLAN to have a different MAC than the
33 * underlying device, and still route correctly. */ 33 * underlying device, and still route correctly. */
34 if (!compare_ether_addr(eth_hdr(skb)->h_dest, 34 if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
35 vlan_dev->dev_addr))
36 skb->pkt_type = PACKET_HOST; 35 skb->pkt_type = PACKET_HOST;
37 } 36 }
38 37
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb372..da1bc9c3cf38 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
158 } 158 }
159 159
160 skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); 160 skb->dev = vlan_dev_priv(dev)->real_dev;
161 len = skb->len; 161 len = skb->len;
162 if (netpoll_tx_running(dev)) 162 if (netpoll_tx_running(dev))
163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); 163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
@@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev)
277 !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) 277 !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
278 return -ENETDOWN; 278 return -ENETDOWN;
279 279
280 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { 280 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
281 err = dev_uc_add(real_dev, dev->dev_addr); 281 err = dev_uc_add(real_dev, dev->dev_addr);
282 if (err < 0) 282 if (err < 0)
283 goto out; 283 goto out;
@@ -307,7 +307,7 @@ clear_allmulti:
307 if (dev->flags & IFF_ALLMULTI) 307 if (dev->flags & IFF_ALLMULTI)
308 dev_set_allmulti(real_dev, -1); 308 dev_set_allmulti(real_dev, -1);
309del_unicast: 309del_unicast:
310 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 310 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
311 dev_uc_del(real_dev, dev->dev_addr); 311 dev_uc_del(real_dev, dev->dev_addr);
312out: 312out:
313 netif_carrier_off(dev); 313 netif_carrier_off(dev);
@@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev)
326 if (dev->flags & IFF_PROMISC) 326 if (dev->flags & IFF_PROMISC)
327 dev_set_promiscuity(real_dev, -1); 327 dev_set_promiscuity(real_dev, -1);
328 328
329 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 329 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
330 dev_uc_del(real_dev, dev->dev_addr); 330 dev_uc_del(real_dev, dev->dev_addr);
331 331
332 netif_carrier_off(dev); 332 netif_carrier_off(dev);
@@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
345 if (!(dev->flags & IFF_UP)) 345 if (!(dev->flags & IFF_UP))
346 goto out; 346 goto out;
347 347
348 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { 348 if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
349 err = dev_uc_add(real_dev, addr->sa_data); 349 err = dev_uc_add(real_dev, addr->sa_data);
350 if (err < 0) 350 if (err < 0)
351 return err; 351 return err;
352 } 352 }
353 353
354 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 354 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
355 dev_uc_del(real_dev, dev->dev_addr); 355 dev_uc_del(real_dev, dev->dev_addr);
356 356
357out: 357out:
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 50711368ad6a..708c80ea1874 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
166 struct nlattr *nest; 166 struct nlattr *nest;
167 unsigned int i; 167 unsigned int i;
168 168
169 NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); 169 if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
170 goto nla_put_failure;
170 if (vlan->flags) { 171 if (vlan->flags) {
171 f.flags = vlan->flags; 172 f.flags = vlan->flags;
172 f.mask = ~0; 173 f.mask = ~0;
173 NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); 174 if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
175 goto nla_put_failure;
174 } 176 }
175 if (vlan->nr_ingress_mappings) { 177 if (vlan->nr_ingress_mappings) {
176 nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); 178 nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
183 185
184 m.from = i; 186 m.from = i;
185 m.to = vlan->ingress_priority_map[i]; 187 m.to = vlan->ingress_priority_map[i];
186 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, 188 if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
187 sizeof(m), &m); 189 sizeof(m), &m))
190 goto nla_put_failure;
188 } 191 }
189 nla_nest_end(skb, nest); 192 nla_nest_end(skb, nest);
190 } 193 }
@@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
202 205
203 m.from = pm->priority; 206 m.from = pm->priority;
204 m.to = (pm->vlan_qos >> 13) & 0x7; 207 m.to = (pm->vlan_qos >> 13) & 0x7;
205 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, 208 if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
206 sizeof(m), &m); 209 sizeof(m), &m))
210 goto nla_put_failure;
207 } 211 }
208 } 212 }
209 nla_nest_end(skb, nest); 213 nla_nest_end(skb, nest);
diff --git a/net/9p/client.c b/net/9p/client.c
index 776618cd2be5..a170893d70e0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -740,10 +740,18 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
740 c->status = Disconnected; 740 c->status = Disconnected;
741 goto reterr; 741 goto reterr;
742 } 742 }
743again:
743 /* Wait for the response */ 744 /* Wait for the response */
744 err = wait_event_interruptible(*req->wq, 745 err = wait_event_interruptible(*req->wq,
745 req->status >= REQ_STATUS_RCVD); 746 req->status >= REQ_STATUS_RCVD);
746 747
748 if ((err == -ERESTARTSYS) && (c->status == Connected)
749 && (type == P9_TFLUSH)) {
750 sigpending = 1;
751 clear_thread_flag(TIF_SIGPENDING);
752 goto again;
753 }
754
747 if (req->status == REQ_STATUS_ERROR) { 755 if (req->status == REQ_STATUS_ERROR) {
748 p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); 756 p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
749 err = req->t_err; 757 err = req->t_err;
@@ -1420,6 +1428,7 @@ int p9_client_clunk(struct p9_fid *fid)
1420 int err; 1428 int err;
1421 struct p9_client *clnt; 1429 struct p9_client *clnt;
1422 struct p9_req_t *req; 1430 struct p9_req_t *req;
1431 int retries = 0;
1423 1432
1424 if (!fid) { 1433 if (!fid) {
1425 pr_warn("%s (%d): Trying to clunk with NULL fid\n", 1434 pr_warn("%s (%d): Trying to clunk with NULL fid\n",
@@ -1428,7 +1437,9 @@ int p9_client_clunk(struct p9_fid *fid)
1428 return 0; 1437 return 0;
1429 } 1438 }
1430 1439
1431 p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid); 1440again:
1441 p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n", fid->fid,
1442 retries);
1432 err = 0; 1443 err = 0;
1433 clnt = fid->clnt; 1444 clnt = fid->clnt;
1434 1445
@@ -1444,8 +1455,14 @@ int p9_client_clunk(struct p9_fid *fid)
1444error: 1455error:
1445 /* 1456 /*
1446 * Fid is not valid even after a failed clunk 1457 * Fid is not valid even after a failed clunk
1458 * If interrupted, retry once then give up and
1459 * leak fid until umount.
1447 */ 1460 */
1448 p9_fid_destroy(fid); 1461 if (err == -ERESTARTSYS) {
1462 if (retries++ == 0)
1463 goto again;
1464 } else
1465 p9_fid_destroy(fid);
1449 return err; 1466 return err;
1450} 1467}
1451EXPORT_SYMBOL(p9_client_clunk); 1468EXPORT_SYMBOL(p9_client_clunk);
@@ -1470,7 +1487,10 @@ int p9_client_remove(struct p9_fid *fid)
1470 1487
1471 p9_free_req(clnt, req); 1488 p9_free_req(clnt, req);
1472error: 1489error:
1473 p9_fid_destroy(fid); 1490 if (err == -ERESTARTSYS)
1491 p9_client_clunk(fid);
1492 else
1493 p9_fid_destroy(fid);
1474 return err; 1494 return err;
1475} 1495}
1476EXPORT_SYMBOL(p9_client_remove); 1496EXPORT_SYMBOL(p9_client_remove);
@@ -1510,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1510 1530
1511 1531
1512 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", 1532 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
1513 fid->fid, (long long unsigned) offset, count); 1533 fid->fid, (unsigned long long) offset, count);
1514 err = 0; 1534 err = 0;
1515 clnt = fid->clnt; 1535 clnt = fid->clnt;
1516 1536
@@ -1585,7 +1605,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1585 struct p9_req_t *req; 1605 struct p9_req_t *req;
1586 1606
1587 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", 1607 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
1588 fid->fid, (long long unsigned) offset, count); 1608 fid->fid, (unsigned long long) offset, count);
1589 err = 0; 1609 err = 0;
1590 clnt = fid->clnt; 1610 clnt = fid->clnt;
1591 1611
@@ -2020,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
2020 char *dataptr; 2040 char *dataptr;
2021 2041
2022 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", 2042 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
2023 fid->fid, (long long unsigned) offset, count); 2043 fid->fid, (unsigned long long) offset, count);
2024 2044
2025 err = 0; 2045 err = 0;
2026 clnt = fid->clnt; 2046 clnt = fid->clnt;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fccae26fa674..6449bae15702 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -513,7 +513,7 @@ error:
513 clear_bit(Wworksched, &m->wsched); 513 clear_bit(Wworksched, &m->wsched);
514} 514}
515 515
516static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) 516static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
517{ 517{
518 struct p9_poll_wait *pwait = 518 struct p9_poll_wait *pwait =
519 container_of(wait, struct p9_poll_wait, wait); 519 container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 3d432068f627..5af18d11b518 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -615,7 +615,8 @@ static void p9_virtio_remove(struct virtio_device *vdev)
615{ 615{
616 struct virtio_chan *chan = vdev->priv; 616 struct virtio_chan *chan = vdev->priv;
617 617
618 BUG_ON(chan->inuse); 618 if (chan->inuse)
619 p9_virtio_close(chan->client);
619 vdev->config->del_vqs(vdev); 620 vdev->config->del_vqs(vdev);
620 621
621 mutex_lock(&virtio_9p_lock); 622 mutex_lock(&virtio_9p_lock);
diff --git a/net/Kconfig b/net/Kconfig
index e07272d0bb2d..245831bec09a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -207,10 +207,10 @@ source "net/ipx/Kconfig"
207source "drivers/net/appletalk/Kconfig" 207source "drivers/net/appletalk/Kconfig"
208source "net/x25/Kconfig" 208source "net/x25/Kconfig"
209source "net/lapb/Kconfig" 209source "net/lapb/Kconfig"
210source "net/econet/Kconfig"
211source "net/wanrouter/Kconfig" 210source "net/wanrouter/Kconfig"
212source "net/phonet/Kconfig" 211source "net/phonet/Kconfig"
213source "net/ieee802154/Kconfig" 212source "net/ieee802154/Kconfig"
213source "net/mac802154/Kconfig"
214source "net/sched/Kconfig" 214source "net/sched/Kconfig"
215source "net/dcb/Kconfig" 215source "net/dcb/Kconfig"
216source "net/dns_resolver/Kconfig" 216source "net/dns_resolver/Kconfig"
@@ -246,9 +246,6 @@ config BQL
246 select DQL 246 select DQL
247 default y 247 default y
248 248
249config HAVE_BPF_JIT
250 bool
251
252config BPF_JIT 249config BPF_JIT
253 bool "enable BPF Just In Time compiler" 250 bool "enable BPF Just In Time compiler"
254 depends on HAVE_BPF_JIT 251 depends on HAVE_BPF_JIT
@@ -295,7 +292,7 @@ config NET_TCPPROBE
295 module will be called tcp_probe. 292 module will be called tcp_probe.
296 293
297config NET_DROP_MONITOR 294config NET_DROP_MONITOR
298 boolean "Network packet drop alerting service" 295 tristate "Network packet drop alerting service"
299 depends on INET && EXPERIMENTAL && TRACEPOINTS 296 depends on INET && EXPERIMENTAL && TRACEPOINTS
300 ---help--- 297 ---help---
301 This feature provides an alerting service to userspace in the 298 This feature provides an alerting service to userspace in the
@@ -340,3 +337,7 @@ source "net/nfc/Kconfig"
340 337
341 338
342endif # if NET 339endif # if NET
340
341# Used by archs to tell that they support BPF_JIT
342config HAVE_BPF_JIT
343 bool
diff --git a/net/Makefile b/net/Makefile
index ad432fa4d934..4f4ee083064c 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
40obj-$(CONFIG_ATM) += atm/ 40obj-$(CONFIG_ATM) += atm/
41obj-$(CONFIG_L2TP) += l2tp/ 41obj-$(CONFIG_L2TP) += l2tp/
42obj-$(CONFIG_DECNET) += decnet/ 42obj-$(CONFIG_DECNET) += decnet/
43obj-$(CONFIG_ECONET) += econet/
44obj-$(CONFIG_PHONET) += phonet/ 43obj-$(CONFIG_PHONET) += phonet/
45ifneq ($(CONFIG_VLAN_8021Q),) 44ifneq ($(CONFIG_VLAN_8021Q),)
46obj-y += 8021q/ 45obj-y += 8021q/
@@ -60,6 +59,7 @@ ifneq ($(CONFIG_DCB),)
60obj-y += dcb/ 59obj-y += dcb/
61endif 60endif
62obj-$(CONFIG_IEEE802154) += ieee802154/ 61obj-$(CONFIG_IEEE802154) += ieee802154/
62obj-$(CONFIG_MAC802154) += mac802154/
63 63
64ifeq ($(CONFIG_NET),y) 64ifeq ($(CONFIG_NET),y)
65obj-$(CONFIG_SYSCTL) += sysctl_net.o 65obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab93eda5..0301b328cf0f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
63#include <net/tcp_states.h> 63#include <net/tcp_states.h>
64#include <net/route.h> 64#include <net/route.h>
65#include <linux/atalk.h> 65#include <linux/atalk.h>
66#include "../core/kmap_skb.h" 66#include <linux/highmem.h>
67 67
68struct datalink_proto *ddp_dl, *aarp_dl; 68struct datalink_proto *ddp_dl, *aarp_dl;
69static const struct proto_ops atalk_dgram_ops; 69static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
960 960
961 if (copy > len) 961 if (copy > len)
962 copy = len; 962 copy = len;
963 vaddr = kmap_skb_frag(frag); 963 vaddr = kmap_atomic(skb_frag_page(frag));
964 sum = atalk_sum_partial(vaddr + frag->page_offset + 964 sum = atalk_sum_partial(vaddr + frag->page_offset +
965 offset - start, copy, sum); 965 offset - start, copy, sum);
966 kunmap_skb_frag(vaddr); 966 kunmap_atomic(vaddr);
967 967
968 if (!(len -= copy)) 968 if (!(len -= copy))
969 return sum; 969 return sum;
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index 04e9c0da7aa9..ebb864361f7a 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -42,20 +42,14 @@ static struct ctl_table atalk_table[] = {
42 { }, 42 { },
43}; 43};
44 44
45static struct ctl_path atalk_path[] = {
46 { .procname = "net", },
47 { .procname = "appletalk", },
48 { }
49};
50
51static struct ctl_table_header *atalk_table_header; 45static struct ctl_table_header *atalk_table_header;
52 46
53void atalk_register_sysctl(void) 47void atalk_register_sysctl(void)
54{ 48{
55 atalk_table_header = register_sysctl_paths(atalk_path, atalk_table); 49 atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
56} 50}
57 51
58void atalk_unregister_sysctl(void) 52void atalk_unregister_sysctl(void)
59{ 53{
60 unregister_sysctl_table(atalk_table_header); 54 unregister_net_sysctl_table(atalk_table_header);
61} 55}
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 353fccf1cde3..4819d31533e0 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -73,7 +73,7 @@ struct br2684_vcc {
73#ifdef CONFIG_ATM_BR2684_IPFILTER 73#ifdef CONFIG_ATM_BR2684_IPFILTER
74 struct br2684_filter filter; 74 struct br2684_filter filter;
75#endif /* CONFIG_ATM_BR2684_IPFILTER */ 75#endif /* CONFIG_ATM_BR2684_IPFILTER */
76 unsigned copies_needed, copies_failed; 76 unsigned int copies_needed, copies_failed;
77}; 77};
78 78
79struct br2684_dev { 79struct br2684_dev {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 127fe70a1baa..8ae3a7879335 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -37,7 +37,6 @@
37#include <linux/param.h> /* for HZ */ 37#include <linux/param.h> /* for HZ */
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39#include <asm/byteorder.h> /* for htons etc. */ 39#include <asm/byteorder.h> /* for htons etc. */
40#include <asm/system.h> /* save/restore_flags */
41#include <linux/atomic.h> 40#include <linux/atomic.h>
42 41
43#include "common.h" 42#include "common.h"
@@ -330,6 +329,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
330 struct atmarp_entry *entry; 329 struct atmarp_entry *entry;
331 struct neighbour *n; 330 struct neighbour *n;
332 struct atm_vcc *vcc; 331 struct atm_vcc *vcc;
332 struct rtable *rt;
333 __be32 *daddr;
333 int old; 334 int old;
334 unsigned long flags; 335 unsigned long flags;
335 336
@@ -340,7 +341,12 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
340 dev->stats.tx_dropped++; 341 dev->stats.tx_dropped++;
341 return NETDEV_TX_OK; 342 return NETDEV_TX_OK;
342 } 343 }
343 n = dst_get_neighbour_noref(dst); 344 rt = (struct rtable *) dst;
345 if (rt->rt_gateway)
346 daddr = &rt->rt_gateway;
347 else
348 daddr = &ip_hdr(skb)->daddr;
349 n = dst_neigh_lookup(dst, daddr);
344 if (!n) { 350 if (!n) {
345 pr_err("NO NEIGHBOUR !\n"); 351 pr_err("NO NEIGHBOUR !\n");
346 dev_kfree_skb(skb); 352 dev_kfree_skb(skb);
@@ -360,7 +366,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
360 dev_kfree_skb(skb); 366 dev_kfree_skb(skb);
361 dev->stats.tx_dropped++; 367 dev->stats.tx_dropped++;
362 } 368 }
363 return NETDEV_TX_OK; 369 goto out_release_neigh;
364 } 370 }
365 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); 371 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
366 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 372 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -379,14 +385,14 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
379 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 385 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
380 if (old) { 386 if (old) {
381 pr_warning("XOFF->XOFF transition\n"); 387 pr_warning("XOFF->XOFF transition\n");
382 return NETDEV_TX_OK; 388 goto out_release_neigh;
383 } 389 }
384 dev->stats.tx_packets++; 390 dev->stats.tx_packets++;
385 dev->stats.tx_bytes += skb->len; 391 dev->stats.tx_bytes += skb->len;
386 vcc->send(vcc, skb); 392 vcc->send(vcc, skb);
387 if (atm_may_send(vcc, 0)) { 393 if (atm_may_send(vcc, 0)) {
388 entry->vccs->xoff = 0; 394 entry->vccs->xoff = 0;
389 return NETDEV_TX_OK; 395 goto out_release_neigh;
390 } 396 }
391 spin_lock_irqsave(&clip_priv->xoff_lock, flags); 397 spin_lock_irqsave(&clip_priv->xoff_lock, flags);
392 netif_stop_queue(dev); /* XOFF -> throttle immediately */ 398 netif_stop_queue(dev); /* XOFF -> throttle immediately */
@@ -398,6 +404,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
398 of the brief netif_stop_queue. If this isn't true or if it 404 of the brief netif_stop_queue. If this isn't true or if it
399 changes, use netif_wake_queue instead. */ 405 changes, use netif_wake_queue instead. */
400 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags); 406 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
407out_release_neigh:
408 neigh_release(n);
401 return NETDEV_TX_OK; 409 return NETDEV_TX_OK;
402} 410}
403 411
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 62dc8bfe6fe7..bbd3b639992e 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
97 error = sock_get_timestampns(sk, argp); 97 error = sock_get_timestampns(sk, argp);
98 goto done; 98 goto done;
99 case ATM_SETSC: 99 case ATM_SETSC:
100 if (net_ratelimit()) 100 net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
101 pr_warning("ATM_SETSC is obsolete; used by %s:%d\n", 101 current->comm, task_pid_nr(current));
102 current->comm, task_pid_nr(current));
103 error = 0; 102 error = 0;
104 goto done; 103 goto done;
105 case ATMSIGD_CTRL: 104 case ATMSIGD_CTRL:
@@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
123 work for 32-bit userspace. TBH I don't really want 122 work for 32-bit userspace. TBH I don't really want
124 to think about it at all. dwmw2. */ 123 to think about it at all. dwmw2. */
125 if (compat) { 124 if (compat) {
126 if (net_ratelimit()) 125 net_warn_ratelimited("32-bit task cannot be atmsigd\n");
127 pr_warning("32-bit task cannot be atmsigd\n");
128 error = -EINVAL; 126 error = -EINVAL;
129 goto done; 127 goto done;
130 } 128 }
diff --git a/net/atm/lec.c b/net/atm/lec.c
index f1964caa0f83..a7d172105c99 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -26,11 +26,6 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28 28
29/* TokenRing if needed */
30#ifdef CONFIG_TR
31#include <linux/trdevice.h>
32#endif
33
34/* And atm device */ 29/* And atm device */
35#include <linux/atmdev.h> 30#include <linux/atmdev.h>
36#include <linux/atmlec.h> 31#include <linux/atmlec.h>
@@ -163,50 +158,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
163#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 158#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
164 159
165/* 160/*
166 * Modelled after tr_type_trans
167 * All multicast and ARE or STE frames go to BUS.
168 * Non source routed frames go by destination address.
169 * Last hop source routed frames go by destination address.
170 * Not last hop source routed frames go by _next_ route descriptor.
171 * Returns pointer to destination MAC address or fills in rdesc
172 * and returns NULL.
173 */
174#ifdef CONFIG_TR
175static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
176{
177 struct trh_hdr *trh;
178 unsigned int riflen, num_rdsc;
179
180 trh = (struct trh_hdr *)packet;
181 if (trh->daddr[0] & (uint8_t) 0x80)
182 return bus_mac; /* multicast */
183
184 if (trh->saddr[0] & TR_RII) {
185 riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
186 if ((ntohs(trh->rcf) >> 13) != 0)
187 return bus_mac; /* ARE or STE */
188 } else
189 return trh->daddr; /* not source routed */
190
191 if (riflen < 6)
192 return trh->daddr; /* last hop, source routed */
193
194 /* riflen is 6 or more, packet has more than one route descriptor */
195 num_rdsc = (riflen / 2) - 1;
196 memset(rdesc, 0, ETH_ALEN);
197 /* offset 4 comes from LAN destination field in LE control frames */
198 if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
199 memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
200 else {
201 memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
202 rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
203 }
204
205 return NULL;
206}
207#endif /* CONFIG_TR */
208
209/*
210 * Open/initialize the netdevice. This is called (in the current kernel) 161 * Open/initialize the netdevice. This is called (in the current kernel)
211 * sometime after booting when the 'ifconfig' program is run. 162 * sometime after booting when the 'ifconfig' program is run.
212 * 163 *
@@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
257 struct lec_arp_table *entry; 208 struct lec_arp_table *entry;
258 unsigned char *dst; 209 unsigned char *dst;
259 int min_frame_size; 210 int min_frame_size;
260#ifdef CONFIG_TR
261 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
262#endif
263 int is_rdesc; 211 int is_rdesc;
264 212
265 pr_debug("called\n"); 213 pr_debug("called\n");
@@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
290 } 238 }
291 skb_push(skb, 2); 239 skb_push(skb, 2);
292 240
293 /* Put le header to place, works for TokenRing too */ 241 /* Put le header to place */
294 lec_h = (struct lecdatahdr_8023 *)skb->data; 242 lec_h = (struct lecdatahdr_8023 *)skb->data;
295 lec_h->le_header = htons(priv->lecid); 243 lec_h->le_header = htons(priv->lecid);
296 244
297#ifdef CONFIG_TR
298 /*
299 * Ugly. Use this to realign Token Ring packets for
300 * e.g. PCA-200E driver.
301 */
302 if (priv->is_trdev) {
303 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
304 kfree_skb(skb);
305 if (skb2 == NULL)
306 return NETDEV_TX_OK;
307 skb = skb2;
308 }
309#endif
310
311#if DUMP_PACKETS >= 2 245#if DUMP_PACKETS >= 2
312#define MAX_DUMP_SKB 99 246#define MAX_DUMP_SKB 99
313#elif DUMP_PACKETS >= 1 247#elif DUMP_PACKETS >= 1
@@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
321#endif /* DUMP_PACKETS >= 1 */ 255#endif /* DUMP_PACKETS >= 1 */
322 256
323 /* Minimum ethernet-frame size */ 257 /* Minimum ethernet-frame size */
324#ifdef CONFIG_TR 258 min_frame_size = LEC_MINIMUM_8023_SIZE;
325 if (priv->is_trdev)
326 min_frame_size = LEC_MINIMUM_8025_SIZE;
327 else
328#endif
329 min_frame_size = LEC_MINIMUM_8023_SIZE;
330 if (skb->len < min_frame_size) { 259 if (skb->len < min_frame_size) {
331 if ((skb->len + skb_tailroom(skb)) < min_frame_size) { 260 if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
332 skb2 = skb_copy_expand(skb, 0, 261 skb2 = skb_copy_expand(skb, 0,
@@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
345 /* Send to right vcc */ 274 /* Send to right vcc */
346 is_rdesc = 0; 275 is_rdesc = 0;
347 dst = lec_h->h_dest; 276 dst = lec_h->h_dest;
348#ifdef CONFIG_TR
349 if (priv->is_trdev) {
350 dst = get_tr_dst(skb->data + 2, rdesc);
351 if (dst == NULL) {
352 dst = rdesc;
353 is_rdesc = 1;
354 }
355 }
356#endif
357 entry = NULL; 277 entry = NULL;
358 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 278 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
359 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", 279 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
@@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
710 dev_kfree_skb(skb); 630 dev_kfree_skb(skb);
711 return; 631 return;
712 } 632 }
713#ifdef CONFIG_TR 633 dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
714 if (priv->is_trdev)
715 dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
716 else
717#endif
718 dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
719 634
720 /* 635 /*
721 * If this is a Data Direct VCC, and the VCC does not match 636 * If this is a Data Direct VCC, and the VCC does not match
@@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
723 */ 638 */
724 spin_lock_irqsave(&priv->lec_arp_lock, flags); 639 spin_lock_irqsave(&priv->lec_arp_lock, flags);
725 if (lec_is_data_direct(vcc)) { 640 if (lec_is_data_direct(vcc)) {
726#ifdef CONFIG_TR 641 src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
727 if (priv->is_trdev)
728 src =
729 ((struct lecdatahdr_8025 *)skb->data)->
730 h_source;
731 else
732#endif
733 src =
734 ((struct lecdatahdr_8023 *)skb->data)->
735 h_source;
736 entry = lec_arp_find(priv, src); 642 entry = lec_arp_find(priv, src);
737 if (entry && entry->vcc != vcc) { 643 if (entry && entry->vcc != vcc) {
738 lec_arp_remove(priv, entry); 644 lec_arp_remove(priv, entry);
@@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
750 if (!hlist_empty(&priv->lec_arp_empty_ones)) 656 if (!hlist_empty(&priv->lec_arp_empty_ones))
751 lec_arp_check_empties(priv, vcc, skb); 657 lec_arp_check_empties(priv, vcc, skb);
752 skb_pull(skb, 2); /* skip lec_id */ 658 skb_pull(skb, 2); /* skip lec_id */
753#ifdef CONFIG_TR 659 skb->protocol = eth_type_trans(skb, dev);
754 if (priv->is_trdev)
755 skb->protocol = tr_type_trans(skb, dev);
756 else
757#endif
758 skb->protocol = eth_type_trans(skb, dev);
759 dev->stats.rx_packets++; 660 dev->stats.rx_packets++;
760 dev->stats.rx_bytes += skb->len; 661 dev->stats.rx_bytes += skb->len;
761 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 662 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
@@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
827 i = 0; 728 i = 0;
828 else 729 else
829 i = arg; 730 i = arg;
830#ifdef CONFIG_TR
831 if (arg >= MAX_LEC_ITF) 731 if (arg >= MAX_LEC_ITF)
832 return -EINVAL; 732 return -EINVAL;
833#else /* Reserve the top NUM_TR_DEVS for TR */
834 if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
835 return -EINVAL;
836#endif
837 if (!dev_lec[i]) { 733 if (!dev_lec[i]) {
838 int is_trdev, size; 734 int size;
839
840 is_trdev = 0;
841 if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
842 is_trdev = 1;
843 735
844 size = sizeof(struct lec_priv); 736 size = sizeof(struct lec_priv);
845#ifdef CONFIG_TR 737 dev_lec[i] = alloc_etherdev(size);
846 if (is_trdev)
847 dev_lec[i] = alloc_trdev(size);
848 else
849#endif
850 dev_lec[i] = alloc_etherdev(size);
851 if (!dev_lec[i]) 738 if (!dev_lec[i])
852 return -ENOMEM; 739 return -ENOMEM;
853 dev_lec[i]->netdev_ops = &lec_netdev_ops; 740 dev_lec[i]->netdev_ops = &lec_netdev_ops;
@@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
858 } 745 }
859 746
860 priv = netdev_priv(dev_lec[i]); 747 priv = netdev_priv(dev_lec[i]);
861 priv->is_trdev = is_trdev;
862 } else { 748 } else {
863 priv = netdev_priv(dev_lec[i]); 749 priv = netdev_priv(dev_lec[i]);
864 if (priv->lecd) 750 if (priv->lecd)
@@ -1255,7 +1141,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1255 struct sk_buff *skb; 1141 struct sk_buff *skb;
1256 struct lec_priv *priv = netdev_priv(dev); 1142 struct lec_priv *priv = netdev_priv(dev);
1257 1143
1258 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1144 if (!ether_addr_equal(lan_dst, dev->dev_addr))
1259 return 0; /* not our mac address */ 1145 return 0; /* not our mac address */
1260 1146
1261 kfree(priv->tlvs); /* NULL if there was no previous association */ 1147 kfree(priv->tlvs); /* NULL if there was no previous association */
@@ -1662,7 +1548,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1662 1548
1663 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1549 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1664 hlist_for_each_entry(entry, node, head, next) { 1550 hlist_for_each_entry(entry, node, head, next) {
1665 if (!compare_ether_addr(mac_addr, entry->mac_addr)) 1551 if (ether_addr_equal(mac_addr, entry->mac_addr))
1666 return entry; 1552 return entry;
1667 } 1553 }
1668 return NULL; 1554 return NULL;
@@ -1849,7 +1735,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1849 case 1: 1735 case 1:
1850 return priv->mcast_vcc; 1736 return priv->mcast_vcc;
1851 case 2: /* LANE2 wants arp for multicast addresses */ 1737 case 2: /* LANE2 wants arp for multicast addresses */
1852 if (!compare_ether_addr(mac_to_find, bus_mac)) 1738 if (ether_addr_equal(mac_to_find, bus_mac))
1853 return priv->mcast_vcc; 1739 return priv->mcast_vcc;
1854 break; 1740 break;
1855 default: 1741 default:
@@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv,
2372 struct hlist_node *node, *next; 2258 struct hlist_node *node, *next;
2373 struct lec_arp_table *entry, *tmp; 2259 struct lec_arp_table *entry, *tmp;
2374 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; 2260 struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
2375 unsigned char *src; 2261 unsigned char *src = hdr->h_source;
2376#ifdef CONFIG_TR
2377 struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;
2378
2379 if (priv->is_trdev)
2380 src = tr_hdr->h_source;
2381 else
2382#endif
2383 src = hdr->h_source;
2384 2262
2385 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2263 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2386 hlist_for_each_entry_safe(entry, node, next, 2264 hlist_for_each_entry_safe(entry, node, next,
diff --git a/net/atm/lec.h b/net/atm/lec.h
index dfc071966463..a86aff9a3c04 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -55,11 +55,11 @@ struct lane2_ops {
55 * frames. 55 * frames.
56 * 56 *
57 * 1. Dix Ethernet EtherType frames encoded by placing EtherType 57 * 1. Dix Ethernet EtherType frames encoded by placing EtherType
58 * field in h_type field. Data follows immediatelly after header. 58 * field in h_type field. Data follows immediately after header.
59 * 2. LLC Data frames whose total length, including LLC field and data, 59 * 2. LLC Data frames whose total length, including LLC field and data,
60 * but not padding required to meet the minimum data frame length, 60 * but not padding required to meet the minimum data frame length,
61 * is less than 1536(0x0600) MUST be encoded by placing that length 61 * is less than 1536(0x0600) MUST be encoded by placing that length
62 * in the h_type field. The LLC field follows header immediatelly. 62 * in the h_type field. The LLC field follows header immediately.
63 * 3. LLC data frames longer than this maximum MUST be encoded by placing 63 * 3. LLC data frames longer than this maximum MUST be encoded by placing
64 * the value 0 in the h_type field. 64 * the value 0 in the h_type field.
65 * 65 *
@@ -142,7 +142,6 @@ struct lec_priv {
142 int itfnum; /* e.g. 2 for lec2, 5 for lec5 */ 142 int itfnum; /* e.g. 2 for lec2, 5 for lec5 */
143 struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */ 143 struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */
144 int is_proxy; /* bridge between ATM and Ethernet */ 144 int is_proxy; /* bridge between ATM and Ethernet */
145 int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */
146}; 145};
147 146
148struct lec_vcc_priv { 147struct lec_vcc_priv {
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index aa972409f093..d4cc1be5c364 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
592 goto non_ip; 592 goto non_ip;
593 593
594 while (i < mpc->number_of_mps_macs) { 594 while (i < mpc->number_of_mps_macs) {
595 if (!compare_ether_addr(eth->h_dest, 595 if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN))
596 (mpc->mps_macs + i*ETH_ALEN)))
597 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */ 596 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
598 return NETDEV_TX_OK; 597 return NETDEV_TX_OK;
599 i++; 598 i++;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 53e500292271..5bdd300db0f7 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
207 size_t nbytes, loff_t *ppos) 207 size_t nbytes, loff_t *ppos)
208{ 208{
209 char *page, *p; 209 char *page, *p;
210 unsigned len; 210 unsigned int len;
211 211
212 if (nbytes == 0) 212 if (nbytes == 0)
213 return 0; 213 return 0;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index df35d9a3b5fe..ce1e59fdae7b 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -44,7 +44,7 @@
44#include <linux/atmdev.h> 44#include <linux/atmdev.h>
45#include <linux/capability.h> 45#include <linux/capability.h>
46#include <linux/ppp_defs.h> 46#include <linux/ppp_defs.h>
47#include <linux/if_ppp.h> 47#include <linux/ppp-ioctl.h>
48#include <linux/ppp_channel.h> 48#include <linux/ppp_channel.h>
49#include <linux/atmppp.h> 49#include <linux/atmppp.h>
50 50
@@ -62,12 +62,25 @@ struct pppoatm_vcc {
62 void (*old_pop)(struct atm_vcc *, struct sk_buff *); 62 void (*old_pop)(struct atm_vcc *, struct sk_buff *);
63 /* keep old push/pop for detaching */ 63 /* keep old push/pop for detaching */
64 enum pppoatm_encaps encaps; 64 enum pppoatm_encaps encaps;
65 atomic_t inflight;
66 unsigned long blocked;
65 int flags; /* SC_COMP_PROT - compress protocol */ 67 int flags; /* SC_COMP_PROT - compress protocol */
66 struct ppp_channel chan; /* interface to generic ppp layer */ 68 struct ppp_channel chan; /* interface to generic ppp layer */
67 struct tasklet_struct wakeup_tasklet; 69 struct tasklet_struct wakeup_tasklet;
68}; 70};
69 71
70/* 72/*
73 * We want to allow two packets in the queue. The one that's currently in
74 * flight, and *one* queued up ready for the ATM device to send immediately
75 * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
76 * inflight == -2 represents an empty queue, -1 one packet, and zero means
77 * there are two packets in the queue.
78 */
79#define NONE_INFLIGHT -2
80
81#define BLOCKED 0
82
83/*
71 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol 84 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
72 * ID (0xC021) used in autodetection 85 * ID (0xC021) used in autodetection
73 */ 86 */
@@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg)
102static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) 115static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
103{ 116{
104 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 117 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
118
105 pvcc->old_pop(atmvcc, skb); 119 pvcc->old_pop(atmvcc, skb);
120 atomic_dec(&pvcc->inflight);
121
106 /* 122 /*
107 * We don't really always want to do this since it's 123 * We always used to run the wakeup tasklet unconditionally here, for
108 * really inefficient - it would be much better if we could 124 * fear of race conditions where we clear the BLOCKED flag just as we
109 * test if we had actually throttled the generic layer. 125 * refuse another packet in pppoatm_send(). This was quite inefficient.
110 * Unfortunately then there would be a nasty SMP race where 126 *
111 * we could clear that flag just as we refuse another packet. 127 * In fact it's OK. The PPP core will only ever call pppoatm_send()
112 * For now we do the safe thing. 128 * while holding the channel->downl lock. And ppp_output_wakeup() as
129 * called by the tasklet will *also* grab that lock. So even if another
130 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
131 * with it. The wakeup *will* happen after the other CPU is safely out
132 * of pppoatm_send() again.
133 *
134 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
135 * it about to return, that's fine. We trigger a wakeup which will
136 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
137 * BLOCKED bit yet, that's fine too because of the double check in
138 * pppoatm_may_send() which is commented there.
113 */ 139 */
114 tasklet_schedule(&pvcc->wakeup_tasklet); 140 if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
141 tasklet_schedule(&pvcc->wakeup_tasklet);
115} 142}
116 143
117/* 144/*
@@ -184,6 +211,51 @@ error:
184 ppp_input_error(&pvcc->chan, 0); 211 ppp_input_error(&pvcc->chan, 0);
185} 212}
186 213
214static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
215{
216 /*
217 * It's not clear that we need to bother with using atm_may_send()
218 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
219 * value of sk_sndbuf which is lower than the MTU, we're going to
220 * block for ever. But the code always did that before we introduced
221 * the packet count limit, so...
222 */
223 if (atm_may_send(pvcc->atmvcc, size) &&
224 atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
225 return 1;
226
227 /*
228 * We use test_and_set_bit() rather than set_bit() here because
229 * we need to ensure there's a memory barrier after it. The bit
230 * *must* be set before we do the atomic_inc() on pvcc->inflight.
231 * There's no smp_mb__after_set_bit(), so it's this or abuse
232 * smp_mb__after_clear_bit().
233 */
234 test_and_set_bit(BLOCKED, &pvcc->blocked);
235
236 /*
237 * We may have raced with pppoatm_pop(). If it ran for the
238 * last packet in the queue, *just* before we set the BLOCKED
239 * bit, then it might never run again and the channel could
240 * remain permanently blocked. Cope with that race by checking
241 * *again*. If it did run in that window, we'll have space on
242 * the queue now and can return success. It's harmless to leave
243 * the BLOCKED flag set, since it's only used as a trigger to
244 * run the wakeup tasklet. Another wakeup will never hurt.
245 * If pppoatm_pop() is running but hasn't got as far as making
246 * space on the queue yet, then it hasn't checked the BLOCKED
247 * flag yet either, so we're safe in that case too. It'll issue
248 * an "immediate" wakeup... where "immediate" actually involves
249 * taking the PPP channel's ->downl lock, which is held by the
250 * code path that calls pppoatm_send(), and is thus going to
251 * wait for us to finish.
252 */
253 if (atm_may_send(pvcc->atmvcc, size) &&
254 atomic_inc_not_zero(&pvcc->inflight))
255 return 1;
256
257 return 0;
258}
187/* 259/*
188 * Called by the ppp_generic.c to send a packet - returns true if packet 260 * Called by the ppp_generic.c to send a packet - returns true if packet
189 * was accepted. If we return false, then it's our job to call 261 * was accepted. If we return false, then it's our job to call
@@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
207 struct sk_buff *n; 279 struct sk_buff *n;
208 n = skb_realloc_headroom(skb, LLC_LEN); 280 n = skb_realloc_headroom(skb, LLC_LEN);
209 if (n != NULL && 281 if (n != NULL &&
210 !atm_may_send(pvcc->atmvcc, n->truesize)) { 282 !pppoatm_may_send(pvcc, n->truesize)) {
211 kfree_skb(n); 283 kfree_skb(n);
212 goto nospace; 284 goto nospace;
213 } 285 }
@@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
215 skb = n; 287 skb = n;
216 if (skb == NULL) 288 if (skb == NULL)
217 return DROP_PACKET; 289 return DROP_PACKET;
218 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 290 } else if (!pppoatm_may_send(pvcc, skb->truesize))
219 goto nospace; 291 goto nospace;
220 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); 292 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
221 break; 293 break;
222 case e_vc: 294 case e_vc:
223 if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 295 if (!pppoatm_may_send(pvcc, skb->truesize))
224 goto nospace; 296 goto nospace;
225 break; 297 break;
226 case e_autodetect: 298 case e_autodetect:
@@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
285 if (pvcc == NULL) 357 if (pvcc == NULL)
286 return -ENOMEM; 358 return -ENOMEM;
287 pvcc->atmvcc = atmvcc; 359 pvcc->atmvcc = atmvcc;
360
361 /* Maximum is zero, so that we can use atomic_inc_not_zero() */
362 atomic_set(&pvcc->inflight, NONE_INFLIGHT);
288 pvcc->old_push = atmvcc->push; 363 pvcc->old_push = atmvcc->push;
289 pvcc->old_pop = atmvcc->pop; 364 pvcc->old_pop = atmvcc->pop;
290 pvcc->encaps = (enum pppoatm_encaps) be.encaps; 365 pvcc->encaps = (enum pppoatm_encaps) be.encaps;
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 509c8ac02b63..86767ca908a3 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
166{ 166{
167 struct sk_buff *skb; 167 struct sk_buff *skb;
168 struct atmsvc_msg *msg; 168 struct atmsvc_msg *msg;
169 static unsigned session = 0; 169 static unsigned int session = 0;
170 170
171 pr_debug("%d (0x%p)\n", (int)type, vcc); 171 pr_debug("%d (0x%p)\n", (int)type, vcc);
172 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL))) 172 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 3cd0a0dc91cb..051f7abae66d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -33,7 +33,6 @@
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <net/sock.h> 34#include <net/sock.h>
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <linux/fcntl.h> 36#include <linux/fcntl.h>
38#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 37#include <linux/termios.h> /* For TIOCINQ/OUTQ */
39#include <linux/mm.h> 38#include <linux/mm.h>
@@ -1991,7 +1990,6 @@ static int __init ax25_init(void)
1991 sock_register(&ax25_family_ops); 1990 sock_register(&ax25_family_ops);
1992 dev_add_pack(&ax25_packet_type); 1991 dev_add_pack(&ax25_packet_type);
1993 register_netdevice_notifier(&ax25_dev_notifier); 1992 register_netdevice_notifier(&ax25_dev_notifier);
1994 ax25_register_sysctl();
1995 1993
1996 proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); 1994 proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
1997 proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); 1995 proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
@@ -2012,16 +2010,16 @@ static void __exit ax25_exit(void)
2012 proc_net_remove(&init_net, "ax25_route"); 2010 proc_net_remove(&init_net, "ax25_route");
2013 proc_net_remove(&init_net, "ax25"); 2011 proc_net_remove(&init_net, "ax25");
2014 proc_net_remove(&init_net, "ax25_calls"); 2012 proc_net_remove(&init_net, "ax25_calls");
2015 ax25_rt_free();
2016 ax25_uid_free();
2017 ax25_dev_free();
2018 2013
2019 ax25_unregister_sysctl();
2020 unregister_netdevice_notifier(&ax25_dev_notifier); 2014 unregister_netdevice_notifier(&ax25_dev_notifier);
2021 2015
2022 dev_remove_pack(&ax25_packet_type); 2016 dev_remove_pack(&ax25_packet_type);
2023 2017
2024 sock_unregister(PF_AX25); 2018 sock_unregister(PF_AX25);
2025 proto_unregister(&ax25_proto); 2019 proto_unregister(&ax25_proto);
2020
2021 ax25_rt_free();
2022 ax25_uid_free();
2023 ax25_dev_free();
2026} 2024}
2027module_exit(ax25_exit); 2025module_exit(ax25_exit);
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 7e7964dd987b..9162409559cf 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -22,7 +22,6 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include <asm/system.h>
26#include <linux/fcntl.h> 25#include <linux/fcntl.h>
27#include <linux/mm.h> 26#include <linux/mm.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index c1cb982f6e86..3d106767b272 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -24,7 +24,6 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
@@ -60,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev)
60 return; 59 return;
61 } 60 }
62 61
63 ax25_unregister_sysctl();
64
65 dev->ax25_ptr = ax25_dev; 62 dev->ax25_ptr = ax25_dev;
66 ax25_dev->dev = dev; 63 ax25_dev->dev = dev;
67 dev_hold(dev); 64 dev_hold(dev);
@@ -91,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev)
91 ax25_dev_list = ax25_dev; 88 ax25_dev_list = ax25_dev;
92 spin_unlock_bh(&ax25_dev_lock); 89 spin_unlock_bh(&ax25_dev_lock);
93 90
94 ax25_register_sysctl(); 91 ax25_register_dev_sysctl(ax25_dev);
95} 92}
96 93
97void ax25_dev_device_down(struct net_device *dev) 94void ax25_dev_device_down(struct net_device *dev)
@@ -101,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev)
101 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) 98 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
102 return; 99 return;
103 100
104 ax25_unregister_sysctl(); 101 ax25_unregister_dev_sysctl(ax25_dev);
105 102
106 spin_lock_bh(&ax25_dev_lock); 103 spin_lock_bh(&ax25_dev_lock);
107 104
@@ -121,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev)
121 spin_unlock_bh(&ax25_dev_lock); 118 spin_unlock_bh(&ax25_dev_lock);
122 dev_put(dev); 119 dev_put(dev);
123 kfree(ax25_dev); 120 kfree(ax25_dev);
124 ax25_register_sysctl();
125 return; 121 return;
126 } 122 }
127 123
@@ -131,7 +127,6 @@ void ax25_dev_device_down(struct net_device *dev)
131 spin_unlock_bh(&ax25_dev_lock); 127 spin_unlock_bh(&ax25_dev_lock);
132 dev_put(dev); 128 dev_put(dev);
133 kfree(ax25_dev); 129 kfree(ax25_dev);
134 ax25_register_sysctl();
135 return; 130 return;
136 } 131 }
137 132
@@ -139,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev)
139 } 134 }
140 spin_unlock_bh(&ax25_dev_lock); 135 spin_unlock_bh(&ax25_dev_lock);
141 dev->ax25_ptr = NULL; 136 dev->ax25_ptr = NULL;
142
143 ax25_register_sysctl();
144} 137}
145 138
146int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) 139int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/ax25/ax25_ds_in.c b/net/ax25/ax25_ds_in.c
index 8273b1200eee..9bd31e88aeca 100644
--- a/net/ax25/ax25_ds_in.c
+++ b/net/ax25/ax25_ds_in.c
@@ -23,7 +23,6 @@
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/fcntl.h> 26#include <linux/fcntl.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 85816e612dc0..5ea7fd3e2af9 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -24,7 +24,6 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index c7d81436213d..993c439b4f71 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -25,7 +25,6 @@
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include <asm/system.h>
29#include <linux/fcntl.h> 28#include <linux/fcntl.h>
30#include <linux/mm.h> 29#include <linux/mm.h>
31#include <linux/interrupt.h> 30#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 60b545e2822a..7d5f24b82cc8 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -24,7 +24,6 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 9bb776541203..96f4cab3a2f9 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -27,7 +27,6 @@
27#include <net/sock.h> 27#include <net/sock.h>
28#include <net/tcp_states.h> 28#include <net/tcp_states.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/mm.h> 31#include <linux/mm.h>
33#include <linux/interrupt.h> 32#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index cf0c47a26530..67de6b33f2c3 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -24,7 +24,6 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 28#include <linux/termios.h> /* For TIOCINQ/OUTQ */
30#include <linux/mm.h> 29#include <linux/mm.h>
@@ -49,7 +48,7 @@
49 48
50int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, 49int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
51 unsigned short type, const void *daddr, 50 unsigned short type, const void *daddr,
52 const void *saddr, unsigned len) 51 const void *saddr, unsigned int len)
53{ 52{
54 unsigned char *buff; 53 unsigned char *buff;
55 54
@@ -220,7 +219,7 @@ put:
220 219
221int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, 220int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
222 unsigned short type, const void *daddr, 221 unsigned short type, const void *daddr,
223 const void *saddr, unsigned len) 222 const void *saddr, unsigned int len)
224{ 223{
225 return -AX25_HEADER_LEN; 224 return -AX25_HEADER_LEN;
226} 225}
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 37507d806f65..be8a25e0db65 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -27,7 +27,6 @@
27#include <linux/netfilter.h> 27#include <linux/netfilter.h>
28#include <net/sock.h> 28#include <net/sock.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/mm.h> 31#include <linux/mm.h>
33#include <linux/interrupt.h> 32#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 87fddab22e0f..a65588040b9e 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -32,7 +32,6 @@
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <net/sock.h> 33#include <net/sock.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/system.h>
36#include <linux/fcntl.h> 35#include <linux/fcntl.h>
37#include <linux/mm.h> 36#include <linux/mm.h>
38#include <linux/interrupt.h> 37#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_std_in.c b/net/ax25/ax25_std_in.c
index a8eef88d8652..3fbf8f7b2cf4 100644
--- a/net/ax25/ax25_std_in.c
+++ b/net/ax25/ax25_std_in.c
@@ -30,7 +30,6 @@
30#include <net/sock.h> 30#include <net/sock.h>
31#include <net/tcp_states.h> 31#include <net/tcp_states.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <asm/system.h>
34#include <linux/fcntl.h> 33#include <linux/fcntl.h>
35#include <linux/mm.h> 34#include <linux/mm.h>
36#include <linux/interrupt.h> 35#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_std_subr.c b/net/ax25/ax25_std_subr.c
index 277f81bb979a..8b66a41e538f 100644
--- a/net/ax25/ax25_std_subr.c
+++ b/net/ax25/ax25_std_subr.c
@@ -21,7 +21,6 @@
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <net/sock.h> 22#include <net/sock.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/system.h>
25#include <linux/fcntl.h> 24#include <linux/fcntl.h>
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 96e4b9273250..004467c9e6e1 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -25,7 +25,6 @@
25#include <net/sock.h> 25#include <net/sock.h>
26#include <net/tcp_states.h> 26#include <net/tcp_states.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include <asm/system.h>
29#include <linux/fcntl.h> 28#include <linux/fcntl.h>
30#include <linux/mm.h> 29#include <linux/mm.h>
31#include <linux/interrupt.h> 30#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index c6715ee4ab8f..1997538a5d23 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -26,7 +26,6 @@
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/tcp_states.h> 27#include <net/tcp_states.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/system.h>
30#include <linux/fcntl.h> 29#include <linux/fcntl.h>
31#include <linux/mm.h> 30#include <linux/mm.h>
32#include <linux/interrupt.h> 31#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index db29ea71e80a..c3cffa79bafb 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -29,7 +29,6 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <net/sock.h> 30#include <net/sock.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <linux/fcntl.h> 32#include <linux/fcntl.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
35#include <linux/interrupt.h> 34#include <linux/interrupt.h>
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 4c83137b5954..e3c579ba6325 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -26,7 +26,6 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <net/sock.h> 27#include <net/sock.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/system.h>
30#include <linux/fcntl.h> 29#include <linux/fcntl.h>
31#include <linux/mm.h> 30#include <linux/mm.h>
32#include <linux/interrupt.h> 31#include <linux/interrupt.h>
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3f1d83..d5744b752511 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
29static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; 29static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
30#endif 30#endif
31 31
32static struct ctl_table_header *ax25_table_header;
33
34static ctl_table *ax25_table;
35static int ax25_table_size;
36
37static struct ctl_path ax25_path[] = {
38 { .procname = "net", },
39 { .procname = "ax25", },
40 { }
41};
42
43static const ctl_table ax25_param_table[] = { 32static const ctl_table ax25_param_table[] = {
44 { 33 {
45 .procname = "ip_default_mode", 34 .procname = "ip_default_mode",
@@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = {
159 { } /* that's all, folks! */ 148 { } /* that's all, folks! */
160}; 149};
161 150
162void ax25_register_sysctl(void) 151int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
163{ 152{
164 ax25_dev *ax25_dev; 153 char path[sizeof("net/ax25/") + IFNAMSIZ];
165 int n, k; 154 int k;
166 155 struct ctl_table *table;
167 spin_lock_bh(&ax25_dev_lock); 156
168 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 157 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
169 ax25_table_size += sizeof(ctl_table); 158 if (!table)
170 159 return -ENOMEM;
171 if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 160
172 spin_unlock_bh(&ax25_dev_lock); 161 for (k = 0; k < AX25_MAX_VALUES; k++)
173 return; 162 table[k].data = &ax25_dev->values[k];
174 } 163
175 164 snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
176 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 165 ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
177 struct ctl_table *child = kmemdup(ax25_param_table, 166 if (!ax25_dev->sysheader) {
178 sizeof(ax25_param_table), 167 kfree(table);
179 GFP_ATOMIC); 168 return -ENOMEM;
180 if (!child) {
181 while (n--)
182 kfree(ax25_table[n].child);
183 kfree(ax25_table);
184 spin_unlock_bh(&ax25_dev_lock);
185 return;
186 }
187 ax25_table[n].child = ax25_dev->systable = child;
188 ax25_table[n].procname = ax25_dev->dev->name;
189 ax25_table[n].mode = 0555;
190
191
192 for (k = 0; k < AX25_MAX_VALUES; k++)
193 child[k].data = &ax25_dev->values[k];
194
195 n++;
196 } 169 }
197 spin_unlock_bh(&ax25_dev_lock); 170 return 0;
198
199 ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
200} 171}
201 172
202void ax25_unregister_sysctl(void) 173void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
203{ 174{
204 ctl_table *p; 175 struct ctl_table_header *header = ax25_dev->sysheader;
205 unregister_sysctl_table(ax25_table_header); 176 struct ctl_table *table;
206 177
207 for (p = ax25_table; p->procname; p++) 178 if (header) {
208 kfree(p->child); 179 ax25_dev->sysheader = NULL;
209 kfree(ax25_table); 180 table = header->ctl_table_arg;
181 unregister_net_sysctl_table(header);
182 kfree(table);
183 }
210} 184}
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2b68d068eaf3..53f5244e28f8 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -7,19 +7,28 @@ config BATMAN_ADV
7 depends on NET 7 depends on NET
8 select CRC16 8 select CRC16
9 default n 9 default n
10 ---help--- 10 help
11 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
12 a routing protocol for multi-hop ad-hoc mesh networks. The
13 networks may be wired or wireless. See
14 http://www.open-mesh.org/ for more information and user space
15 tools.
11 16
12 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is 17config BATMAN_ADV_BLA
13 a routing protocol for multi-hop ad-hoc mesh networks. The 18 bool "Bridge Loop Avoidance"
14 networks may be wired or wireless. See 19 depends on BATMAN_ADV && INET
15 http://www.open-mesh.org/ for more information and user space 20 default y
16 tools. 21 help
22 This option enables BLA (Bridge Loop Avoidance), a mechanism
23 to avoid Ethernet frames looping when mesh nodes are connected
24 to both the same LAN and the same mesh. If you will never use
25 more than one mesh node in the same LAN, you can safely remove
26 this feature and save some space.
17 27
18config BATMAN_ADV_DEBUG 28config BATMAN_ADV_DEBUG
19 bool "B.A.T.M.A.N. debugging" 29 bool "B.A.T.M.A.N. debugging"
20 depends on BATMAN_ADV != n 30 depends on BATMAN_ADV
21 ---help--- 31 help
22
23 This is an option for use by developers; most people should 32 This is an option for use by developers; most people should
24 say N here. This enables compilation of support for 33 say N here. This enables compilation of support for
25 outputting debugging information to the kernel log. The 34 outputting debugging information to the kernel log. The
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index ce6861166499..6d5c1940667d 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2# Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3# 3#
4# Marek Lindner, Simon Wunderlich 4# Marek Lindner, Simon Wunderlich
5# 5#
@@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o
23batman-adv-y += bat_iv_ogm.o 23batman-adv-y += bat_iv_ogm.o
24batman-adv-y += bat_sysfs.o 24batman-adv-y += bat_sysfs.o
25batman-adv-y += bitarray.o 25batman-adv-y += bitarray.o
26batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
26batman-adv-y += gateway_client.o 27batman-adv-y += gateway_client.o
27batman-adv-y += gateway_common.o 28batman-adv-y += gateway_common.o
28batman-adv-y += hard-interface.o 29batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_algo.h
index 69329c107e28..9852a688ba43 100644
--- a/net/batman-adv/bat_ogm.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public 7 * modify it under the terms of version 2 of the GNU General Public
@@ -19,17 +19,9 @@
19 * 19 *
20 */ 20 */
21 21
22#ifndef _NET_BATMAN_ADV_OGM_H_ 22#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
23#define _NET_BATMAN_ADV_OGM_H_ 23#define _NET_BATMAN_ADV_BAT_ALGO_H_
24 24
25#include "main.h" 25int bat_iv_init(void);
26 26
27void bat_ogm_init(struct hard_iface *hard_iface); 27#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
28void bat_ogm_init_primary(struct hard_iface *hard_iface);
29void bat_ogm_update_mac(struct hard_iface *hard_iface);
30void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
31void bat_ogm_emit(struct forw_packet *forw_packet);
32void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
33 int packet_len, struct hard_iface *if_incoming);
34
35#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index d0af9bf69e46..3b588f86d770 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -32,6 +32,7 @@
32#include "soft-interface.h" 32#include "soft-interface.h"
33#include "vis.h" 33#include "vis.h"
34#include "icmp_socket.h" 34#include "icmp_socket.h"
35#include "bridge_loop_avoidance.h"
35 36
36static struct dentry *bat_debugfs; 37static struct dentry *bat_debugfs;
37 38
@@ -82,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
82 83
83 va_start(args, fmt); 84 va_start(args, fmt);
84 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); 85 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
85 fdebug_log(bat_priv->debug_log, "[%10lu] %s", 86 fdebug_log(bat_priv->debug_log, "[%10u] %s",
86 (jiffies / HZ), tmp_log_buf); 87 jiffies_to_msecs(jiffies), tmp_log_buf);
87 va_end(args); 88 va_end(args);
88 89
89 return 0; 90 return 0;
@@ -221,6 +222,11 @@ static void debug_log_cleanup(struct bat_priv *bat_priv)
221} 222}
222#endif 223#endif
223 224
225static int bat_algorithms_open(struct inode *inode, struct file *file)
226{
227 return single_open(file, bat_algo_seq_print_text, NULL);
228}
229
224static int originators_open(struct inode *inode, struct file *file) 230static int originators_open(struct inode *inode, struct file *file)
225{ 231{
226 struct net_device *net_dev = (struct net_device *)inode->i_private; 232 struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -233,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file)
233 return single_open(file, gw_client_seq_print_text, net_dev); 239 return single_open(file, gw_client_seq_print_text, net_dev);
234} 240}
235 241
236static int softif_neigh_open(struct inode *inode, struct file *file) 242static int transtable_global_open(struct inode *inode, struct file *file)
237{ 243{
238 struct net_device *net_dev = (struct net_device *)inode->i_private; 244 struct net_device *net_dev = (struct net_device *)inode->i_private;
239 return single_open(file, softif_neigh_seq_print_text, net_dev); 245 return single_open(file, tt_global_seq_print_text, net_dev);
240} 246}
241 247
242static int transtable_global_open(struct inode *inode, struct file *file) 248#ifdef CONFIG_BATMAN_ADV_BLA
249static int bla_claim_table_open(struct inode *inode, struct file *file)
243{ 250{
244 struct net_device *net_dev = (struct net_device *)inode->i_private; 251 struct net_device *net_dev = (struct net_device *)inode->i_private;
245 return single_open(file, tt_global_seq_print_text, net_dev); 252 return single_open(file, bla_claim_table_seq_print_text, net_dev);
246} 253}
254#endif
247 255
248static int transtable_local_open(struct inode *inode, struct file *file) 256static int transtable_local_open(struct inode *inode, struct file *file)
249{ 257{
@@ -274,18 +282,23 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
274 } \ 282 } \
275}; 283};
276 284
285static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
277static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); 286static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
278static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); 287static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
279static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
280static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); 288static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
289#ifdef CONFIG_BATMAN_ADV_BLA
290static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
291#endif
281static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); 292static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
282static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); 293static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
283 294
284static struct bat_debuginfo *mesh_debuginfos[] = { 295static struct bat_debuginfo *mesh_debuginfos[] = {
285 &bat_debuginfo_originators, 296 &bat_debuginfo_originators,
286 &bat_debuginfo_gateways, 297 &bat_debuginfo_gateways,
287 &bat_debuginfo_softif_neigh,
288 &bat_debuginfo_transtable_global, 298 &bat_debuginfo_transtable_global,
299#ifdef CONFIG_BATMAN_ADV_BLA
300 &bat_debuginfo_bla_claim_table,
301#endif
289 &bat_debuginfo_transtable_local, 302 &bat_debuginfo_transtable_local,
290 &bat_debuginfo_vis_data, 303 &bat_debuginfo_vis_data,
291 NULL, 304 NULL,
@@ -293,9 +306,25 @@ static struct bat_debuginfo *mesh_debuginfos[] = {
293 306
294void debugfs_init(void) 307void debugfs_init(void)
295{ 308{
309 struct bat_debuginfo *bat_debug;
310 struct dentry *file;
311
296 bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL); 312 bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
297 if (bat_debugfs == ERR_PTR(-ENODEV)) 313 if (bat_debugfs == ERR_PTR(-ENODEV))
298 bat_debugfs = NULL; 314 bat_debugfs = NULL;
315
316 if (!bat_debugfs)
317 goto out;
318
319 bat_debug = &bat_debuginfo_routing_algos;
320 file = debugfs_create_file(bat_debug->attr.name,
321 S_IFREG | bat_debug->attr.mode,
322 bat_debugfs, NULL, &bat_debug->fops);
323 if (!file)
324 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
325
326out:
327 return;
299} 328}
300 329
301void debugfs_destroy(void) 330void debugfs_destroy(void)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index bc9cda3f01e1..d605c6746428 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 3512e251545b..dc53798ebb47 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "bat_ogm.h"
24#include "translation-table.h" 23#include "translation-table.h"
25#include "ring_buffer.h" 24#include "ring_buffer.h"
26#include "originator.h" 25#include "originator.h"
@@ -29,34 +28,71 @@
29#include "gateway_client.h" 28#include "gateway_client.h"
30#include "hard-interface.h" 29#include "hard-interface.h"
31#include "send.h" 30#include "send.h"
31#include "bat_algo.h"
32 32
33void bat_ogm_init(struct hard_iface *hard_iface) 33static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
34 const uint8_t *neigh_addr,
35 struct orig_node *orig_node,
36 struct orig_node *orig_neigh,
37 uint32_t seqno)
38{
39 struct neigh_node *neigh_node;
40
41 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
42 if (!neigh_node)
43 goto out;
44
45 INIT_LIST_HEAD(&neigh_node->bonding_list);
46
47 neigh_node->orig_node = orig_neigh;
48 neigh_node->if_incoming = hard_iface;
49
50 spin_lock_bh(&orig_node->neigh_list_lock);
51 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
52 spin_unlock_bh(&orig_node->neigh_list_lock);
53
54out:
55 return neigh_node;
56}
57
58static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
34{ 59{
35 struct batman_ogm_packet *batman_ogm_packet; 60 struct batman_ogm_packet *batman_ogm_packet;
61 uint32_t random_seqno;
62 int res = -1;
63
64 /* randomize initial seqno to avoid collision */
65 get_random_bytes(&random_seqno, sizeof(random_seqno));
66 atomic_set(&hard_iface->seqno, random_seqno);
36 67
37 hard_iface->packet_len = BATMAN_OGM_LEN; 68 hard_iface->packet_len = BATMAN_OGM_HLEN;
38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 69 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
39 70
71 if (!hard_iface->packet_buff)
72 goto out;
73
40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 74 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
41 batman_ogm_packet->packet_type = BAT_OGM; 75 batman_ogm_packet->header.packet_type = BAT_IV_OGM;
42 batman_ogm_packet->version = COMPAT_VERSION; 76 batman_ogm_packet->header.version = COMPAT_VERSION;
77 batman_ogm_packet->header.ttl = 2;
43 batman_ogm_packet->flags = NO_FLAGS; 78 batman_ogm_packet->flags = NO_FLAGS;
44 batman_ogm_packet->ttl = 2;
45 batman_ogm_packet->tq = TQ_MAX_VALUE; 79 batman_ogm_packet->tq = TQ_MAX_VALUE;
46 batman_ogm_packet->tt_num_changes = 0; 80 batman_ogm_packet->tt_num_changes = 0;
47 batman_ogm_packet->ttvn = 0; 81 batman_ogm_packet->ttvn = 0;
82
83 res = 0;
84
85out:
86 return res;
48} 87}
49 88
50void bat_ogm_init_primary(struct hard_iface *hard_iface) 89static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
51{ 90{
52 struct batman_ogm_packet *batman_ogm_packet; 91 kfree(hard_iface->packet_buff);
53 92 hard_iface->packet_buff = NULL;
54 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
55 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
56 batman_ogm_packet->ttl = TTL;
57} 93}
58 94
59void bat_ogm_update_mac(struct hard_iface *hard_iface) 95static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
60{ 96{
61 struct batman_ogm_packet *batman_ogm_packet; 97 struct batman_ogm_packet *batman_ogm_packet;
62 98
@@ -67,8 +103,17 @@ void bat_ogm_update_mac(struct hard_iface *hard_iface)
67 hard_iface->net_dev->dev_addr, ETH_ALEN); 103 hard_iface->net_dev->dev_addr, ETH_ALEN);
68} 104}
69 105
106static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
107{
108 struct batman_ogm_packet *batman_ogm_packet;
109
110 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
111 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
112 batman_ogm_packet->header.ttl = TTL;
113}
114
70/* when do we schedule our own ogm to be sent */ 115/* when do we schedule our own ogm to be sent */
71static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv) 116static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
72{ 117{
73 return jiffies + msecs_to_jiffies( 118 return jiffies + msecs_to_jiffies(
74 atomic_read(&bat_priv->orig_interval) - 119 atomic_read(&bat_priv->orig_interval) -
@@ -76,7 +121,7 @@ static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
76} 121}
77 122
78/* when do we schedule a ogm packet to be sent */ 123/* when do we schedule a ogm packet to be sent */
79static unsigned long bat_ogm_fwd_send_time(void) 124static unsigned long bat_iv_ogm_fwd_send_time(void)
80{ 125{
81 return jiffies + msecs_to_jiffies(random32() % (JITTER/2)); 126 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
82} 127}
@@ -89,18 +134,18 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
89} 134}
90 135
91/* is there another aggregated packet here? */ 136/* is there another aggregated packet here? */
92static int bat_ogm_aggr_packet(int buff_pos, int packet_len, 137static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
93 int tt_num_changes) 138 int tt_num_changes)
94{ 139{
95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); 140 int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
96 141
97 return (next_buff_pos <= packet_len) && 142 return (next_buff_pos <= packet_len) &&
98 (next_buff_pos <= MAX_AGGREGATION_BYTES); 143 (next_buff_pos <= MAX_AGGREGATION_BYTES);
99} 144}
100 145
101/* send a batman ogm to a given interface */ 146/* send a batman ogm to a given interface */
102static void bat_ogm_send_to_if(struct forw_packet *forw_packet, 147static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
103 struct hard_iface *hard_iface) 148 struct hard_iface *hard_iface)
104{ 149{
105 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 150 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
106 char *fwd_str; 151 char *fwd_str;
@@ -117,8 +162,8 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
117 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; 162 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
118 163
119 /* adjust all flags and log packets */ 164 /* adjust all flags and log packets */
120 while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 165 while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
121 batman_ogm_packet->tt_num_changes)) { 166 batman_ogm_packet->tt_num_changes)) {
122 167
123 /* we might have aggregated direct link packets with an 168 /* we might have aggregated direct link packets with an
124 * ordinary base packet */ 169 * ordinary base packet */
@@ -132,18 +177,17 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
132 "Sending own" : 177 "Sending own" :
133 "Forwarding")); 178 "Forwarding"));
134 bat_dbg(DBG_BATMAN, bat_priv, 179 bat_dbg(DBG_BATMAN, bat_priv,
135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d," 180 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
136 " IDF %s, ttvn %d) on interface %s [%pM]\n",
137 fwd_str, (packet_num > 0 ? "aggregated " : ""), 181 fwd_str, (packet_num > 0 ? "aggregated " : ""),
138 batman_ogm_packet->orig, 182 batman_ogm_packet->orig,
139 ntohl(batman_ogm_packet->seqno), 183 ntohl(batman_ogm_packet->seqno),
140 batman_ogm_packet->tq, batman_ogm_packet->ttl, 184 batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
141 (batman_ogm_packet->flags & DIRECTLINK ? 185 (batman_ogm_packet->flags & DIRECTLINK ?
142 "on" : "off"), 186 "on" : "off"),
143 batman_ogm_packet->ttvn, hard_iface->net_dev->name, 187 batman_ogm_packet->ttvn, hard_iface->net_dev->name,
144 hard_iface->net_dev->dev_addr); 188 hard_iface->net_dev->dev_addr);
145 189
146 buff_pos += BATMAN_OGM_LEN + 190 buff_pos += BATMAN_OGM_HLEN +
147 tt_len(batman_ogm_packet->tt_num_changes); 191 tt_len(batman_ogm_packet->tt_num_changes);
148 packet_num++; 192 packet_num++;
149 batman_ogm_packet = (struct batman_ogm_packet *) 193 batman_ogm_packet = (struct batman_ogm_packet *)
@@ -157,7 +201,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
157} 201}
158 202
159/* send a batman ogm packet */ 203/* send a batman ogm packet */
160void bat_ogm_emit(struct forw_packet *forw_packet) 204static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
161{ 205{
162 struct hard_iface *hard_iface; 206 struct hard_iface *hard_iface;
163 struct net_device *soft_iface; 207 struct net_device *soft_iface;
@@ -171,8 +215,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
171 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 215 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
172 216
173 if (!forw_packet->if_incoming) { 217 if (!forw_packet->if_incoming) {
174 pr_err("Error - can't forward packet: incoming iface not " 218 pr_err("Error - can't forward packet: incoming iface not specified\n");
175 "specified\n");
176 goto out; 219 goto out;
177 } 220 }
178 221
@@ -188,17 +231,16 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
188 231
189 /* multihomed peer assumed */ 232 /* multihomed peer assumed */
190 /* non-primary OGMs are only broadcasted on their interface */ 233 /* non-primary OGMs are only broadcasted on their interface */
191 if ((directlink && (batman_ogm_packet->ttl == 1)) || 234 if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
192 (forw_packet->own && (forw_packet->if_incoming != primary_if))) { 235 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
193 236
194 /* FIXME: what about aggregated packets ? */ 237 /* FIXME: what about aggregated packets ? */
195 bat_dbg(DBG_BATMAN, bat_priv, 238 bat_dbg(DBG_BATMAN, bat_priv,
196 "%s packet (originator %pM, seqno %d, TTL %d) " 239 "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
197 "on interface %s [%pM]\n",
198 (forw_packet->own ? "Sending own" : "Forwarding"), 240 (forw_packet->own ? "Sending own" : "Forwarding"),
199 batman_ogm_packet->orig, 241 batman_ogm_packet->orig,
200 ntohl(batman_ogm_packet->seqno), 242 ntohl(batman_ogm_packet->seqno),
201 batman_ogm_packet->ttl, 243 batman_ogm_packet->header.ttl,
202 forw_packet->if_incoming->net_dev->name, 244 forw_packet->if_incoming->net_dev->name,
203 forw_packet->if_incoming->net_dev->dev_addr); 245 forw_packet->if_incoming->net_dev->dev_addr);
204 246
@@ -216,7 +258,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
216 if (hard_iface->soft_iface != soft_iface) 258 if (hard_iface->soft_iface != soft_iface)
217 continue; 259 continue;
218 260
219 bat_ogm_send_to_if(forw_packet, hard_iface); 261 bat_iv_ogm_send_to_if(forw_packet, hard_iface);
220 } 262 }
221 rcu_read_unlock(); 263 rcu_read_unlock();
222 264
@@ -226,13 +268,13 @@ out:
226} 268}
227 269
228/* return true if new_packet can be aggregated with forw_packet */ 270/* return true if new_packet can be aggregated with forw_packet */
229static bool bat_ogm_can_aggregate(const struct batman_ogm_packet 271static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
230 *new_batman_ogm_packet, 272 *new_batman_ogm_packet,
231 struct bat_priv *bat_priv, 273 struct bat_priv *bat_priv,
232 int packet_len, unsigned long send_time, 274 int packet_len, unsigned long send_time,
233 bool directlink, 275 bool directlink,
234 const struct hard_iface *if_incoming, 276 const struct hard_iface *if_incoming,
235 const struct forw_packet *forw_packet) 277 const struct forw_packet *forw_packet)
236{ 278{
237 struct batman_ogm_packet *batman_ogm_packet; 279 struct batman_ogm_packet *batman_ogm_packet;
238 int aggregated_bytes = forw_packet->packet_len + packet_len; 280 int aggregated_bytes = forw_packet->packet_len + packet_len;
@@ -272,7 +314,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
272 * are flooded through the net */ 314 * are flooded through the net */
273 if ((!directlink) && 315 if ((!directlink) &&
274 (!(batman_ogm_packet->flags & DIRECTLINK)) && 316 (!(batman_ogm_packet->flags & DIRECTLINK)) &&
275 (batman_ogm_packet->ttl != 1) && 317 (batman_ogm_packet->header.ttl != 1) &&
276 318
277 /* own packets originating non-primary 319 /* own packets originating non-primary
278 * interfaces leave only that interface */ 320 * interfaces leave only that interface */
@@ -285,7 +327,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
285 /* if the incoming packet is sent via this one 327 /* if the incoming packet is sent via this one
286 * interface only - we still can aggregate */ 328 * interface only - we still can aggregate */
287 if ((directlink) && 329 if ((directlink) &&
288 (new_batman_ogm_packet->ttl == 1) && 330 (new_batman_ogm_packet->header.ttl == 1) &&
289 (forw_packet->if_incoming == if_incoming) && 331 (forw_packet->if_incoming == if_incoming) &&
290 332
291 /* packets from direct neighbors or 333 /* packets from direct neighbors or
@@ -306,11 +348,11 @@ out:
306} 348}
307 349
308/* create a new aggregated packet and add this packet to it */ 350/* create a new aggregated packet and add this packet to it */
309static void bat_ogm_aggregate_new(const unsigned char *packet_buff, 351static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
310 int packet_len, unsigned long send_time, 352 int packet_len, unsigned long send_time,
311 bool direct_link, 353 bool direct_link,
312 struct hard_iface *if_incoming, 354 struct hard_iface *if_incoming,
313 int own_packet) 355 int own_packet)
314{ 356{
315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 357 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct forw_packet *forw_packet_aggr; 358 struct forw_packet *forw_packet_aggr;
@@ -338,10 +380,9 @@ static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
338 if ((atomic_read(&bat_priv->aggregated_ogms)) && 380 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
339 (packet_len < MAX_AGGREGATION_BYTES)) 381 (packet_len < MAX_AGGREGATION_BYTES))
340 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + 382 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
341 sizeof(struct ethhdr)); 383 ETH_HLEN);
342 else 384 else
343 forw_packet_aggr->skb = dev_alloc_skb(packet_len + 385 forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
344 sizeof(struct ethhdr));
345 386
346 if (!forw_packet_aggr->skb) { 387 if (!forw_packet_aggr->skb) {
347 if (!own_packet) 388 if (!own_packet)
@@ -349,7 +390,7 @@ static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
349 kfree(forw_packet_aggr); 390 kfree(forw_packet_aggr);
350 goto out; 391 goto out;
351 } 392 }
352 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); 393 skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
353 394
354 INIT_HLIST_NODE(&forw_packet_aggr->list); 395 INIT_HLIST_NODE(&forw_packet_aggr->list);
355 396
@@ -385,9 +426,9 @@ out:
385} 426}
386 427
387/* aggregate a new packet into the existing ogm packet */ 428/* aggregate a new packet into the existing ogm packet */
388static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr, 429static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
389 const unsigned char *packet_buff, 430 const unsigned char *packet_buff,
390 int packet_len, bool direct_link) 431 int packet_len, bool direct_link)
391{ 432{
392 unsigned char *skb_buff; 433 unsigned char *skb_buff;
393 434
@@ -402,10 +443,10 @@ static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
402 (1 << forw_packet_aggr->num_packets); 443 (1 << forw_packet_aggr->num_packets);
403} 444}
404 445
405static void bat_ogm_queue_add(struct bat_priv *bat_priv, 446static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
406 unsigned char *packet_buff, 447 unsigned char *packet_buff,
407 int packet_len, struct hard_iface *if_incoming, 448 int packet_len, struct hard_iface *if_incoming,
408 int own_packet, unsigned long send_time) 449 int own_packet, unsigned long send_time)
409{ 450{
410 /** 451 /**
411 * _aggr -> pointer to the packet we want to aggregate with 452 * _aggr -> pointer to the packet we want to aggregate with
@@ -425,11 +466,11 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
425 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 466 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
426 hlist_for_each_entry(forw_packet_pos, tmp_node, 467 hlist_for_each_entry(forw_packet_pos, tmp_node,
427 &bat_priv->forw_bat_list, list) { 468 &bat_priv->forw_bat_list, list) {
428 if (bat_ogm_can_aggregate(batman_ogm_packet, 469 if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
429 bat_priv, packet_len, 470 bat_priv, packet_len,
430 send_time, direct_link, 471 send_time, direct_link,
431 if_incoming, 472 if_incoming,
432 forw_packet_pos)) { 473 forw_packet_pos)) {
433 forw_packet_aggr = forw_packet_pos; 474 forw_packet_aggr = forw_packet_pos;
434 break; 475 break;
435 } 476 }
@@ -451,83 +492,73 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
451 (atomic_read(&bat_priv->aggregated_ogms))) 492 (atomic_read(&bat_priv->aggregated_ogms)))
452 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS); 493 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
453 494
454 bat_ogm_aggregate_new(packet_buff, packet_len, 495 bat_iv_ogm_aggregate_new(packet_buff, packet_len,
455 send_time, direct_link, 496 send_time, direct_link,
456 if_incoming, own_packet); 497 if_incoming, own_packet);
457 } else { 498 } else {
458 bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len, 499 bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
459 direct_link); 500 packet_len, direct_link);
460 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 501 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
461 } 502 }
462} 503}
463 504
464static void bat_ogm_forward(struct orig_node *orig_node, 505static void bat_iv_ogm_forward(struct orig_node *orig_node,
465 const struct ethhdr *ethhdr, 506 const struct ethhdr *ethhdr,
466 struct batman_ogm_packet *batman_ogm_packet, 507 struct batman_ogm_packet *batman_ogm_packet,
467 int directlink, struct hard_iface *if_incoming) 508 bool is_single_hop_neigh,
509 bool is_from_best_next_hop,
510 struct hard_iface *if_incoming)
468{ 511{
469 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 512 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
470 struct neigh_node *router;
471 uint8_t in_tq, in_ttl, tq_avg = 0;
472 uint8_t tt_num_changes; 513 uint8_t tt_num_changes;
473 514
474 if (batman_ogm_packet->ttl <= 1) { 515 if (batman_ogm_packet->header.ttl <= 1) {
475 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); 516 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
476 return; 517 return;
477 } 518 }
478 519
479 router = orig_node_get_router(orig_node); 520 if (!is_from_best_next_hop) {
521 /* Mark the forwarded packet when it is not coming from our
522 * best next hop. We still need to forward the packet for our
523 * neighbor link quality detection to work in case the packet
524 * originated from a single hop neighbor. Otherwise we can
525 * simply drop the ogm.
526 */
527 if (is_single_hop_neigh)
528 batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
529 else
530 return;
531 }
480 532
481 in_tq = batman_ogm_packet->tq;
482 in_ttl = batman_ogm_packet->ttl;
483 tt_num_changes = batman_ogm_packet->tt_num_changes; 533 tt_num_changes = batman_ogm_packet->tt_num_changes;
484 534
485 batman_ogm_packet->ttl--; 535 batman_ogm_packet->header.ttl--;
486 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 536 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
487 537
488 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
489 * of our best tq value */
490 if (router && router->tq_avg != 0) {
491
492 /* rebroadcast ogm of best ranking neighbor as is */
493 if (!compare_eth(router->addr, ethhdr->h_source)) {
494 batman_ogm_packet->tq = router->tq_avg;
495
496 if (router->last_ttl)
497 batman_ogm_packet->ttl = router->last_ttl - 1;
498 }
499
500 tq_avg = router->tq_avg;
501 }
502
503 if (router)
504 neigh_node_free_ref(router);
505
506 /* apply hop penalty */ 538 /* apply hop penalty */
507 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); 539 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
508 540
509 bat_dbg(DBG_BATMAN, bat_priv, 541 bat_dbg(DBG_BATMAN, bat_priv,
510 "Forwarding packet: tq_orig: %i, tq_avg: %i, " 542 "Forwarding packet: tq: %i, ttl: %i\n",
511 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", 543 batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
512 in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
513 batman_ogm_packet->ttl);
514 544
515 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); 545 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
516 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); 546 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
517 547
518 /* switch of primaries first hop flag when forwarding */ 548 /* switch of primaries first hop flag when forwarding */
519 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; 549 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
520 if (directlink) 550 if (is_single_hop_neigh)
521 batman_ogm_packet->flags |= DIRECTLINK; 551 batman_ogm_packet->flags |= DIRECTLINK;
522 else 552 else
523 batman_ogm_packet->flags &= ~DIRECTLINK; 553 batman_ogm_packet->flags &= ~DIRECTLINK;
524 554
525 bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, 555 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
526 BATMAN_OGM_LEN + tt_len(tt_num_changes), 556 BATMAN_OGM_HLEN + tt_len(tt_num_changes),
527 if_incoming, 0, bat_ogm_fwd_send_time()); 557 if_incoming, 0, bat_iv_ogm_fwd_send_time());
528} 558}
529 559
530void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes) 560static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
561 int tt_num_changes)
531{ 562{
532 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 563 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
533 struct batman_ogm_packet *batman_ogm_packet; 564 struct batman_ogm_packet *batman_ogm_packet;
@@ -564,21 +595,22 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
564 atomic_inc(&hard_iface->seqno); 595 atomic_inc(&hard_iface->seqno);
565 596
566 slide_own_bcast_window(hard_iface); 597 slide_own_bcast_window(hard_iface);
567 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff, 598 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
568 hard_iface->packet_len, hard_iface, 1, 599 hard_iface->packet_len, hard_iface, 1,
569 bat_ogm_emit_send_time(bat_priv)); 600 bat_iv_ogm_emit_send_time(bat_priv));
570 601
571 if (primary_if) 602 if (primary_if)
572 hardif_free_ref(primary_if); 603 hardif_free_ref(primary_if);
573} 604}
574 605
575static void bat_ogm_orig_update(struct bat_priv *bat_priv, 606static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
576 struct orig_node *orig_node, 607 struct orig_node *orig_node,
577 const struct ethhdr *ethhdr, 608 const struct ethhdr *ethhdr,
578 const struct batman_ogm_packet 609 const struct batman_ogm_packet
579 *batman_ogm_packet, 610 *batman_ogm_packet,
580 struct hard_iface *if_incoming, 611 struct hard_iface *if_incoming,
581 const unsigned char *tt_buff, int is_duplicate) 612 const unsigned char *tt_buff,
613 int is_duplicate)
582{ 614{
583 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 615 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
584 struct neigh_node *router = NULL; 616 struct neigh_node *router = NULL;
@@ -586,8 +618,8 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
586 struct hlist_node *node; 618 struct hlist_node *node;
587 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 619 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
588 620
589 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " 621 bat_dbg(DBG_BATMAN, bat_priv,
590 "Searching and updating originator entry of received packet\n"); 622 "update_originator(): Searching and updating originator entry of received packet\n");
591 623
592 rcu_read_lock(); 624 rcu_read_lock();
593 hlist_for_each_entry_rcu(tmp_neigh_node, node, 625 hlist_for_each_entry_rcu(tmp_neigh_node, node,
@@ -604,12 +636,12 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
604 if (is_duplicate) 636 if (is_duplicate)
605 continue; 637 continue;
606 638
607 spin_lock_bh(&tmp_neigh_node->tq_lock); 639 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
608 ring_buffer_set(tmp_neigh_node->tq_recv, 640 ring_buffer_set(tmp_neigh_node->tq_recv,
609 &tmp_neigh_node->tq_index, 0); 641 &tmp_neigh_node->tq_index, 0);
610 tmp_neigh_node->tq_avg = 642 tmp_neigh_node->tq_avg =
611 ring_buffer_avg(tmp_neigh_node->tq_recv); 643 ring_buffer_avg(tmp_neigh_node->tq_recv);
612 spin_unlock_bh(&tmp_neigh_node->tq_lock); 644 spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
613 } 645 }
614 646
615 if (!neigh_node) { 647 if (!neigh_node) {
@@ -619,8 +651,9 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
619 if (!orig_tmp) 651 if (!orig_tmp)
620 goto unlock; 652 goto unlock;
621 653
622 neigh_node = create_neighbor(orig_node, orig_tmp, 654 neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
623 ethhdr->h_source, if_incoming); 655 orig_node, orig_tmp,
656 batman_ogm_packet->seqno);
624 657
625 orig_node_free_ref(orig_tmp); 658 orig_node_free_ref(orig_tmp);
626 if (!neigh_node) 659 if (!neigh_node)
@@ -632,18 +665,18 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
632 rcu_read_unlock(); 665 rcu_read_unlock();
633 666
634 orig_node->flags = batman_ogm_packet->flags; 667 orig_node->flags = batman_ogm_packet->flags;
635 neigh_node->last_valid = jiffies; 668 neigh_node->last_seen = jiffies;
636 669
637 spin_lock_bh(&neigh_node->tq_lock); 670 spin_lock_bh(&neigh_node->lq_update_lock);
638 ring_buffer_set(neigh_node->tq_recv, 671 ring_buffer_set(neigh_node->tq_recv,
639 &neigh_node->tq_index, 672 &neigh_node->tq_index,
640 batman_ogm_packet->tq); 673 batman_ogm_packet->tq);
641 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 674 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
642 spin_unlock_bh(&neigh_node->tq_lock); 675 spin_unlock_bh(&neigh_node->lq_update_lock);
643 676
644 if (!is_duplicate) { 677 if (!is_duplicate) {
645 orig_node->last_ttl = batman_ogm_packet->ttl; 678 orig_node->last_ttl = batman_ogm_packet->header.ttl;
646 neigh_node->last_ttl = batman_ogm_packet->ttl; 679 neigh_node->last_ttl = batman_ogm_packet->header.ttl;
647 } 680 }
648 681
649 bonding_candidate_add(orig_node, neigh_node); 682 bonding_candidate_add(orig_node, neigh_node);
@@ -683,7 +716,7 @@ update_tt:
683 /* I have to check for transtable changes only if the OGM has been 716 /* I have to check for transtable changes only if the OGM has been
684 * sent through a primary interface */ 717 * sent through a primary interface */
685 if (((batman_ogm_packet->orig != ethhdr->h_source) && 718 if (((batman_ogm_packet->orig != ethhdr->h_source) &&
686 (batman_ogm_packet->ttl > 2)) || 719 (batman_ogm_packet->header.ttl > 2)) ||
687 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 720 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
688 tt_update_orig(bat_priv, orig_node, tt_buff, 721 tt_update_orig(bat_priv, orig_node, tt_buff,
689 batman_ogm_packet->tt_num_changes, 722 batman_ogm_packet->tt_num_changes,
@@ -713,10 +746,10 @@ out:
713 neigh_node_free_ref(router); 746 neigh_node_free_ref(router);
714} 747}
715 748
716static int bat_ogm_calc_tq(struct orig_node *orig_node, 749static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
717 struct orig_node *orig_neigh_node, 750 struct orig_node *orig_neigh_node,
718 struct batman_ogm_packet *batman_ogm_packet, 751 struct batman_ogm_packet *batman_ogm_packet,
719 struct hard_iface *if_incoming) 752 struct hard_iface *if_incoming)
720{ 753{
721 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 754 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
722 struct neigh_node *neigh_node = NULL, *tmp_neigh_node; 755 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -745,19 +778,20 @@ static int bat_ogm_calc_tq(struct orig_node *orig_node,
745 rcu_read_unlock(); 778 rcu_read_unlock();
746 779
747 if (!neigh_node) 780 if (!neigh_node)
748 neigh_node = create_neighbor(orig_neigh_node, 781 neigh_node = bat_iv_ogm_neigh_new(if_incoming,
749 orig_neigh_node, 782 orig_neigh_node->orig,
750 orig_neigh_node->orig, 783 orig_neigh_node,
751 if_incoming); 784 orig_neigh_node,
785 batman_ogm_packet->seqno);
752 786
753 if (!neigh_node) 787 if (!neigh_node)
754 goto out; 788 goto out;
755 789
756 /* if orig_node is direct neighbor update neigh_node last_valid */ 790 /* if orig_node is direct neighbor update neigh_node last_seen */
757 if (orig_node == orig_neigh_node) 791 if (orig_node == orig_neigh_node)
758 neigh_node->last_valid = jiffies; 792 neigh_node->last_seen = jiffies;
759 793
760 orig_node->last_valid = jiffies; 794 orig_node->last_seen = jiffies;
761 795
762 /* find packet count of corresponding one hop neighbor */ 796 /* find packet count of corresponding one hop neighbor */
763 spin_lock_bh(&orig_node->ogm_cnt_lock); 797 spin_lock_bh(&orig_node->ogm_cnt_lock);
@@ -780,8 +814,7 @@ static int bat_ogm_calc_tq(struct orig_node *orig_node,
780 * information */ 814 * information */
781 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; 815 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
782 816
783 /* 817 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
784 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
785 * affect the nearly-symmetric links only a little, but 818 * affect the nearly-symmetric links only a little, but
786 * punishes asymmetric links more. This will give a value 819 * punishes asymmetric links more. This will give a value
787 * between 0 and TQ_MAX_VALUE 820 * between 0 and TQ_MAX_VALUE
@@ -799,10 +832,7 @@ static int bat_ogm_calc_tq(struct orig_node *orig_node,
799 (TQ_MAX_VALUE * TQ_MAX_VALUE)); 832 (TQ_MAX_VALUE * TQ_MAX_VALUE));
800 833
801 bat_dbg(DBG_BATMAN, bat_priv, 834 bat_dbg(DBG_BATMAN, bat_priv,
802 "bidirectional: " 835 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
803 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
804 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
805 "total tq: %3i\n",
806 orig_node->orig, orig_neigh_node->orig, total_count, 836 orig_node->orig, orig_neigh_node->orig, total_count,
807 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); 837 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
808 838
@@ -825,10 +855,10 @@ out:
825 * -1 the packet is old and has been received while the seqno window 855 * -1 the packet is old and has been received while the seqno window
826 * was protected. Caller should drop it. 856 * was protected. Caller should drop it.
827 */ 857 */
828static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr, 858static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
829 const struct batman_ogm_packet 859 const struct batman_ogm_packet
830 *batman_ogm_packet, 860 *batman_ogm_packet,
831 const struct hard_iface *if_incoming) 861 const struct hard_iface *if_incoming)
832{ 862{
833 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 863 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
834 struct orig_node *orig_node; 864 struct orig_node *orig_node;
@@ -847,7 +877,8 @@ static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
847 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; 877 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
848 878
849 /* signalize caller that the packet is to be dropped. */ 879 /* signalize caller that the packet is to be dropped. */
850 if (window_protected(bat_priv, seq_diff, 880 if (!hlist_empty(&orig_node->neigh_list) &&
881 window_protected(bat_priv, seq_diff,
851 &orig_node->batman_seqno_reset)) 882 &orig_node->batman_seqno_reset))
852 goto out; 883 goto out;
853 884
@@ -855,9 +886,9 @@ static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
855 hlist_for_each_entry_rcu(tmp_neigh_node, node, 886 hlist_for_each_entry_rcu(tmp_neigh_node, node,
856 &orig_node->neigh_list, list) { 887 &orig_node->neigh_list, list) {
857 888
858 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, 889 is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
859 orig_node->last_real_seqno, 890 orig_node->last_real_seqno,
860 batman_ogm_packet->seqno); 891 batman_ogm_packet->seqno);
861 892
862 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 893 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
863 (tmp_neigh_node->if_incoming == if_incoming)) 894 (tmp_neigh_node->if_incoming == if_incoming))
@@ -871,13 +902,14 @@ static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
871 seq_diff, set_mark); 902 seq_diff, set_mark);
872 903
873 tmp_neigh_node->real_packet_count = 904 tmp_neigh_node->real_packet_count =
874 bit_packet_count(tmp_neigh_node->real_bits); 905 bitmap_weight(tmp_neigh_node->real_bits,
906 TQ_LOCAL_WINDOW_SIZE);
875 } 907 }
876 rcu_read_unlock(); 908 rcu_read_unlock();
877 909
878 if (need_update) { 910 if (need_update) {
879 bat_dbg(DBG_BATMAN, bat_priv, 911 bat_dbg(DBG_BATMAN, bat_priv,
880 "updating last_seqno: old %d, new %d\n", 912 "updating last_seqno: old %u, new %u\n",
881 orig_node->last_real_seqno, batman_ogm_packet->seqno); 913 orig_node->last_real_seqno, batman_ogm_packet->seqno);
882 orig_node->last_real_seqno = batman_ogm_packet->seqno; 914 orig_node->last_real_seqno = batman_ogm_packet->seqno;
883 } 915 }
@@ -890,10 +922,10 @@ out:
890 return ret; 922 return ret;
891} 923}
892 924
893static void bat_ogm_process(const struct ethhdr *ethhdr, 925static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
894 struct batman_ogm_packet *batman_ogm_packet, 926 struct batman_ogm_packet *batman_ogm_packet,
895 const unsigned char *tt_buff, 927 const unsigned char *tt_buff,
896 struct hard_iface *if_incoming) 928 struct hard_iface *if_incoming)
897{ 929{
898 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 930 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
899 struct hard_iface *hard_iface; 931 struct hard_iface *hard_iface;
@@ -902,7 +934,9 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
902 struct neigh_node *orig_neigh_router = NULL; 934 struct neigh_node *orig_neigh_router = NULL;
903 int has_directlink_flag; 935 int has_directlink_flag;
904 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 936 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
905 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; 937 int is_broadcast = 0, is_bidirectional;
938 bool is_single_hop_neigh = false;
939 bool is_from_best_next_hop = false;
906 int is_duplicate; 940 int is_duplicate;
907 uint32_t if_incoming_seqno; 941 uint32_t if_incoming_seqno;
908 942
@@ -918,7 +952,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
918 * packet in an aggregation. Here we expect that the padding 952 * packet in an aggregation. Here we expect that the padding
919 * is always zero (or not 0x01) 953 * is always zero (or not 0x01)
920 */ 954 */
921 if (batman_ogm_packet->packet_type != BAT_OGM) 955 if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
922 return; 956 return;
923 957
924 /* could be changed by schedule_own_packet() */ 958 /* could be changed by schedule_own_packet() */
@@ -926,20 +960,18 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
926 960
927 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 961 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
928 962
929 is_single_hop_neigh = (compare_eth(ethhdr->h_source, 963 if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
930 batman_ogm_packet->orig) ? 1 : 0); 964 is_single_hop_neigh = true;
931 965
932 bat_dbg(DBG_BATMAN, bat_priv, 966 bat_dbg(DBG_BATMAN, bat_priv,
933 "Received BATMAN packet via NB: %pM, IF: %s [%pM] " 967 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
934 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
935 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
936 ethhdr->h_source, if_incoming->net_dev->name, 968 ethhdr->h_source, if_incoming->net_dev->name,
937 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, 969 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
938 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, 970 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
939 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, 971 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
940 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, 972 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
941 batman_ogm_packet->ttl, batman_ogm_packet->version, 973 batman_ogm_packet->header.ttl,
942 has_directlink_flag); 974 batman_ogm_packet->header.version, has_directlink_flag);
943 975
944 rcu_read_lock(); 976 rcu_read_lock();
945 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 977 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -966,25 +998,24 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
966 } 998 }
967 rcu_read_unlock(); 999 rcu_read_unlock();
968 1000
969 if (batman_ogm_packet->version != COMPAT_VERSION) { 1001 if (batman_ogm_packet->header.version != COMPAT_VERSION) {
970 bat_dbg(DBG_BATMAN, bat_priv, 1002 bat_dbg(DBG_BATMAN, bat_priv,
971 "Drop packet: incompatible batman version (%i)\n", 1003 "Drop packet: incompatible batman version (%i)\n",
972 batman_ogm_packet->version); 1004 batman_ogm_packet->header.version);
973 return; 1005 return;
974 } 1006 }
975 1007
976 if (is_my_addr) { 1008 if (is_my_addr) {
977 bat_dbg(DBG_BATMAN, bat_priv, 1009 bat_dbg(DBG_BATMAN, bat_priv,
978 "Drop packet: received my own broadcast (sender: %pM" 1010 "Drop packet: received my own broadcast (sender: %pM)\n",
979 ")\n",
980 ethhdr->h_source); 1011 ethhdr->h_source);
981 return; 1012 return;
982 } 1013 }
983 1014
984 if (is_broadcast) { 1015 if (is_broadcast) {
985 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " 1016 bat_dbg(DBG_BATMAN, bat_priv,
986 "ignoring all packets with broadcast source addr (sender: %pM" 1017 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
987 ")\n", ethhdr->h_source); 1018 ethhdr->h_source);
988 return; 1019 return;
989 } 1020 }
990 1021
@@ -1006,24 +1037,31 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1006 1037
1007 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); 1038 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1008 word = &(orig_neigh_node->bcast_own[offset]); 1039 word = &(orig_neigh_node->bcast_own[offset]);
1009 bit_mark(word, 1040 bat_set_bit(word,
1010 if_incoming_seqno - 1041 if_incoming_seqno -
1011 batman_ogm_packet->seqno - 2); 1042 batman_ogm_packet->seqno - 2);
1012 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 1043 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
1013 bit_packet_count(word); 1044 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
1014 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 1045 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1015 } 1046 }
1016 1047
1017 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " 1048 bat_dbg(DBG_BATMAN, bat_priv,
1018 "originator packet from myself (via neighbor)\n"); 1049 "Drop packet: originator packet from myself (via neighbor)\n");
1019 orig_node_free_ref(orig_neigh_node); 1050 orig_node_free_ref(orig_neigh_node);
1020 return; 1051 return;
1021 } 1052 }
1022 1053
1023 if (is_my_oldorig) { 1054 if (is_my_oldorig) {
1024 bat_dbg(DBG_BATMAN, bat_priv, 1055 bat_dbg(DBG_BATMAN, bat_priv,
1025 "Drop packet: ignoring all rebroadcast echos (sender: " 1056 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
1026 "%pM)\n", ethhdr->h_source); 1057 ethhdr->h_source);
1058 return;
1059 }
1060
1061 if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
1062 bat_dbg(DBG_BATMAN, bat_priv,
1063 "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
1064 ethhdr->h_source);
1027 return; 1065 return;
1028 } 1066 }
1029 1067
@@ -1031,13 +1069,13 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1031 if (!orig_node) 1069 if (!orig_node)
1032 return; 1070 return;
1033 1071
1034 is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet, 1072 is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
1035 if_incoming); 1073 if_incoming);
1036 1074
1037 if (is_duplicate == -1) { 1075 if (is_duplicate == -1) {
1038 bat_dbg(DBG_BATMAN, bat_priv, 1076 bat_dbg(DBG_BATMAN, bat_priv,
1039 "Drop packet: packet within seqno protection time " 1077 "Drop packet: packet within seqno protection time (sender: %pM)\n",
1040 "(sender: %pM)\n", ethhdr->h_source); 1078 ethhdr->h_source);
1041 goto out; 1079 goto out;
1042 } 1080 }
1043 1081
@@ -1051,6 +1089,10 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1051 if (router) 1089 if (router)
1052 router_router = orig_node_get_router(router->orig_node); 1090 router_router = orig_node_get_router(router->orig_node);
1053 1091
1092 if ((router && router->tq_avg != 0) &&
1093 (compare_eth(router->addr, ethhdr->h_source)))
1094 is_from_best_next_hop = true;
1095
1054 /* avoid temporary routing loops */ 1096 /* avoid temporary routing loops */
1055 if (router && router_router && 1097 if (router && router_router &&
1056 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && 1098 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
@@ -1058,8 +1100,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1058 batman_ogm_packet->prev_sender)) && 1100 batman_ogm_packet->prev_sender)) &&
1059 (compare_eth(router->addr, router_router->addr))) { 1101 (compare_eth(router->addr, router_router->addr))) {
1060 bat_dbg(DBG_BATMAN, bat_priv, 1102 bat_dbg(DBG_BATMAN, bat_priv,
1061 "Drop packet: ignoring all rebroadcast packets that " 1103 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
1062 "may make me loop (sender: %pM)\n", ethhdr->h_source); 1104 ethhdr->h_source);
1063 goto out; 1105 goto out;
1064 } 1106 }
1065 1107
@@ -1081,8 +1123,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1081 goto out_neigh; 1123 goto out_neigh;
1082 } 1124 }
1083 1125
1084 is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node, 1126 is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
1085 batman_ogm_packet, if_incoming); 1127 batman_ogm_packet, if_incoming);
1086 1128
1087 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); 1129 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
1088 1130
@@ -1091,20 +1133,21 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1091 if (is_bidirectional && 1133 if (is_bidirectional &&
1092 (!is_duplicate || 1134 (!is_duplicate ||
1093 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && 1135 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
1094 (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl)))) 1136 (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
1095 bat_ogm_orig_update(bat_priv, orig_node, ethhdr, 1137 bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
1096 batman_ogm_packet, if_incoming, 1138 batman_ogm_packet, if_incoming,
1097 tt_buff, is_duplicate); 1139 tt_buff, is_duplicate);
1098 1140
1099 /* is single hop (direct) neighbor */ 1141 /* is single hop (direct) neighbor */
1100 if (is_single_hop_neigh) { 1142 if (is_single_hop_neigh) {
1101 1143
1102 /* mark direct link on incoming interface */ 1144 /* mark direct link on incoming interface */
1103 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1145 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1104 1, if_incoming); 1146 is_single_hop_neigh, is_from_best_next_hop,
1147 if_incoming);
1105 1148
1106 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 1149 bat_dbg(DBG_BATMAN, bat_priv,
1107 "rebroadcast neighbor packet with direct link flag\n"); 1150 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
1108 goto out_neigh; 1151 goto out_neigh;
1109 } 1152 }
1110 1153
@@ -1123,7 +1166,9 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1123 1166
1124 bat_dbg(DBG_BATMAN, bat_priv, 1167 bat_dbg(DBG_BATMAN, bat_priv,
1125 "Forwarding packet: rebroadcast originator packet\n"); 1168 "Forwarding packet: rebroadcast originator packet\n");
1126 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming); 1169 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1170 is_single_hop_neigh, is_from_best_next_hop,
1171 if_incoming);
1127 1172
1128out_neigh: 1173out_neigh:
1129 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1174 if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1139,13 +1184,29 @@ out:
1139 orig_node_free_ref(orig_node); 1184 orig_node_free_ref(orig_node);
1140} 1185}
1141 1186
1142void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff, 1187static int bat_iv_ogm_receive(struct sk_buff *skb,
1143 int packet_len, struct hard_iface *if_incoming) 1188 struct hard_iface *if_incoming)
1144{ 1189{
1190 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
1145 struct batman_ogm_packet *batman_ogm_packet; 1191 struct batman_ogm_packet *batman_ogm_packet;
1146 int buff_pos = 0; 1192 struct ethhdr *ethhdr;
1147 unsigned char *tt_buff; 1193 int buff_pos = 0, packet_len;
1194 unsigned char *tt_buff, *packet_buff;
1195 bool ret;
1148 1196
1197 ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
1198 if (!ret)
1199 return NET_RX_DROP;
1200
1201 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
1202 * that does not have B.A.T.M.A.N. IV enabled ?
1203 */
1204 if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
1205 return NET_RX_DROP;
1206
1207 packet_len = skb_headlen(skb);
1208 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1209 packet_buff = skb->data;
1149 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 1210 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
1150 1211
1151 /* unpack the aggregated packets and process them one by one */ 1212 /* unpack the aggregated packets and process them one by one */
@@ -1155,16 +1216,50 @@ void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
1155 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); 1216 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1156 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); 1217 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1157 1218
1158 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; 1219 tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
1159 1220
1160 bat_ogm_process(ethhdr, batman_ogm_packet, 1221 bat_iv_ogm_process(ethhdr, batman_ogm_packet,
1161 tt_buff, if_incoming); 1222 tt_buff, if_incoming);
1162 1223
1163 buff_pos += BATMAN_OGM_LEN + 1224 buff_pos += BATMAN_OGM_HLEN +
1164 tt_len(batman_ogm_packet->tt_num_changes); 1225 tt_len(batman_ogm_packet->tt_num_changes);
1165 1226
1166 batman_ogm_packet = (struct batman_ogm_packet *) 1227 batman_ogm_packet = (struct batman_ogm_packet *)
1167 (packet_buff + buff_pos); 1228 (packet_buff + buff_pos);
1168 } while (bat_ogm_aggr_packet(buff_pos, packet_len, 1229 } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
1169 batman_ogm_packet->tt_num_changes)); 1230 batman_ogm_packet->tt_num_changes));
1231
1232 kfree_skb(skb);
1233 return NET_RX_SUCCESS;
1234}
1235
1236static struct bat_algo_ops batman_iv __read_mostly = {
1237 .name = "BATMAN IV",
1238 .bat_iface_enable = bat_iv_ogm_iface_enable,
1239 .bat_iface_disable = bat_iv_ogm_iface_disable,
1240 .bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
1241 .bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
1242 .bat_ogm_schedule = bat_iv_ogm_schedule,
1243 .bat_ogm_emit = bat_iv_ogm_emit,
1244};
1245
1246int __init bat_iv_init(void)
1247{
1248 int ret;
1249
1250 /* batman originator packet */
1251 ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
1252 if (ret < 0)
1253 goto out;
1254
1255 ret = bat_algo_register(&batman_iv);
1256 if (ret < 0)
1257 goto handler_unregister;
1258
1259 goto out;
1260
1261handler_unregister:
1262 recv_handler_unregister(BAT_IV_OGM);
1263out:
1264 return ret;
1170} 1265}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index c25492f7d665..5bc7b66d32dc 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \
63 .store = _store, \ 63 .store = _store, \
64}; 64};
65 65
66#define BAT_ATTR_STORE_BOOL(_name, _post_func) \ 66#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
67ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ 67ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
68 char *buff, size_t count) \ 68 char *buff, size_t count) \
69{ \ 69{ \
@@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
73 &bat_priv->_name, net_dev); \ 73 &bat_priv->_name, net_dev); \
74} 74}
75 75
76#define BAT_ATTR_SHOW_BOOL(_name) \ 76#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
77ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ 77ssize_t show_##_name(struct kobject *kobj, \
78 char *buff) \ 78 struct attribute *attr, char *buff) \
79{ \ 79{ \
80 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ 80 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
81 return sprintf(buff, "%s\n", \ 81 return sprintf(buff, "%s\n", \
@@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
83 "disabled" : "enabled"); \ 83 "disabled" : "enabled"); \
84} \ 84} \
85 85
86/* Use this, if you are going to turn a [name] in bat_priv on or off */ 86/* Use this, if you are going to turn a [name] in the soft-interface
87#define BAT_ATTR_BOOL(_name, _mode, _post_func) \ 87 * (bat_priv) on or off */
88 static BAT_ATTR_STORE_BOOL(_name, _post_func) \ 88#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
89 static BAT_ATTR_SHOW_BOOL(_name) \ 89 static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
90 static BAT_ATTR_SIF_SHOW_BOOL(_name) \
90 static BAT_ATTR(_name, _mode, show_##_name, store_##_name) 91 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
91 92
92 93
93#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ 94#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
94ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ 95ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
95 char *buff, size_t count) \ 96 char *buff, size_t count) \
96{ \ 97{ \
97 struct net_device *net_dev = kobj_to_netdev(kobj); \ 98 struct net_device *net_dev = kobj_to_netdev(kobj); \
98 struct bat_priv *bat_priv = netdev_priv(net_dev); \ 99 struct bat_priv *bat_priv = netdev_priv(net_dev); \
@@ -100,19 +101,62 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
100 attr, &bat_priv->_name, net_dev); \ 101 attr, &bat_priv->_name, net_dev); \
101} 102}
102 103
103#define BAT_ATTR_SHOW_UINT(_name) \ 104#define BAT_ATTR_SIF_SHOW_UINT(_name) \
104ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ 105ssize_t show_##_name(struct kobject *kobj, \
105 char *buff) \ 106 struct attribute *attr, char *buff) \
106{ \ 107{ \
107 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ 108 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
108 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ 109 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
109} \ 110} \
110 111
111/* Use this, if you are going to set [name] in bat_priv to unsigned integer 112/* Use this, if you are going to set [name] in the soft-interface
112 * values only */ 113 * (bat_priv) to an unsigned integer value */
113#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \ 114#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
114 static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ 115 static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
115 static BAT_ATTR_SHOW_UINT(_name) \ 116 static BAT_ATTR_SIF_SHOW_UINT(_name) \
117 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
118
119
120#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
121ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
122 char *buff, size_t count) \
123{ \
124 struct net_device *net_dev = kobj_to_netdev(kobj); \
125 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
126 ssize_t length; \
127 \
128 if (!hard_iface) \
129 return 0; \
130 \
131 length = __store_uint_attr(buff, count, _min, _max, _post_func, \
132 attr, &hard_iface->_name, net_dev); \
133 \
134 hardif_free_ref(hard_iface); \
135 return length; \
136}
137
138#define BAT_ATTR_HIF_SHOW_UINT(_name) \
139ssize_t show_##_name(struct kobject *kobj, \
140 struct attribute *attr, char *buff) \
141{ \
142 struct net_device *net_dev = kobj_to_netdev(kobj); \
143 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
144 ssize_t length; \
145 \
146 if (!hard_iface) \
147 return 0; \
148 \
149 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
150 \
151 hardif_free_ref(hard_iface); \
152 return length; \
153}
154
155/* Use this, if you are going to set [name] in hard_iface to an
156 * unsigned integer value*/
157#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
158 static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
159 static BAT_ATTR_HIF_SHOW_UINT(_name) \
116 static BAT_ATTR(_name, _mode, show_##_name, store_##_name) 160 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
117 161
118 162
@@ -149,7 +193,7 @@ static int store_bool_attr(char *buff, size_t count,
149 atomic_read(attr) == 1 ? "enabled" : "disabled", 193 atomic_read(attr) == 1 ? "enabled" : "disabled",
150 enabled == 1 ? "enabled" : "disabled"); 194 enabled == 1 ? "enabled" : "disabled");
151 195
152 atomic_set(attr, (unsigned)enabled); 196 atomic_set(attr, (unsigned int)enabled);
153 return count; 197 return count;
154} 198}
155 199
@@ -255,8 +299,8 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
255 buff[count - 1] = '\0'; 299 buff[count - 1] = '\0';
256 300
257 bat_info(net_dev, 301 bat_info(net_dev,
258 "Invalid parameter for 'vis mode' setting received: " 302 "Invalid parameter for 'vis mode' setting received: %s\n",
259 "%s\n", buff); 303 buff);
260 return -EINVAL; 304 return -EINVAL;
261 } 305 }
262 306
@@ -268,10 +312,17 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
268 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? 312 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
269 "client" : "server"); 313 "client" : "server");
270 314
271 atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); 315 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
272 return count; 316 return count;
273} 317}
274 318
319static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
320 char *buff)
321{
322 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
323 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
324}
325
275static void post_gw_deselect(struct net_device *net_dev) 326static void post_gw_deselect(struct net_device *net_dev)
276{ 327{
277 struct bat_priv *bat_priv = netdev_priv(net_dev); 328 struct bat_priv *bat_priv = netdev_priv(net_dev);
@@ -314,17 +365,17 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
314 gw_mode_tmp = GW_MODE_OFF; 365 gw_mode_tmp = GW_MODE_OFF;
315 366
316 if (strncmp(buff, GW_MODE_CLIENT_NAME, 367 if (strncmp(buff, GW_MODE_CLIENT_NAME,
317 strlen(GW_MODE_CLIENT_NAME)) == 0) 368 strlen(GW_MODE_CLIENT_NAME)) == 0)
318 gw_mode_tmp = GW_MODE_CLIENT; 369 gw_mode_tmp = GW_MODE_CLIENT;
319 370
320 if (strncmp(buff, GW_MODE_SERVER_NAME, 371 if (strncmp(buff, GW_MODE_SERVER_NAME,
321 strlen(GW_MODE_SERVER_NAME)) == 0) 372 strlen(GW_MODE_SERVER_NAME)) == 0)
322 gw_mode_tmp = GW_MODE_SERVER; 373 gw_mode_tmp = GW_MODE_SERVER;
323 374
324 if (gw_mode_tmp < 0) { 375 if (gw_mode_tmp < 0) {
325 bat_info(net_dev, 376 bat_info(net_dev,
326 "Invalid parameter for 'gw mode' setting received: " 377 "Invalid parameter for 'gw mode' setting received: %s\n",
327 "%s\n", buff); 378 buff);
328 return -EINVAL; 379 return -EINVAL;
329 } 380 }
330 381
@@ -347,7 +398,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
347 curr_gw_mode_str, buff); 398 curr_gw_mode_str, buff);
348 399
349 gw_deselect(bat_priv); 400 gw_deselect(bat_priv);
350 atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); 401 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
351 return count; 402 return count;
352} 403}
353 404
@@ -377,28 +428,36 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
377 return gw_bandwidth_set(net_dev, buff, count); 428 return gw_bandwidth_set(net_dev, buff, count);
378} 429}
379 430
380BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); 431BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
381BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); 432BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); 433#ifdef CONFIG_BATMAN_ADV_BLA
383BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); 434BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
435#endif
436BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
437BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
384static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); 438static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
439static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
385static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); 440static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
386BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); 441BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
387BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); 442BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
388BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, 443BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
389 post_gw_deselect); 444 post_gw_deselect);
390static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, 445static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
391 store_gw_bwidth); 446 store_gw_bwidth);
392#ifdef CONFIG_BATMAN_ADV_DEBUG 447#ifdef CONFIG_BATMAN_ADV_DEBUG
393BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); 448BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
394#endif 449#endif
395 450
396static struct bat_attribute *mesh_attrs[] = { 451static struct bat_attribute *mesh_attrs[] = {
397 &bat_attr_aggregated_ogms, 452 &bat_attr_aggregated_ogms,
398 &bat_attr_bonding, 453 &bat_attr_bonding,
454#ifdef CONFIG_BATMAN_ADV_BLA
455 &bat_attr_bridge_loop_avoidance,
456#endif
399 &bat_attr_fragmentation, 457 &bat_attr_fragmentation,
400 &bat_attr_ap_isolation, 458 &bat_attr_ap_isolation,
401 &bat_attr_vis_mode, 459 &bat_attr_vis_mode,
460 &bat_attr_routing_algo,
402 &bat_attr_gw_mode, 461 &bat_attr_gw_mode,
403 &bat_attr_orig_interval, 462 &bat_attr_orig_interval,
404 &bat_attr_hop_penalty, 463 &bat_attr_hop_penalty,
@@ -493,8 +552,8 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
493 buff[count - 1] = '\0'; 552 buff[count - 1] = '\0';
494 553
495 if (strlen(buff) >= IFNAMSIZ) { 554 if (strlen(buff) >= IFNAMSIZ) {
496 pr_err("Invalid parameter for 'mesh_iface' setting received: " 555 pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
497 "interface name too long '%s'\n", buff); 556 buff);
498 hardif_free_ref(hard_iface); 557 hardif_free_ref(hard_iface);
499 return -EINVAL; 558 return -EINVAL;
500 } 559 }
@@ -668,8 +727,8 @@ out:
668 hardif_free_ref(primary_if); 727 hardif_free_ref(primary_if);
669 728
670 if (ret) 729 if (ret)
671 bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send " 730 bat_dbg(DBG_BATMAN, bat_priv,
672 "uevent for (%s,%s,%s) event (err: %d)\n", 731 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
673 uev_type_str[type], uev_action_str[action], 732 uev_type_str[type], uev_action_str[action],
674 (action == UEV_DEL ? "NULL" : data), ret); 733 (action == UEV_DEL ? "NULL" : data), ret);
675 return ret; 734 return ret;
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index a3f75a723c56..fece77ae586e 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 9bc63b209b3f..07ae6e1b8aca 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
@@ -24,100 +24,13 @@
24 24
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */
29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno)
31{
32 int32_t diff, word_offset, word_num;
33
34 diff = last_seqno - curr_seqno;
35 if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
36 return 0;
37 } else {
38 /* which word */
39 word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
40 /* which position in the selected word */
41 word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
42
43 if (test_bit(word_offset, &seq_bits[word_num]))
44 return 1;
45 else
46 return 0;
47 }
48}
49
50/* turn corresponding bit on, so we can remember that we got the packet */
51void bit_mark(unsigned long *seq_bits, int32_t n)
52{
53 int32_t word_offset, word_num;
54
55 /* if too old, just drop it */
56 if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
57 return;
58
59 /* which word */
60 word_num = n / WORD_BIT_SIZE;
61 /* which position in the selected word */
62 word_offset = n % WORD_BIT_SIZE;
63
64 set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
65}
66
67/* shift the packet array by n places. */ 27/* shift the packet array by n places. */
68static void bit_shift(unsigned long *seq_bits, int32_t n) 28static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
69{ 29{
70 int32_t word_offset, word_num;
71 int32_t i;
72
73 if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) 30 if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
74 return; 31 return;
75 32
76 word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */ 33 bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
77 word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */
78
79 for (i = NUM_WORDS - 1; i > word_num; i--) {
80 /* going from old to new, so we don't overwrite the data we copy
81 * from.
82 *
83 * left is high, right is low: FEDC BA98 7654 3210
84 * ^^ ^^
85 * vvvv
86 * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
87 * word_offset==WORD_BIT_SIZE/2 ????? in this example.
88 * (=24 bits)
89 *
90 * our desired output would be: 9876 5432 1000 0000
91 * */
92
93 seq_bits[i] =
94 (seq_bits[i - word_num] << word_offset) +
95 /* take the lower port from the left half, shift it left
96 * to its final position */
97 (seq_bits[i - word_num - 1] >>
98 (WORD_BIT_SIZE-word_offset));
99 /* and the upper part of the right half and shift it left to
100 * its position */
101 /* for our example that would be: word[0] = 9800 + 0076 =
102 * 9876 */
103 }
104 /* now for our last word, i==word_num, we only have its "left" half.
105 * that's the 1000 word in our example.*/
106
107 seq_bits[i] = (seq_bits[i - word_num] << word_offset);
108
109 /* pad the rest with 0, if there is anything */
110 i--;
111
112 for (; i >= 0; i--)
113 seq_bits[i] = 0;
114}
115
116static void bit_reset_window(unsigned long *seq_bits)
117{
118 int i;
119 for (i = 0; i < NUM_WORDS; i++)
120 seq_bits[i] = 0;
121} 34}
122 35
123 36
@@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
137 50
138 if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { 51 if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
139 if (set_mark) 52 if (set_mark)
140 bit_mark(seq_bits, -seq_num_diff); 53 bat_set_bit(seq_bits, -seq_num_diff);
141 return 0; 54 return 0;
142 } 55 }
143 56
@@ -145,23 +58,23 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
145 * set the mark if required */ 58 * set the mark if required */
146 59
147 if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { 60 if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
148 bit_shift(seq_bits, seq_num_diff); 61 bat_bitmap_shift_left(seq_bits, seq_num_diff);
149 62
150 if (set_mark) 63 if (set_mark)
151 bit_mark(seq_bits, 0); 64 bat_set_bit(seq_bits, 0);
152 return 1; 65 return 1;
153 } 66 }
154 67
155 /* sequence number is much newer, probably missed a lot of packets */ 68 /* sequence number is much newer, probably missed a lot of packets */
156 69
157 if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) 70 if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) &&
158 && (seq_num_diff < EXPECTED_SEQNO_RANGE)) { 71 (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
159 bat_dbg(DBG_BATMAN, bat_priv, 72 bat_dbg(DBG_BATMAN, bat_priv,
160 "We missed a lot of packets (%i) !\n", 73 "We missed a lot of packets (%i) !\n",
161 seq_num_diff - 1); 74 seq_num_diff - 1);
162 bit_reset_window(seq_bits); 75 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
163 if (set_mark) 76 if (set_mark)
164 bit_mark(seq_bits, 0); 77 bat_set_bit(seq_bits, 0);
165 return 1; 78 return 1;
166 } 79 }
167 80
@@ -170,15 +83,15 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
170 * packet should be dropped without calling this function if the 83 * packet should be dropped without calling this function if the
171 * seqno window is protected. */ 84 * seqno window is protected. */
172 85
173 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 86 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
174 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 87 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
175 88
176 bat_dbg(DBG_BATMAN, bat_priv, 89 bat_dbg(DBG_BATMAN, bat_priv,
177 "Other host probably restarted!\n"); 90 "Other host probably restarted!\n");
178 91
179 bit_reset_window(seq_bits); 92 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
180 if (set_mark) 93 if (set_mark)
181 bit_mark(seq_bits, 0); 94 bat_set_bit(seq_bits, 0);
182 95
183 return 1; 96 return 1;
184 } 97 }
@@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
186 /* never reached */ 99 /* never reached */
187 return 0; 100 return 0;
188} 101}
189
190/* count the hamming weight, how many good packets did we receive? just count
191 * the 1's.
192 */
193int bit_packet_count(const unsigned long *seq_bits)
194{
195 int i, hamming = 0;
196
197 for (i = 0; i < NUM_WORDS; i++)
198 hamming += hweight_long(seq_bits[i]);
199
200 return hamming;
201}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 9c04422aeb07..1835c15cda41 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
@@ -22,23 +22,33 @@
22#ifndef _NET_BATMAN_ADV_BITARRAY_H_ 22#ifndef _NET_BATMAN_ADV_BITARRAY_H_
23#define _NET_BATMAN_ADV_BITARRAY_H_ 23#define _NET_BATMAN_ADV_BITARRAY_H_
24 24
25#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
26
27/* returns true if the corresponding bit in the given seq_bits indicates true 25/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 26 * and curr_seqno is within range of last_seqno */
29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, 27static inline int bat_test_bit(const unsigned long *seq_bits,
30 uint32_t curr_seqno); 28 uint32_t last_seqno, uint32_t curr_seqno)
29{
30 int32_t diff;
31
32 diff = last_seqno - curr_seqno;
33 if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
34 return 0;
35 else
36 return test_bit(diff, seq_bits);
37}
31 38
32/* turn corresponding bit on, so we can remember that we got the packet */ 39/* turn corresponding bit on, so we can remember that we got the packet */
33void bit_mark(unsigned long *seq_bits, int32_t n); 40static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
41{
42 /* if too old, just drop it */
43 if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
44 return;
34 45
46 set_bit(n, seq_bits); /* turn the position on */
47}
35 48
36/* receive and process one packet, returns 1 if received seq_num is considered 49/* receive and process one packet, returns 1 if received seq_num is considered
37 * new, 0 if old */ 50 * new, 0 if old */
38int bit_get_packet(void *priv, unsigned long *seq_bits, 51int bit_get_packet(void *priv, unsigned long *seq_bits,
39 int32_t seq_num_diff, int set_mark); 52 int32_t seq_num_diff, int set_mark);
40 53
41/* count the hamming weight, how many good packets did we receive? */
42int bit_packet_count(const unsigned long *seq_bits);
43
44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ 54#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 000000000000..8bf97515a77d
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1580 @@
1/*
2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "hash.h"
24#include "hard-interface.h"
25#include "originator.h"
26#include "bridge_loop_avoidance.h"
27#include "translation-table.h"
28#include "send.h"
29
30#include <linux/etherdevice.h>
31#include <linux/crc16.h>
32#include <linux/if_arp.h>
33#include <net/arp.h>
34#include <linux/if_vlan.h>
35
36static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
37
38static void bla_periodic_work(struct work_struct *work);
39static void bla_send_announce(struct bat_priv *bat_priv,
40 struct backbone_gw *backbone_gw);
41
42/* return the index of the claim */
43static inline uint32_t choose_claim(const void *data, uint32_t size)
44{
45 const unsigned char *key = data;
46 uint32_t hash = 0;
47 size_t i;
48
49 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
50 hash += key[i];
51 hash += (hash << 10);
52 hash ^= (hash >> 6);
53 }
54
55 hash += (hash << 3);
56 hash ^= (hash >> 11);
57 hash += (hash << 15);
58
59 return hash % size;
60}
61
62/* return the index of the backbone gateway */
63static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
64{
65 const unsigned char *key = data;
66 uint32_t hash = 0;
67 size_t i;
68
69 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
70 hash += key[i];
71 hash += (hash << 10);
72 hash ^= (hash >> 6);
73 }
74
75 hash += (hash << 3);
76 hash ^= (hash >> 11);
77 hash += (hash << 15);
78
79 return hash % size;
80}
81
82
83/* compares address and vid of two backbone gws */
84static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
85{
86 const void *data1 = container_of(node, struct backbone_gw,
87 hash_entry);
88
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
90}
91
92/* compares address and vid of two claims */
93static int compare_claim(const struct hlist_node *node, const void *data2)
94{
95 const void *data1 = container_of(node, struct claim,
96 hash_entry);
97
98 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
99}
100
101/* free a backbone gw */
102static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
103{
104 if (atomic_dec_and_test(&backbone_gw->refcount))
105 kfree_rcu(backbone_gw, rcu);
106}
107
108/* finally deinitialize the claim */
109static void claim_free_rcu(struct rcu_head *rcu)
110{
111 struct claim *claim;
112
113 claim = container_of(rcu, struct claim, rcu);
114
115 backbone_gw_free_ref(claim->backbone_gw);
116 kfree(claim);
117}
118
119/* free a claim, call claim_free_rcu if its the last reference */
120static void claim_free_ref(struct claim *claim)
121{
122 if (atomic_dec_and_test(&claim->refcount))
123 call_rcu(&claim->rcu, claim_free_rcu);
124}
125
126/**
127 * @bat_priv: the bat priv with all the soft interface information
128 * @data: search data (may be local/static data)
129 *
130 * looks for a claim in the hash, and returns it if found
131 * or NULL otherwise.
132 */
133static struct claim *claim_hash_find(struct bat_priv *bat_priv,
134 struct claim *data)
135{
136 struct hashtable_t *hash = bat_priv->claim_hash;
137 struct hlist_head *head;
138 struct hlist_node *node;
139 struct claim *claim;
140 struct claim *claim_tmp = NULL;
141 int index;
142
143 if (!hash)
144 return NULL;
145
146 index = choose_claim(data, hash->size);
147 head = &hash->table[index];
148
149 rcu_read_lock();
150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
151 if (!compare_claim(&claim->hash_entry, data))
152 continue;
153
154 if (!atomic_inc_not_zero(&claim->refcount))
155 continue;
156
157 claim_tmp = claim;
158 break;
159 }
160 rcu_read_unlock();
161
162 return claim_tmp;
163}
164
165/**
166 * @bat_priv: the bat priv with all the soft interface information
167 * @addr: the address of the originator
168 * @vid: the VLAN ID
169 *
170 * looks for a claim in the hash, and returns it if found
171 * or NULL otherwise.
172 */
173static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
174 uint8_t *addr, short vid)
175{
176 struct hashtable_t *hash = bat_priv->backbone_hash;
177 struct hlist_head *head;
178 struct hlist_node *node;
179 struct backbone_gw search_entry, *backbone_gw;
180 struct backbone_gw *backbone_gw_tmp = NULL;
181 int index;
182
183 if (!hash)
184 return NULL;
185
186 memcpy(search_entry.orig, addr, ETH_ALEN);
187 search_entry.vid = vid;
188
189 index = choose_backbone_gw(&search_entry, hash->size);
190 head = &hash->table[index];
191
192 rcu_read_lock();
193 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
194 if (!compare_backbone_gw(&backbone_gw->hash_entry,
195 &search_entry))
196 continue;
197
198 if (!atomic_inc_not_zero(&backbone_gw->refcount))
199 continue;
200
201 backbone_gw_tmp = backbone_gw;
202 break;
203 }
204 rcu_read_unlock();
205
206 return backbone_gw_tmp;
207}
208
209/* delete all claims for a backbone */
210static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
211{
212 struct hashtable_t *hash;
213 struct hlist_node *node, *node_tmp;
214 struct hlist_head *head;
215 struct claim *claim;
216 int i;
217 spinlock_t *list_lock; /* protects write access to the hash lists */
218
219 hash = backbone_gw->bat_priv->claim_hash;
220 if (!hash)
221 return;
222
223 for (i = 0; i < hash->size; i++) {
224 head = &hash->table[i];
225 list_lock = &hash->list_locks[i];
226
227 spin_lock_bh(list_lock);
228 hlist_for_each_entry_safe(claim, node, node_tmp,
229 head, hash_entry) {
230
231 if (claim->backbone_gw != backbone_gw)
232 continue;
233
234 claim_free_ref(claim);
235 hlist_del_rcu(node);
236 }
237 spin_unlock_bh(list_lock);
238 }
239
240 /* all claims gone, intialize CRC */
241 backbone_gw->crc = BLA_CRC_INIT;
242}
243
244/**
245 * @bat_priv: the bat priv with all the soft interface information
246 * @orig: the mac address to be announced within the claim
247 * @vid: the VLAN ID
248 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
249 *
250 * sends a claim frame according to the provided info.
251 */
252static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
253 short vid, int claimtype)
254{
255 struct sk_buff *skb;
256 struct ethhdr *ethhdr;
257 struct hard_iface *primary_if;
258 struct net_device *soft_iface;
259 uint8_t *hw_src;
260 struct bla_claim_dst local_claim_dest;
261 uint32_t zeroip = 0;
262
263 primary_if = primary_if_get_selected(bat_priv);
264 if (!primary_if)
265 return;
266
267 memcpy(&local_claim_dest, &bat_priv->claim_dest,
268 sizeof(local_claim_dest));
269 local_claim_dest.type = claimtype;
270
271 soft_iface = primary_if->soft_iface;
272
273 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
274 /* IP DST: 0.0.0.0 */
275 zeroip,
276 primary_if->soft_iface,
277 /* IP SRC: 0.0.0.0 */
278 zeroip,
279 /* Ethernet DST: Broadcast */
280 NULL,
281 /* Ethernet SRC/HW SRC: originator mac */
282 primary_if->net_dev->dev_addr,
283 /* HW DST: FF:43:05:XX:00:00
284 * with XX = claim type
285 * and YY:YY = group id
286 */
287 (uint8_t *)&local_claim_dest);
288
289 if (!skb)
290 goto out;
291
292 ethhdr = (struct ethhdr *)skb->data;
293 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
294
295 /* now we pretend that the client would have sent this ... */
296 switch (claimtype) {
297 case CLAIM_TYPE_ADD:
298 /* normal claim frame
299 * set Ethernet SRC to the clients mac
300 */
301 memcpy(ethhdr->h_source, mac, ETH_ALEN);
302 bat_dbg(DBG_BLA, bat_priv,
303 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
304 break;
305 case CLAIM_TYPE_DEL:
306 /* unclaim frame
307 * set HW SRC to the clients mac
308 */
309 memcpy(hw_src, mac, ETH_ALEN);
310 bat_dbg(DBG_BLA, bat_priv,
311 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
312 break;
313 case CLAIM_TYPE_ANNOUNCE:
314 /* announcement frame
315 * set HW SRC to the special mac containg the crc
316 */
317 memcpy(hw_src, mac, ETH_ALEN);
318 bat_dbg(DBG_BLA, bat_priv,
319 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
320 ethhdr->h_source, vid);
321 break;
322 case CLAIM_TYPE_REQUEST:
323 /* request frame
324 * set HW SRC to the special mac containg the crc
325 */
326 memcpy(hw_src, mac, ETH_ALEN);
327 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
328 bat_dbg(DBG_BLA, bat_priv,
329 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
330 ethhdr->h_source, ethhdr->h_dest, vid);
331 break;
332
333 }
334
335 if (vid != -1)
336 skb = vlan_insert_tag(skb, vid);
337
338 skb_reset_mac_header(skb);
339 skb->protocol = eth_type_trans(skb, soft_iface);
340 bat_priv->stats.rx_packets++;
341 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
342 soft_iface->last_rx = jiffies;
343
344 netif_rx(skb);
345out:
346 if (primary_if)
347 hardif_free_ref(primary_if);
348}
349
350/**
351 * @bat_priv: the bat priv with all the soft interface information
352 * @orig: the mac address of the originator
353 * @vid: the VLAN ID
354 *
355 * searches for the backbone gw or creates a new one if it could not
356 * be found.
357 */
358static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
359 uint8_t *orig, short vid)
360{
361 struct backbone_gw *entry;
362 struct orig_node *orig_node;
363 int hash_added;
364
365 entry = backbone_hash_find(bat_priv, orig, vid);
366
367 if (entry)
368 return entry;
369
370 bat_dbg(DBG_BLA, bat_priv,
371 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
372 orig, vid);
373
374 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
375 if (!entry)
376 return NULL;
377
378 entry->vid = vid;
379 entry->lasttime = jiffies;
380 entry->crc = BLA_CRC_INIT;
381 entry->bat_priv = bat_priv;
382 atomic_set(&entry->request_sent, 0);
383 memcpy(entry->orig, orig, ETH_ALEN);
384
385 /* one for the hash, one for returning */
386 atomic_set(&entry->refcount, 2);
387
388 hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
389 choose_backbone_gw, entry, &entry->hash_entry);
390
391 if (unlikely(hash_added != 0)) {
392 /* hash failed, free the structure */
393 kfree(entry);
394 return NULL;
395 }
396
397 /* this is a gateway now, remove any tt entries */
398 orig_node = orig_hash_find(bat_priv, orig);
399 if (orig_node) {
400 tt_global_del_orig(bat_priv, orig_node,
401 "became a backbone gateway");
402 orig_node_free_ref(orig_node);
403 }
404 return entry;
405}
406
407/* update or add the own backbone gw to make sure we announce
408 * where we receive other backbone gws
409 */
410static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
411 struct hard_iface *primary_if,
412 short vid)
413{
414 struct backbone_gw *backbone_gw;
415
416 backbone_gw = bla_get_backbone_gw(bat_priv,
417 primary_if->net_dev->dev_addr, vid);
418 if (unlikely(!backbone_gw))
419 return;
420
421 backbone_gw->lasttime = jiffies;
422 backbone_gw_free_ref(backbone_gw);
423}
424
425/**
426 * @bat_priv: the bat priv with all the soft interface information
427 * @vid: the vid where the request came on
428 *
429 * Repeat all of our own claims, and finally send an ANNOUNCE frame
430 * to allow the requester another check if the CRC is correct now.
431 */
432static void bla_answer_request(struct bat_priv *bat_priv,
433 struct hard_iface *primary_if, short vid)
434{
435 struct hlist_node *node;
436 struct hlist_head *head;
437 struct hashtable_t *hash;
438 struct claim *claim;
439 struct backbone_gw *backbone_gw;
440 int i;
441
442 bat_dbg(DBG_BLA, bat_priv,
443 "bla_answer_request(): received a claim request, send all of our own claims again\n");
444
445 backbone_gw = backbone_hash_find(bat_priv,
446 primary_if->net_dev->dev_addr, vid);
447 if (!backbone_gw)
448 return;
449
450 hash = bat_priv->claim_hash;
451 for (i = 0; i < hash->size; i++) {
452 head = &hash->table[i];
453
454 rcu_read_lock();
455 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
456 /* only own claims are interesting */
457 if (claim->backbone_gw != backbone_gw)
458 continue;
459
460 bla_send_claim(bat_priv, claim->addr, claim->vid,
461 CLAIM_TYPE_ADD);
462 }
463 rcu_read_unlock();
464 }
465
466 /* finally, send an announcement frame */
467 bla_send_announce(bat_priv, backbone_gw);
468 backbone_gw_free_ref(backbone_gw);
469}
470
471/**
472 * @backbone_gw: the backbone gateway from whom we are out of sync
473 *
474 * When the crc is wrong, ask the backbone gateway for a full table update.
475 * After the request, it will repeat all of his own claims and finally
476 * send an announcement claim with which we can check again.
477 */
478static void bla_send_request(struct backbone_gw *backbone_gw)
479{
480 /* first, remove all old entries */
481 bla_del_backbone_claims(backbone_gw);
482
483 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
484 "Sending REQUEST to %pM\n",
485 backbone_gw->orig);
486
487 /* send request */
488 bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
489 backbone_gw->vid, CLAIM_TYPE_REQUEST);
490
491 /* no local broadcasts should be sent or received, for now. */
492 if (!atomic_read(&backbone_gw->request_sent)) {
493 atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
494 atomic_set(&backbone_gw->request_sent, 1);
495 }
496}
497
498/**
499 * @bat_priv: the bat priv with all the soft interface information
500 * @backbone_gw: our backbone gateway which should be announced
501 *
502 * This function sends an announcement. It is called from multiple
503 * places.
504 */
505static void bla_send_announce(struct bat_priv *bat_priv,
506 struct backbone_gw *backbone_gw)
507{
508 uint8_t mac[ETH_ALEN];
509 uint16_t crc;
510
511 memcpy(mac, announce_mac, 4);
512 crc = htons(backbone_gw->crc);
513 memcpy(&mac[4], (uint8_t *)&crc, 2);
514
515 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
516
517}
518
519/**
520 * @bat_priv: the bat priv with all the soft interface information
521 * @mac: the mac address of the claim
522 * @vid: the VLAN ID of the frame
523 * @backbone_gw: the backbone gateway which claims it
524 *
525 * Adds a claim in the claim hash.
526 */
527static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
528 const short vid, struct backbone_gw *backbone_gw)
529{
530 struct claim *claim;
531 struct claim search_claim;
532 int hash_added;
533
534 memcpy(search_claim.addr, mac, ETH_ALEN);
535 search_claim.vid = vid;
536 claim = claim_hash_find(bat_priv, &search_claim);
537
538 /* create a new claim entry if it does not exist yet. */
539 if (!claim) {
540 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
541 if (!claim)
542 return;
543
544 memcpy(claim->addr, mac, ETH_ALEN);
545 claim->vid = vid;
546 claim->lasttime = jiffies;
547 claim->backbone_gw = backbone_gw;
548
549 atomic_set(&claim->refcount, 2);
550 bat_dbg(DBG_BLA, bat_priv,
551 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
552 mac, vid);
553 hash_added = hash_add(bat_priv->claim_hash, compare_claim,
554 choose_claim, claim, &claim->hash_entry);
555
556 if (unlikely(hash_added != 0)) {
557 /* only local changes happened. */
558 kfree(claim);
559 return;
560 }
561 } else {
562 claim->lasttime = jiffies;
563 if (claim->backbone_gw == backbone_gw)
564 /* no need to register a new backbone */
565 goto claim_free_ref;
566
567 bat_dbg(DBG_BLA, bat_priv,
568 "bla_add_claim(): changing ownership for %pM, vid %d\n",
569 mac, vid);
570
571 claim->backbone_gw->crc ^=
572 crc16(0, claim->addr, ETH_ALEN);
573 backbone_gw_free_ref(claim->backbone_gw);
574
575 }
576 /* set (new) backbone gw */
577 atomic_inc(&backbone_gw->refcount);
578 claim->backbone_gw = backbone_gw;
579
580 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
581 backbone_gw->lasttime = jiffies;
582
583claim_free_ref:
584 claim_free_ref(claim);
585}
586
587/* Delete a claim from the claim hash which has the
588 * given mac address and vid.
589 */
590static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
591 const short vid)
592{
593 struct claim search_claim, *claim;
594
595 memcpy(search_claim.addr, mac, ETH_ALEN);
596 search_claim.vid = vid;
597 claim = claim_hash_find(bat_priv, &search_claim);
598 if (!claim)
599 return;
600
601 bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
602
603 hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
604 claim_free_ref(claim); /* reference from the hash is gone */
605
606 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
607
608 /* don't need the reference from hash_find() anymore */
609 claim_free_ref(claim);
610}
611
612/* check for ANNOUNCE frame, return 1 if handled */
613static int handle_announce(struct bat_priv *bat_priv,
614 uint8_t *an_addr, uint8_t *backbone_addr, short vid)
615{
616 struct backbone_gw *backbone_gw;
617 uint16_t crc;
618
619 if (memcmp(an_addr, announce_mac, 4) != 0)
620 return 0;
621
622 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
623
624 if (unlikely(!backbone_gw))
625 return 1;
626
627
628 /* handle as ANNOUNCE frame */
629 backbone_gw->lasttime = jiffies;
630 crc = ntohs(*((uint16_t *)(&an_addr[4])));
631
632 bat_dbg(DBG_BLA, bat_priv,
633 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
634 vid, backbone_gw->orig, crc);
635
636 if (backbone_gw->crc != crc) {
637 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
638 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
639 backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
640 crc);
641
642 bla_send_request(backbone_gw);
643 } else {
644 /* if we have sent a request and the crc was OK,
645 * we can allow traffic again.
646 */
647 if (atomic_read(&backbone_gw->request_sent)) {
648 atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
649 atomic_set(&backbone_gw->request_sent, 0);
650 }
651 }
652
653 backbone_gw_free_ref(backbone_gw);
654 return 1;
655}
656
657/* check for REQUEST frame, return 1 if handled */
658static int handle_request(struct bat_priv *bat_priv,
659 struct hard_iface *primary_if,
660 uint8_t *backbone_addr,
661 struct ethhdr *ethhdr, short vid)
662{
663 /* check for REQUEST frame */
664 if (!compare_eth(backbone_addr, ethhdr->h_dest))
665 return 0;
666
667 /* sanity check, this should not happen on a normal switch,
668 * we ignore it in this case.
669 */
670 if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
671 return 1;
672
673 bat_dbg(DBG_BLA, bat_priv,
674 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
675 vid, ethhdr->h_source);
676
677 bla_answer_request(bat_priv, primary_if, vid);
678 return 1;
679}
680
681/* check for UNCLAIM frame, return 1 if handled */
682static int handle_unclaim(struct bat_priv *bat_priv,
683 struct hard_iface *primary_if,
684 uint8_t *backbone_addr,
685 uint8_t *claim_addr, short vid)
686{
687 struct backbone_gw *backbone_gw;
688
689 /* unclaim in any case if it is our own */
690 if (primary_if && compare_eth(backbone_addr,
691 primary_if->net_dev->dev_addr))
692 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
693
694 backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
695
696 if (!backbone_gw)
697 return 1;
698
699 /* this must be an UNCLAIM frame */
700 bat_dbg(DBG_BLA, bat_priv,
701 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
702 claim_addr, vid, backbone_gw->orig);
703
704 bla_del_claim(bat_priv, claim_addr, vid);
705 backbone_gw_free_ref(backbone_gw);
706 return 1;
707}
708
709/* check for CLAIM frame, return 1 if handled */
710static int handle_claim(struct bat_priv *bat_priv,
711 struct hard_iface *primary_if, uint8_t *backbone_addr,
712 uint8_t *claim_addr, short vid)
713{
714 struct backbone_gw *backbone_gw;
715
716 /* register the gateway if not yet available, and add the claim. */
717
718 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
719
720 if (unlikely(!backbone_gw))
721 return 1;
722
723 /* this must be a CLAIM frame */
724 bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
725 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
726 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
727
728 /* TODO: we could call something like tt_local_del() here. */
729
730 backbone_gw_free_ref(backbone_gw);
731 return 1;
732}
733
734/**
735 * @bat_priv: the bat priv with all the soft interface information
736 * @hw_src: the Hardware source in the ARP Header
737 * @hw_dst: the Hardware destination in the ARP Header
738 * @ethhdr: pointer to the Ethernet header of the claim frame
739 *
740 * checks if it is a claim packet and if its on the same group.
741 * This function also applies the group ID of the sender
742 * if it is in the same mesh.
743 *
744 * returns:
745 * 2 - if it is a claim packet and on the same group
746 * 1 - if is a claim packet from another group
747 * 0 - if it is not a claim packet
748 */
749static int check_claim_group(struct bat_priv *bat_priv,
750 struct hard_iface *primary_if,
751 uint8_t *hw_src, uint8_t *hw_dst,
752 struct ethhdr *ethhdr)
753{
754 uint8_t *backbone_addr;
755 struct orig_node *orig_node;
756 struct bla_claim_dst *bla_dst, *bla_dst_own;
757
758 bla_dst = (struct bla_claim_dst *)hw_dst;
759 bla_dst_own = &bat_priv->claim_dest;
760
761 /* check if it is a claim packet in general */
762 if (memcmp(bla_dst->magic, bla_dst_own->magic,
763 sizeof(bla_dst->magic)) != 0)
764 return 0;
765
766 /* if announcement packet, use the source,
767 * otherwise assume it is in the hw_src
768 */
769 switch (bla_dst->type) {
770 case CLAIM_TYPE_ADD:
771 backbone_addr = hw_src;
772 break;
773 case CLAIM_TYPE_REQUEST:
774 case CLAIM_TYPE_ANNOUNCE:
775 case CLAIM_TYPE_DEL:
776 backbone_addr = ethhdr->h_source;
777 break;
778 default:
779 return 0;
780 }
781
782 /* don't accept claim frames from ourselves */
783 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
784 return 0;
785
786 /* if its already the same group, it is fine. */
787 if (bla_dst->group == bla_dst_own->group)
788 return 2;
789
790 /* lets see if this originator is in our mesh */
791 orig_node = orig_hash_find(bat_priv, backbone_addr);
792
793 /* dont accept claims from gateways which are not in
794 * the same mesh or group.
795 */
796 if (!orig_node)
797 return 1;
798
799 /* if our mesh friends mac is bigger, use it for ourselves. */
800 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
801 bat_dbg(DBG_BLA, bat_priv,
802 "taking other backbones claim group: %04x\n",
803 ntohs(bla_dst->group));
804 bla_dst_own->group = bla_dst->group;
805 }
806
807 orig_node_free_ref(orig_node);
808
809 return 2;
810}
811
812
813/**
814 * @bat_priv: the bat priv with all the soft interface information
815 * @skb: the frame to be checked
816 *
817 * Check if this is a claim frame, and process it accordingly.
818 *
819 * returns 1 if it was a claim frame, otherwise return 0 to
820 * tell the callee that it can use the frame on its own.
821 */
822static int bla_process_claim(struct bat_priv *bat_priv,
823 struct hard_iface *primary_if,
824 struct sk_buff *skb)
825{
826 struct ethhdr *ethhdr;
827 struct vlan_ethhdr *vhdr;
828 struct arphdr *arphdr;
829 uint8_t *hw_src, *hw_dst;
830 struct bla_claim_dst *bla_dst;
831 uint16_t proto;
832 int headlen;
833 short vid = -1;
834 int ret;
835
836 ethhdr = (struct ethhdr *)skb_mac_header(skb);
837
838 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
839 vhdr = (struct vlan_ethhdr *)ethhdr;
840 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
841 proto = ntohs(vhdr->h_vlan_encapsulated_proto);
842 headlen = sizeof(*vhdr);
843 } else {
844 proto = ntohs(ethhdr->h_proto);
845 headlen = ETH_HLEN;
846 }
847
848 if (proto != ETH_P_ARP)
849 return 0; /* not a claim frame */
850
851 /* this must be a ARP frame. check if it is a claim. */
852
853 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
854 return 0;
855
856 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
857 ethhdr = (struct ethhdr *)skb_mac_header(skb);
858 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
859
860 /* Check whether the ARP frame carries a valid
861 * IP information
862 */
863
864 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
865 return 0;
866 if (arphdr->ar_pro != htons(ETH_P_IP))
867 return 0;
868 if (arphdr->ar_hln != ETH_ALEN)
869 return 0;
870 if (arphdr->ar_pln != 4)
871 return 0;
872
873 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
874 hw_dst = hw_src + ETH_ALEN + 4;
875 bla_dst = (struct bla_claim_dst *)hw_dst;
876
877 /* check if it is a claim frame. */
878 ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
879 if (ret == 1)
880 bat_dbg(DBG_BLA, bat_priv,
881 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
882 ethhdr->h_source, vid, hw_src, hw_dst);
883
884 if (ret < 2)
885 return ret;
886
887 /* become a backbone gw ourselves on this vlan if not happened yet */
888 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
889
890 /* check for the different types of claim frames ... */
891 switch (bla_dst->type) {
892 case CLAIM_TYPE_ADD:
893 if (handle_claim(bat_priv, primary_if, hw_src,
894 ethhdr->h_source, vid))
895 return 1;
896 break;
897 case CLAIM_TYPE_DEL:
898 if (handle_unclaim(bat_priv, primary_if,
899 ethhdr->h_source, hw_src, vid))
900 return 1;
901 break;
902
903 case CLAIM_TYPE_ANNOUNCE:
904 if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
905 return 1;
906 break;
907 case CLAIM_TYPE_REQUEST:
908 if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
909 return 1;
910 break;
911 }
912
913 bat_dbg(DBG_BLA, bat_priv,
914 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
915 ethhdr->h_source, vid, hw_src, hw_dst);
916 return 1;
917}
918
919/* Check when we last heard from other nodes, and remove them in case of
920 * a time out, or clean all backbone gws if now is set.
921 */
922static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
923{
924 struct backbone_gw *backbone_gw;
925 struct hlist_node *node, *node_tmp;
926 struct hlist_head *head;
927 struct hashtable_t *hash;
928 spinlock_t *list_lock; /* protects write access to the hash lists */
929 int i;
930
931 hash = bat_priv->backbone_hash;
932 if (!hash)
933 return;
934
935 for (i = 0; i < hash->size; i++) {
936 head = &hash->table[i];
937 list_lock = &hash->list_locks[i];
938
939 spin_lock_bh(list_lock);
940 hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
941 head, hash_entry) {
942 if (now)
943 goto purge_now;
944 if (!has_timed_out(backbone_gw->lasttime,
945 BLA_BACKBONE_TIMEOUT))
946 continue;
947
948 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
949 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
950 backbone_gw->orig);
951
952purge_now:
953 /* don't wait for the pending request anymore */
954 if (atomic_read(&backbone_gw->request_sent))
955 atomic_dec(&bat_priv->bla_num_requests);
956
957 bla_del_backbone_claims(backbone_gw);
958
959 hlist_del_rcu(node);
960 backbone_gw_free_ref(backbone_gw);
961 }
962 spin_unlock_bh(list_lock);
963 }
964}
965
966/**
967 * @bat_priv: the bat priv with all the soft interface information
968 * @primary_if: the selected primary interface, may be NULL if now is set
969 * @now: whether the whole hash shall be wiped now
970 *
971 * Check when we heard last time from our own claims, and remove them in case of
972 * a time out, or clean all claims if now is set
973 */
974static void bla_purge_claims(struct bat_priv *bat_priv,
975 struct hard_iface *primary_if, int now)
976{
977 struct claim *claim;
978 struct hlist_node *node;
979 struct hlist_head *head;
980 struct hashtable_t *hash;
981 int i;
982
983 hash = bat_priv->claim_hash;
984 if (!hash)
985 return;
986
987 for (i = 0; i < hash->size; i++) {
988 head = &hash->table[i];
989
990 rcu_read_lock();
991 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
992 if (now)
993 goto purge_now;
994 if (!compare_eth(claim->backbone_gw->orig,
995 primary_if->net_dev->dev_addr))
996 continue;
997 if (!has_timed_out(claim->lasttime,
998 BLA_CLAIM_TIMEOUT))
999 continue;
1000
1001 bat_dbg(DBG_BLA, bat_priv,
1002 "bla_purge_claims(): %pM, vid %d, time out\n",
1003 claim->addr, claim->vid);
1004
1005purge_now:
1006 handle_unclaim(bat_priv, primary_if,
1007 claim->backbone_gw->orig,
1008 claim->addr, claim->vid);
1009 }
1010 rcu_read_unlock();
1011 }
1012}
1013
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void bla_update_orig_address(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct hard_iface *oldif)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	/* reset bridge loop avoidance group id */
	bat_priv->claim_dest.group =
		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));

	/* without an old interface there is nothing to migrate:
	 * wipe all claims and backbone gateways immediately
	 */
	if (!oldif) {
		bla_purge_claims(bat_priv, NULL, 1);
		bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!compare_eth(backbone_gw->orig,
					 oldif->net_dev->dev_addr))
				continue;

			/* rewrite our own entry to the new address in place */
			memcpy(backbone_gw->orig,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
1066
1067
1068
/* (re)start the timer */
static void bla_start_timer(struct bat_priv *bat_priv)
{
	/* NOTE(review): the work item is re-initialized on every rearm;
	 * a single INIT_DELAYED_WORK (e.g. in bla_init()) would suffice.
	 * Looks harmless here since the work cannot be pending when this
	 * runs (called from bla_init() and from the work handler itself)
	 * -- TODO confirm there are no other callers
	 */
	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
}
1076
/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, bla_work);
	struct hlist_node *node;
	struct hlist_head *head;
	struct backbone_gw *backbone_gw;
	struct hashtable_t *hash;
	struct hard_iface *primary_if;
	int i;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* time out stale claims and backbone gateway entries */
	bla_purge_claims(bat_priv, primary_if, 0);
	bla_purge_backbone_gw(bat_priv, 0);

	/* announcements are only sent while bla is enabled */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* refresh and announce only our own entries */
			if (!compare_eth(backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);

	/* rearm: the work is periodic */
	bla_start_timer(bat_priv);
}
1129
1130/* initialize all bla structures */
1131int bla_init(struct bat_priv *bat_priv)
1132{
1133 int i;
1134 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1135 struct hard_iface *primary_if;
1136
1137 bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
1138
1139 /* setting claim destination address */
1140 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
1141 bat_priv->claim_dest.type = 0;
1142 primary_if = primary_if_get_selected(bat_priv);
1143 if (primary_if) {
1144 bat_priv->claim_dest.group =
1145 htons(crc16(0, primary_if->net_dev->dev_addr,
1146 ETH_ALEN));
1147 hardif_free_ref(primary_if);
1148 } else {
1149 bat_priv->claim_dest.group = 0; /* will be set later */
1150 }
1151
1152 /* initialize the duplicate list */
1153 for (i = 0; i < DUPLIST_SIZE; i++)
1154 bat_priv->bcast_duplist[i].entrytime =
1155 jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
1156 bat_priv->bcast_duplist_curr = 0;
1157
1158 if (bat_priv->claim_hash)
1159 return 1;
1160
1161 bat_priv->claim_hash = hash_new(128);
1162 bat_priv->backbone_hash = hash_new(32);
1163
1164 if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
1165 return -1;
1166
1167 bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
1168
1169 bla_start_timer(bat_priv);
1170 return 1;
1171}
1172
1173/**
1174 * @bat_priv: the bat priv with all the soft interface information
1175 * @bcast_packet: originator mac address
1176 * @hdr_size: maximum length of the frame
1177 *
1178 * check if it is on our broadcast list. Another gateway might
1179 * have sent the same packet because it is connected to the same backbone,
1180 * so we have to remove this duplicate.
1181 *
1182 * This is performed by checking the CRC, which will tell us
1183 * with a good chance that it is the same packet. If it is furthermore
1184 * sent by another host, drop it. We allow equal packets from
1185 * the same host however as this might be intended.
1186 *
1187 **/
1188
1189int bla_check_bcast_duplist(struct bat_priv *bat_priv,
1190 struct bcast_packet *bcast_packet,
1191 int hdr_size)
1192{
1193 int i, length, curr;
1194 uint8_t *content;
1195 uint16_t crc;
1196 struct bcast_duplist_entry *entry;
1197
1198 length = hdr_size - sizeof(*bcast_packet);
1199 content = (uint8_t *)bcast_packet;
1200 content += sizeof(*bcast_packet);
1201
1202 /* calculate the crc ... */
1203 crc = crc16(0, content, length);
1204
1205 for (i = 0 ; i < DUPLIST_SIZE; i++) {
1206 curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
1207 entry = &bat_priv->bcast_duplist[curr];
1208
1209 /* we can stop searching if the entry is too old ;
1210 * later entries will be even older
1211 */
1212 if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
1213 break;
1214
1215 if (entry->crc != crc)
1216 continue;
1217
1218 if (compare_eth(entry->orig, bcast_packet->orig))
1219 continue;
1220
1221 /* this entry seems to match: same crc, not too old,
1222 * and from another gw. therefore return 1 to forbid it.
1223 */
1224 return 1;
1225 }
1226 /* not found, add a new entry (overwrite the oldest entry) */
1227 curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
1228 entry = &bat_priv->bcast_duplist[curr];
1229 entry->crc = crc;
1230 entry->entrytime = jiffies;
1231 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
1232 bat_priv->bcast_duplist_curr = curr;
1233
1234 /* allow it, its the first occurence. */
1235 return 0;
1236}
1237
1238
1239
1240/**
1241 * @bat_priv: the bat priv with all the soft interface information
1242 * @orig: originator mac address
1243 *
1244 * check if the originator is a gateway for any VLAN ID.
1245 *
1246 * returns 1 if it is found, 0 otherwise
1247 *
1248 */
1249
1250int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1251{
1252 struct hashtable_t *hash = bat_priv->backbone_hash;
1253 struct hlist_head *head;
1254 struct hlist_node *node;
1255 struct backbone_gw *backbone_gw;
1256 int i;
1257
1258 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1259 return 0;
1260
1261 if (!hash)
1262 return 0;
1263
1264 for (i = 0; i < hash->size; i++) {
1265 head = &hash->table[i];
1266
1267 rcu_read_lock();
1268 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1269 if (compare_eth(backbone_gw->orig, orig)) {
1270 rcu_read_unlock();
1271 return 1;
1272 }
1273 }
1274 rcu_read_unlock();
1275 }
1276
1277 return 0;
1278}
1279
1280
1281/**
1282 * @skb: the frame to be checked
1283 * @orig_node: the orig_node of the frame
1284 * @hdr_size: maximum length of the frame
1285 *
1286 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1287 * if the orig_node is also a gateway on the soft interface, otherwise it
1288 * returns 0.
1289 *
1290 */
1291int bla_is_backbone_gw(struct sk_buff *skb,
1292 struct orig_node *orig_node, int hdr_size)
1293{
1294 struct ethhdr *ethhdr;
1295 struct vlan_ethhdr *vhdr;
1296 struct backbone_gw *backbone_gw;
1297 short vid = -1;
1298
1299 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1300 return 0;
1301
1302 /* first, find out the vid. */
1303 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1304 return 0;
1305
1306 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
1307
1308 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
1309 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1310 return 0;
1311
1312 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
1313 hdr_size);
1314 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1315 }
1316
1317 /* see if this originator is a backbone gw for this VLAN */
1318
1319 backbone_gw = backbone_hash_find(orig_node->bat_priv,
1320 orig_node->orig, vid);
1321 if (!backbone_gw)
1322 return 0;
1323
1324 backbone_gw_free_ref(backbone_gw);
1325 return 1;
1326}
1327
/* free all bla structures (for softinterface free or module unload) */
void bla_free(struct bat_priv *bat_priv)
{
	struct hard_iface *primary_if;

	/* stop the periodic work before tearing the hashes down */
	cancel_delayed_work_sync(&bat_priv->bla_work);
	primary_if = primary_if_get_selected(bat_priv);

	/* claims hold references to backbone gateways (claim->backbone_gw),
	 * so they are purged before the backbone hash is destroyed
	 */
	if (bat_priv->claim_hash) {
		bla_purge_claims(bat_priv, primary_if, 1);
		hash_destroy(bat_priv->claim_hash);
		bat_priv->claim_hash = NULL;
	}
	if (bat_priv->backbone_hash) {
		bla_purge_backbone_gw(bat_priv, 1);
		hash_destroy(bat_priv->backbone_hash);
		bat_priv->backbone_hash = NULL;
	}
	if (primary_if)
		hardif_free_ref(primary_if);
}
1349
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1 (the skb is consumed/freed), otherwise it returns 0 and
 * the caller shall further process the skb.
 */
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* without a primary interface we cannot decide; drop the frame */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;


	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	/* look up a claim for this client/vlan pair */
	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* ... drop it. the responsible gateway is in charge. */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}
allow:
	/* frame may enter the LAN; refresh our own backbone gw entry */
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	/* the frame is consumed here and must not be processed further */
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}
1438
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* in VLAN case, the mac header might not be set. */
	skb_reset_mac_header(skb);

	/* if bla_process_claim() takes the frame, it is handled here
	 * (presumably a bla claim frame -- its internals are defined
	 * elsewhere in this file)
	 */
	if (bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	/* look up a claim for this client/vlan pair */
	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;

	claim = claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		handle_unclaim(bat_priv, primary_if,
			       primary_if->net_dev->dev_addr,
			       ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	/* frame may enter the mesh; refresh our own backbone gw entry */
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	/* NOTE(review): unlike bla_rx(), the skb is NOT freed on this
	 * path -- presumably the caller keeps ownership; confirm against
	 * the tx callers
	 */
	ret = 1;
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}
1526
/* print the local claim table to a seq_file (debugfs) */
int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	/* NOTE(review): claim_hash is assumed non-NULL below -- confirm
	 * the debugfs entry cannot be read before bla_init() ran
	 */
	struct hashtable_t *hash = bat_priv->claim_hash;
	struct claim *claim;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	bool is_own;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
		   net_dev->name, primary_if->net_dev->dev_addr,
		   ntohs(bat_priv->claim_dest.group));
	seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
		   "Client", "VID", "Originator", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* claims owned by this gateway are marked with 'x' */
			is_own = compare_eth(claim->backbone_gw->orig,
					     primary_if->net_dev->dev_addr);
			seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
				   claim->addr, claim->vid,
				   claim->backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   claim->backbone_gw->crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 000000000000..e39f93acc28f
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,98 @@
/*
 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_

#ifdef CONFIG_BATMAN_ADV_BLA
/* rx/tx checks: return 1 if the frame was handled by bridge loop
 * avoidance, 0 if the caller shall process it further
 */
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_is_backbone_gw(struct sk_buff *skb,
		       struct orig_node *orig_node, int hdr_size);
/* debugfs dump of the local claim table */
int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
/* returns 1 if an equivalent broadcast was already received from
 * another backbone gateway and shall be dropped
 */
int bla_check_bcast_duplist(struct bat_priv *bat_priv,
			    struct bcast_packet *bcast_packet, int hdr_size);
void bla_update_orig_address(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct hard_iface *oldif);
int bla_init(struct bat_priv *bat_priv);
void bla_free(struct bat_priv *bat_priv);

#define BLA_CRC_INIT 0
#else /* ifdef CONFIG_BATMAN_ADV_BLA */

/* no-op stubs for builds without bridge loop avoidance: every check
 * reports "not handled" (0) and bla_init reports success (1), so
 * callers need no #ifdefs of their own
 */
static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
			 short vid)
{
	return 0;
}

static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
			 short vid)
{
	return 0;
}

static inline int bla_is_backbone_gw(struct sk_buff *skb,
				     struct orig_node *orig_node,
				     int hdr_size)
{
	return 0;
}

static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
						 void *offset)
{
	return 0;
}

static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
					  uint8_t *orig)
{
	return 0;
}

static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
					  struct bcast_packet *bcast_packet,
					  int hdr_size)
{
	return 0;
}

static inline void bla_update_orig_address(struct bat_priv *bat_priv,
					   struct hard_iface *primary_if,
					   struct hard_iface *oldif)
{
}

static inline int bla_init(struct bat_priv *bat_priv)
{
	return 1;
}

static inline void bla_free(struct bat_priv *bat_priv)
{
}

#endif /* ifdef CONFIG_BATMAN_ADV_BLA */

#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 24403a7350f7..47f7186dcefc 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -224,16 +224,13 @@ void gw_election(struct bat_priv *bat_priv)
224 } else if ((!curr_gw) && (next_gw)) { 224 } else if ((!curr_gw) && (next_gw)) {
225 bat_dbg(DBG_BATMAN, bat_priv, 225 bat_dbg(DBG_BATMAN, bat_priv,
226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", 226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
227 next_gw->orig_node->orig, 227 next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
228 next_gw->orig_node->gw_flags,
229 router->tq_avg); 228 router->tq_avg);
230 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); 229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
231 } else { 230 } else {
232 bat_dbg(DBG_BATMAN, bat_priv, 231 bat_dbg(DBG_BATMAN, bat_priv,
233 "Changing route to gateway %pM " 232 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
234 "(gw_flags: %i, tq: %i)\n", 233 next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
235 next_gw->orig_node->orig,
236 next_gw->orig_node->gw_flags,
237 router->tq_avg); 234 router->tq_avg);
238 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); 235 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
239 } 236 }
@@ -287,8 +284,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
287 goto out; 284 goto out;
288 285
289 bat_dbg(DBG_BATMAN, bat_priv, 286 bat_dbg(DBG_BATMAN, bat_priv,
290 "Restarting gateway selection: better gateway found (tq curr: " 287 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
291 "%i, tq new: %i)\n",
292 gw_tq_avg, orig_tq_avg); 288 gw_tq_avg, orig_tq_avg);
293 289
294deselect: 290deselect:
@@ -352,8 +348,7 @@ void gw_node_update(struct bat_priv *bat_priv,
352 continue; 348 continue;
353 349
354 bat_dbg(DBG_BATMAN, bat_priv, 350 bat_dbg(DBG_BATMAN, bat_priv,
355 "Gateway class of originator %pM changed from " 351 "Gateway class of originator %pM changed from %i to %i\n",
356 "%i to %i\n",
357 orig_node->orig, gw_node->orig_node->gw_flags, 352 orig_node->orig, gw_node->orig_node->gw_flags,
358 new_gwflags); 353 new_gwflags);
359 354
@@ -396,7 +391,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
396{ 391{
397 struct gw_node *gw_node, *curr_gw; 392 struct gw_node *gw_node, *curr_gw;
398 struct hlist_node *node, *node_tmp; 393 struct hlist_node *node, *node_tmp;
399 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 394 unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
400 int do_deselect = 0; 395 int do_deselect = 0;
401 396
402 curr_gw = gw_get_selected_gw_node(bat_priv); 397 curr_gw = gw_get_selected_gw_node(bat_priv);
@@ -474,23 +469,23 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
474 469
475 primary_if = primary_if_get_selected(bat_priv); 470 primary_if = primary_if_get_selected(bat_priv);
476 if (!primary_if) { 471 if (!primary_if) {
477 ret = seq_printf(seq, "BATMAN mesh %s disabled - please " 472 ret = seq_printf(seq,
478 "specify interfaces to enable it\n", 473 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
479 net_dev->name); 474 net_dev->name);
480 goto out; 475 goto out;
481 } 476 }
482 477
483 if (primary_if->if_status != IF_ACTIVE) { 478 if (primary_if->if_status != IF_ACTIVE) {
484 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 479 ret = seq_printf(seq,
485 "primary interface not active\n", 480 "BATMAN mesh %s disabled - primary interface not active\n",
486 net_dev->name); 481 net_dev->name);
487 goto out; 482 goto out;
488 } 483 }
489 484
490 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 485 seq_printf(seq,
491 "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 486 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
492 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 487 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
493 "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name, 488 SOURCE_VERSION, primary_if->net_dev->name,
494 primary_if->net_dev->dev_addr, net_dev->name); 489 primary_if->net_dev->dev_addr, net_dev->name);
495 490
496 rcu_read_lock(); 491 rcu_read_lock();
@@ -563,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
563 p++; 558 p++;
564 559
565 /* ...and then we jump over the data */ 560 /* ...and then we jump over the data */
566 if (pkt_len < *p) 561 if (pkt_len < 1 + (*p))
567 goto out; 562 goto out;
568 pkt_len -= *p; 563 pkt_len -= 1 + (*p);
569 p += (*p); 564 p += 1 + (*p);
570 } 565 }
571 } 566 }
572out: 567out:
@@ -629,7 +624,7 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
629 624
630 /* check for bootp port */ 625 /* check for bootp port */
631 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && 626 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
632 (ntohs(udphdr->dest) != 67)) 627 (ntohs(udphdr->dest) != 67))
633 return false; 628 return false;
634 629
635 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && 630 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index e1edba08eb1d..bf56a5aea10b 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index c4ac7b0a2a63..ca57ac7d73b2 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -93,7 +93,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
93 multi = 1024; 93 multi = 1024;
94 94
95 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || 95 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
96 (multi > 1)) 96 (multi > 1))
97 *tmp_ptr = '\0'; 97 *tmp_ptr = '\0';
98 } 98 }
99 99
@@ -118,15 +118,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
118 multi = 1024; 118 multi = 1024;
119 119
120 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || 120 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
121 (multi > 1)) 121 (multi > 1))
122 *tmp_ptr = '\0'; 122 *tmp_ptr = '\0';
123 } 123 }
124 124
125 ret = kstrtol(slash_ptr + 1, 10, &lup); 125 ret = kstrtol(slash_ptr + 1, 10, &lup);
126 if (ret) { 126 if (ret) {
127 bat_err(net_dev, 127 bat_err(net_dev,
128 "Upload speed of gateway mode invalid: " 128 "Upload speed of gateway mode invalid: %s\n",
129 "%s\n", slash_ptr + 1); 129 slash_ptr + 1);
130 return false; 130 return false;
131 } 131 }
132 132
@@ -163,8 +163,8 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); 163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
164 164
165 gw_deselect(bat_priv); 165 gw_deselect(bat_priv);
166 bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' " 166 bat_info(net_dev,
167 "(propagating: %d%s/%d%s)\n", 167 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, 168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
169 (down > 2048 ? down / 1024 : down), 169 (down > 2048 ? down / 1024 : down),
170 (down > 2048 ? "MBit" : "KBit"), 170 (down > 2048 ? "MBit" : "KBit"),
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 55e527a489fe..b8fb11c4f927 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 7704df468e0b..dc334fa89847 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,16 +28,10 @@
28#include "bat_sysfs.h" 28#include "bat_sysfs.h"
29#include "originator.h" 29#include "originator.h"
30#include "hash.h" 30#include "hash.h"
31#include "bat_ogm.h" 31#include "bridge_loop_avoidance.h"
32 32
33#include <linux/if_arp.h> 33#include <linux/if_arp.h>
34 34
35
36static int batman_skb_recv(struct sk_buff *skb,
37 struct net_device *dev,
38 struct packet_type *ptype,
39 struct net_device *orig_dev);
40
41void hardif_free_rcu(struct rcu_head *rcu) 35void hardif_free_rcu(struct rcu_head *rcu)
42{ 36{
43 struct hard_iface *hard_iface; 37 struct hard_iface *hard_iface;
@@ -108,7 +102,8 @@ out:
108 return hard_iface; 102 return hard_iface;
109} 103}
110 104
111static void primary_if_update_addr(struct bat_priv *bat_priv) 105static void primary_if_update_addr(struct bat_priv *bat_priv,
106 struct hard_iface *oldif)
112{ 107{
113 struct vis_packet *vis_packet; 108 struct vis_packet *vis_packet;
114 struct hard_iface *primary_if; 109 struct hard_iface *primary_if;
@@ -123,6 +118,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
123 memcpy(vis_packet->sender_orig, 118 memcpy(vis_packet->sender_orig,
124 primary_if->net_dev->dev_addr, ETH_ALEN); 119 primary_if->net_dev->dev_addr, ETH_ALEN);
125 120
121 bla_update_orig_address(bat_priv, primary_if, oldif);
126out: 122out:
127 if (primary_if) 123 if (primary_if)
128 hardif_free_ref(primary_if); 124 hardif_free_ref(primary_if);
@@ -141,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
141 curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); 137 curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
142 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); 138 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
143 139
144 if (curr_hard_iface)
145 hardif_free_ref(curr_hard_iface);
146
147 if (!new_hard_iface) 140 if (!new_hard_iface)
148 return; 141 goto out;
142
143 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
144 primary_if_update_addr(bat_priv, curr_hard_iface);
149 145
150 bat_ogm_init_primary(new_hard_iface); 146out:
151 primary_if_update_addr(bat_priv); 147 if (curr_hard_iface)
148 hardif_free_ref(curr_hard_iface);
152} 149}
153 150
154static bool hardif_is_iface_up(const struct hard_iface *hard_iface) 151static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -176,11 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev)
176 net_dev->dev_addr)) 173 net_dev->dev_addr))
177 continue; 174 continue;
178 175
179 pr_warning("The newly added mac address (%pM) already exists " 176 pr_warn("The newly added mac address (%pM) already exists on: %s\n",
180 "on: %s\n", net_dev->dev_addr, 177 net_dev->dev_addr, hard_iface->net_dev->name);
181 hard_iface->net_dev->name); 178 pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
182 pr_warning("It is strongly recommended to keep mac addresses "
183 "unique to avoid problems!\n");
184 } 179 }
185 rcu_read_unlock(); 180 rcu_read_unlock();
186} 181}
@@ -233,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
233 228
234 bat_priv = netdev_priv(hard_iface->soft_iface); 229 bat_priv = netdev_priv(hard_iface->soft_iface);
235 230
236 bat_ogm_update_mac(hard_iface); 231 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
237 hard_iface->if_status = IF_TO_BE_ACTIVATED; 232 hard_iface->if_status = IF_TO_BE_ACTIVATED;
238 233
239 /** 234 /**
@@ -281,6 +276,11 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
281 if (!atomic_inc_not_zero(&hard_iface->refcount)) 276 if (!atomic_inc_not_zero(&hard_iface->refcount))
282 goto out; 277 goto out;
283 278
279 /* hard-interface is part of a bridge */
280 if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
281 pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
282 hard_iface->net_dev->name);
283
284 soft_iface = dev_get_by_name(&init_net, iface_name); 284 soft_iface = dev_get_by_name(&init_net, iface_name);
285 285
286 if (!soft_iface) { 286 if (!soft_iface) {
@@ -296,24 +296,19 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
296 } 296 }
297 297
298 if (!softif_is_valid(soft_iface)) { 298 if (!softif_is_valid(soft_iface)) {
299 pr_err("Can't create batman mesh interface %s: " 299 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
300 "already exists as regular interface\n",
301 soft_iface->name); 300 soft_iface->name);
302 dev_put(soft_iface);
303 ret = -EINVAL; 301 ret = -EINVAL;
304 goto err; 302 goto err_dev;
305 } 303 }
306 304
307 hard_iface->soft_iface = soft_iface; 305 hard_iface->soft_iface = soft_iface;
308 bat_priv = netdev_priv(hard_iface->soft_iface); 306 bat_priv = netdev_priv(hard_iface->soft_iface);
309 307
310 bat_ogm_init(hard_iface); 308 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
311 309 if (ret < 0) {
312 if (!hard_iface->packet_buff) {
313 bat_err(hard_iface->soft_iface, "Can't add interface packet "
314 "(%s): out of memory\n", hard_iface->net_dev->name);
315 ret = -ENOMEM; 310 ret = -ENOMEM;
316 goto err; 311 goto err_dev;
317 } 312 }
318 313
319 hard_iface->if_num = bat_priv->num_ifaces; 314 hard_iface->if_num = bat_priv->num_ifaces;
@@ -326,7 +321,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
326 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; 321 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
327 dev_add_pack(&hard_iface->batman_adv_ptype); 322 dev_add_pack(&hard_iface->batman_adv_ptype);
328 323
329 atomic_set(&hard_iface->seqno, 1);
330 atomic_set(&hard_iface->frag_seqno, 1); 324 atomic_set(&hard_iface->frag_seqno, 1);
331 bat_info(hard_iface->soft_iface, "Adding interface: %s\n", 325 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
332 hard_iface->net_dev->name); 326 hard_iface->net_dev->name);
@@ -334,29 +328,22 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
334 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 328 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
335 ETH_DATA_LEN + BAT_HEADER_LEN) 329 ETH_DATA_LEN + BAT_HEADER_LEN)
336 bat_info(hard_iface->soft_iface, 330 bat_info(hard_iface->soft_iface,
337 "The MTU of interface %s is too small (%i) to handle " 331 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
338 "the transport of batman-adv packets. Packets going " 332 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
339 "over this interface will be fragmented on layer2 " 333 ETH_DATA_LEN + BAT_HEADER_LEN);
340 "which could impact the performance. Setting the MTU "
341 "to %zi would solve the problem.\n",
342 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
343 ETH_DATA_LEN + BAT_HEADER_LEN);
344 334
345 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 335 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
346 ETH_DATA_LEN + BAT_HEADER_LEN) 336 ETH_DATA_LEN + BAT_HEADER_LEN)
347 bat_info(hard_iface->soft_iface, 337 bat_info(hard_iface->soft_iface,
348 "The MTU of interface %s is too small (%i) to handle " 338 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
349 "the transport of batman-adv packets. If you experience" 339 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
350 " problems getting traffic through try increasing the " 340 ETH_DATA_LEN + BAT_HEADER_LEN);
351 "MTU to %zi.\n",
352 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
353 ETH_DATA_LEN + BAT_HEADER_LEN);
354 341
355 if (hardif_is_iface_up(hard_iface)) 342 if (hardif_is_iface_up(hard_iface))
356 hardif_activate_interface(hard_iface); 343 hardif_activate_interface(hard_iface);
357 else 344 else
358 bat_err(hard_iface->soft_iface, "Not using interface %s " 345 bat_err(hard_iface->soft_iface,
359 "(retrying later): interface not active\n", 346 "Not using interface %s (retrying later): interface not active\n",
360 hard_iface->net_dev->name); 347 hard_iface->net_dev->name);
361 348
362 /* begin scheduling originator messages on that interface */ 349 /* begin scheduling originator messages on that interface */
@@ -365,6 +352,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
365out: 352out:
366 return 0; 353 return 0;
367 354
355err_dev:
356 dev_put(soft_iface);
368err: 357err:
369 hardif_free_ref(hard_iface); 358 hardif_free_ref(hard_iface);
370 return ret; 359 return ret;
@@ -399,8 +388,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
399 hardif_free_ref(new_if); 388 hardif_free_ref(new_if);
400 } 389 }
401 390
402 kfree(hard_iface->packet_buff); 391 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
403 hard_iface->packet_buff = NULL;
404 hard_iface->if_status = IF_NOT_IN_USE; 392 hard_iface->if_status = IF_NOT_IN_USE;
405 393
406 /* delete all references to this hard_iface */ 394 /* delete all references to this hard_iface */
@@ -452,6 +440,13 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
452 check_known_mac_addr(hard_iface->net_dev); 440 check_known_mac_addr(hard_iface->net_dev);
453 list_add_tail_rcu(&hard_iface->list, &hardif_list); 441 list_add_tail_rcu(&hard_iface->list, &hardif_list);
454 442
443 /**
444 * This can't be called via a bat_priv callback because
445 * we have no bat_priv yet.
446 */
447 atomic_set(&hard_iface->seqno, 1);
448 hard_iface->packet_buff = NULL;
449
455 return hard_iface; 450 return hard_iface;
456 451
457free_if: 452free_if:
@@ -527,15 +522,16 @@ static int hard_if_event(struct notifier_block *this,
527 goto hardif_put; 522 goto hardif_put;
528 523
529 check_known_mac_addr(hard_iface->net_dev); 524 check_known_mac_addr(hard_iface->net_dev);
530 bat_ogm_update_mac(hard_iface);
531 525
532 bat_priv = netdev_priv(hard_iface->soft_iface); 526 bat_priv = netdev_priv(hard_iface->soft_iface);
527 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
528
533 primary_if = primary_if_get_selected(bat_priv); 529 primary_if = primary_if_get_selected(bat_priv);
534 if (!primary_if) 530 if (!primary_if)
535 goto hardif_put; 531 goto hardif_put;
536 532
537 if (hard_iface == primary_if) 533 if (hard_iface == primary_if)
538 primary_if_update_addr(bat_priv); 534 primary_if_update_addr(bat_priv, NULL);
539 break; 535 break;
540 default: 536 default:
541 break; 537 break;
@@ -549,114 +545,6 @@ out:
549 return NOTIFY_DONE; 545 return NOTIFY_DONE;
550} 546}
551 547
552/* incoming packets with the batman ethertype received on any active hard
553 * interface */
554static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
555 struct packet_type *ptype,
556 struct net_device *orig_dev)
557{
558 struct bat_priv *bat_priv;
559 struct batman_ogm_packet *batman_ogm_packet;
560 struct hard_iface *hard_iface;
561 int ret;
562
563 hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
564 skb = skb_share_check(skb, GFP_ATOMIC);
565
566 /* skb was released by skb_share_check() */
567 if (!skb)
568 goto err_out;
569
570 /* packet should hold at least type and version */
571 if (unlikely(!pskb_may_pull(skb, 2)))
572 goto err_free;
573
574 /* expect a valid ethernet header here. */
575 if (unlikely(skb->mac_len != sizeof(struct ethhdr)
576 || !skb_mac_header(skb)))
577 goto err_free;
578
579 if (!hard_iface->soft_iface)
580 goto err_free;
581
582 bat_priv = netdev_priv(hard_iface->soft_iface);
583
584 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
585 goto err_free;
586
587 /* discard frames on not active interfaces */
588 if (hard_iface->if_status != IF_ACTIVE)
589 goto err_free;
590
591 batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
592
593 if (batman_ogm_packet->version != COMPAT_VERSION) {
594 bat_dbg(DBG_BATMAN, bat_priv,
595 "Drop packet: incompatible batman version (%i)\n",
596 batman_ogm_packet->version);
597 goto err_free;
598 }
599
600 /* all receive handlers return whether they received or reused
601 * the supplied skb. if not, we have to free the skb. */
602
603 switch (batman_ogm_packet->packet_type) {
604 /* batman originator packet */
605 case BAT_OGM:
606 ret = recv_bat_ogm_packet(skb, hard_iface);
607 break;
608
609 /* batman icmp packet */
610 case BAT_ICMP:
611 ret = recv_icmp_packet(skb, hard_iface);
612 break;
613
614 /* unicast packet */
615 case BAT_UNICAST:
616 ret = recv_unicast_packet(skb, hard_iface);
617 break;
618
619 /* fragmented unicast packet */
620 case BAT_UNICAST_FRAG:
621 ret = recv_ucast_frag_packet(skb, hard_iface);
622 break;
623
624 /* broadcast packet */
625 case BAT_BCAST:
626 ret = recv_bcast_packet(skb, hard_iface);
627 break;
628
629 /* vis packet */
630 case BAT_VIS:
631 ret = recv_vis_packet(skb, hard_iface);
632 break;
633 /* Translation table query (request or response) */
634 case BAT_TT_QUERY:
635 ret = recv_tt_query(skb, hard_iface);
636 break;
637 /* Roaming advertisement */
638 case BAT_ROAM_ADV:
639 ret = recv_roam_adv(skb, hard_iface);
640 break;
641 default:
642 ret = NET_RX_DROP;
643 }
644
645 if (ret == NET_RX_DROP)
646 kfree_skb(skb);
647
648 /* return NET_RX_SUCCESS in any case as we
649 * most probably dropped the packet for
650 * routing-logical reasons. */
651
652 return NET_RX_SUCCESS;
653
654err_free:
655 kfree_skb(skb);
656err_out:
657 return NET_RX_DROP;
658}
659
660/* This function returns true if the interface represented by ifindex is a 548/* This function returns true if the interface represented by ifindex is a
661 * 802.11 wireless device */ 549 * 802.11 wireless device */
662bool is_wifi_iface(int ifindex) 550bool is_wifi_iface(int ifindex)
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 67f78d1a63b4..e68c5655e616 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index d1da29da333b..117687bedf25 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 4768717f07f9..d4bd7862719b 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index d9c1e7bb7fbf..2e98a57f3407 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -59,8 +59,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
59 } 59 }
60 60
61 if (i == ARRAY_SIZE(socket_client_hash)) { 61 if (i == ARRAY_SIZE(socket_client_hash)) {
62 pr_err("Error - can't add another packet client: " 62 pr_err("Error - can't add another packet client: maximum number of clients reached\n");
63 "maximum number of clients reached\n");
64 kfree(socket_client); 63 kfree(socket_client);
65 return -EXFULL; 64 return -EXFULL;
66 } 65 }
@@ -162,8 +161,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
162 161
163 if (len < sizeof(struct icmp_packet)) { 162 if (len < sizeof(struct icmp_packet)) {
164 bat_dbg(DBG_BATMAN, bat_priv, 163 bat_dbg(DBG_BATMAN, bat_priv,
165 "Error - can't send packet from char device: " 164 "Error - can't send packet from char device: invalid packet size\n");
166 "invalid packet size\n");
167 return -EINVAL; 165 return -EINVAL;
168 } 166 }
169 167
@@ -177,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
177 if (len >= sizeof(struct icmp_packet_rr)) 175 if (len >= sizeof(struct icmp_packet_rr))
178 packet_len = sizeof(struct icmp_packet_rr); 176 packet_len = sizeof(struct icmp_packet_rr);
179 177
180 skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); 178 skb = dev_alloc_skb(packet_len + ETH_HLEN);
181 if (!skb) { 179 if (!skb) {
182 len = -ENOMEM; 180 len = -ENOMEM;
183 goto out; 181 goto out;
184 } 182 }
185 183
186 skb_reserve(skb, sizeof(struct ethhdr)); 184 skb_reserve(skb, ETH_HLEN);
187 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); 185 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
188 186
189 if (copy_from_user(icmp_packet, buff, packet_len)) { 187 if (copy_from_user(icmp_packet, buff, packet_len)) {
@@ -191,27 +189,25 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
191 goto free_skb; 189 goto free_skb;
192 } 190 }
193 191
194 if (icmp_packet->packet_type != BAT_ICMP) { 192 if (icmp_packet->header.packet_type != BAT_ICMP) {
195 bat_dbg(DBG_BATMAN, bat_priv, 193 bat_dbg(DBG_BATMAN, bat_priv,
196 "Error - can't send packet from char device: " 194 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
197 "got bogus packet type (expected: BAT_ICMP)\n");
198 len = -EINVAL; 195 len = -EINVAL;
199 goto free_skb; 196 goto free_skb;
200 } 197 }
201 198
202 if (icmp_packet->msg_type != ECHO_REQUEST) { 199 if (icmp_packet->msg_type != ECHO_REQUEST) {
203 bat_dbg(DBG_BATMAN, bat_priv, 200 bat_dbg(DBG_BATMAN, bat_priv,
204 "Error - can't send packet from char device: " 201 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
205 "got bogus message type (expected: ECHO_REQUEST)\n");
206 len = -EINVAL; 202 len = -EINVAL;
207 goto free_skb; 203 goto free_skb;
208 } 204 }
209 205
210 icmp_packet->uid = socket_client->index; 206 icmp_packet->uid = socket_client->index;
211 207
212 if (icmp_packet->version != COMPAT_VERSION) { 208 if (icmp_packet->header.version != COMPAT_VERSION) {
213 icmp_packet->msg_type = PARAMETER_PROBLEM; 209 icmp_packet->msg_type = PARAMETER_PROBLEM;
214 icmp_packet->version = COMPAT_VERSION; 210 icmp_packet->header.version = COMPAT_VERSION;
215 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 211 bat_socket_add_packet(socket_client, icmp_packet, packet_len);
216 goto free_skb; 212 goto free_skb;
217 } 213 }
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 462b190fa101..380ed4c2443a 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index fb87bdc2ce9b..083a2993efe4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -30,21 +30,33 @@
30#include "translation-table.h" 30#include "translation-table.h"
31#include "hard-interface.h" 31#include "hard-interface.h"
32#include "gateway_client.h" 32#include "gateway_client.h"
33#include "bridge_loop_avoidance.h"
33#include "vis.h" 34#include "vis.h"
34#include "hash.h" 35#include "hash.h"
36#include "bat_algo.h"
35 37
36 38
37/* List manipulations on hardif_list have to be rtnl_lock()'ed, 39/* List manipulations on hardif_list have to be rtnl_lock()'ed,
38 * list traversals just rcu-locked */ 40 * list traversals just rcu-locked */
39struct list_head hardif_list; 41struct list_head hardif_list;
42static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
43char bat_routing_algo[20] = "BATMAN IV";
44static struct hlist_head bat_algo_list;
40 45
41unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 46unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
42 47
43struct workqueue_struct *bat_event_workqueue; 48struct workqueue_struct *bat_event_workqueue;
44 49
50static void recv_handler_init(void);
51
45static int __init batman_init(void) 52static int __init batman_init(void)
46{ 53{
47 INIT_LIST_HEAD(&hardif_list); 54 INIT_LIST_HEAD(&hardif_list);
55 INIT_HLIST_HEAD(&bat_algo_list);
56
57 recv_handler_init();
58
59 bat_iv_init();
48 60
49 /* the name should not be longer than 10 chars - see 61 /* the name should not be longer than 10 chars - see
50 * http://lwn.net/Articles/23634/ */ 62 * http://lwn.net/Articles/23634/ */
@@ -58,8 +70,8 @@ static int __init batman_init(void)
58 70
59 register_netdevice_notifier(&hard_if_notifier); 71 register_netdevice_notifier(&hard_if_notifier);
60 72
61 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) " 73 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
62 "loaded\n", SOURCE_VERSION, COMPAT_VERSION); 74 SOURCE_VERSION, COMPAT_VERSION);
63 75
64 return 0; 76 return 0;
65} 77}
@@ -90,13 +102,10 @@ int mesh_init(struct net_device *soft_iface)
90 spin_lock_init(&bat_priv->gw_list_lock); 102 spin_lock_init(&bat_priv->gw_list_lock);
91 spin_lock_init(&bat_priv->vis_hash_lock); 103 spin_lock_init(&bat_priv->vis_hash_lock);
92 spin_lock_init(&bat_priv->vis_list_lock); 104 spin_lock_init(&bat_priv->vis_list_lock);
93 spin_lock_init(&bat_priv->softif_neigh_lock);
94 spin_lock_init(&bat_priv->softif_neigh_vid_lock);
95 105
96 INIT_HLIST_HEAD(&bat_priv->forw_bat_list); 106 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
97 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 107 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
98 INIT_HLIST_HEAD(&bat_priv->gw_list); 108 INIT_HLIST_HEAD(&bat_priv->gw_list);
99 INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
100 INIT_LIST_HEAD(&bat_priv->tt_changes_list); 109 INIT_LIST_HEAD(&bat_priv->tt_changes_list);
101 INIT_LIST_HEAD(&bat_priv->tt_req_list); 110 INIT_LIST_HEAD(&bat_priv->tt_req_list);
102 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 111 INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -112,6 +121,9 @@ int mesh_init(struct net_device *soft_iface)
112 if (vis_init(bat_priv) < 1) 121 if (vis_init(bat_priv) < 1)
113 goto err; 122 goto err;
114 123
124 if (bla_init(bat_priv) < 1)
125 goto err;
126
115 atomic_set(&bat_priv->gw_reselect, 0); 127 atomic_set(&bat_priv->gw_reselect, 0);
116 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); 128 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
117 goto end; 129 goto end;
@@ -139,7 +151,7 @@ void mesh_free(struct net_device *soft_iface)
139 151
140 tt_free(bat_priv); 152 tt_free(bat_priv);
141 153
142 softif_neigh_purge(bat_priv); 154 bla_free(bat_priv);
143 155
144 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 156 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
145} 157}
@@ -170,9 +182,224 @@ int is_my_mac(const uint8_t *addr)
170 } 182 }
171 rcu_read_unlock(); 183 rcu_read_unlock();
172 return 0; 184 return 0;
185}
186
187static int recv_unhandled_packet(struct sk_buff *skb,
188 struct hard_iface *recv_if)
189{
190 return NET_RX_DROP;
191}
192
193/* incoming packets with the batman ethertype received on any active hard
194 * interface
195 */
196int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
197 struct packet_type *ptype, struct net_device *orig_dev)
198{
199 struct bat_priv *bat_priv;
200 struct batman_ogm_packet *batman_ogm_packet;
201 struct hard_iface *hard_iface;
202 uint8_t idx;
203 int ret;
204
205 hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
206 skb = skb_share_check(skb, GFP_ATOMIC);
207
208 /* skb was released by skb_share_check() */
209 if (!skb)
210 goto err_out;
211
212 /* packet should hold at least type and version */
213 if (unlikely(!pskb_may_pull(skb, 2)))
214 goto err_free;
215
216 /* expect a valid ethernet header here. */
217 if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
218 goto err_free;
219
220 if (!hard_iface->soft_iface)
221 goto err_free;
222
223 bat_priv = netdev_priv(hard_iface->soft_iface);
224
225 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
226 goto err_free;
227
228 /* discard frames on not active interfaces */
229 if (hard_iface->if_status != IF_ACTIVE)
230 goto err_free;
231
232 batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
233
234 if (batman_ogm_packet->header.version != COMPAT_VERSION) {
235 bat_dbg(DBG_BATMAN, bat_priv,
236 "Drop packet: incompatible batman version (%i)\n",
237 batman_ogm_packet->header.version);
238 goto err_free;
239 }
240
241 /* all receive handlers return whether they received or reused
242 * the supplied skb. if not, we have to free the skb.
243 */
244 idx = batman_ogm_packet->header.packet_type;
245 ret = (*recv_packet_handler[idx])(skb, hard_iface);
246
247 if (ret == NET_RX_DROP)
248 kfree_skb(skb);
249
250 /* return NET_RX_SUCCESS in any case as we
251 * most probably dropped the packet for
252 * routing-logical reasons.
253 */
254 return NET_RX_SUCCESS;
255
256err_free:
257 kfree_skb(skb);
258err_out:
259 return NET_RX_DROP;
260}
261
262static void recv_handler_init(void)
263{
264 int i;
265
266 for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
267 recv_packet_handler[i] = recv_unhandled_packet;
268
269 /* batman icmp packet */
270 recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
271 /* unicast packet */
272 recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
273 /* fragmented unicast packet */
274 recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
275 /* broadcast packet */
276 recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
277 /* vis packet */
278 recv_packet_handler[BAT_VIS] = recv_vis_packet;
279 /* Translation table query (request or response) */
280 recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
281 /* Roaming advertisement */
282 recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
283}
284
285int recv_handler_register(uint8_t packet_type,
286 int (*recv_handler)(struct sk_buff *,
287 struct hard_iface *))
288{
289 if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
290 return -EBUSY;
291
292 recv_packet_handler[packet_type] = recv_handler;
293 return 0;
294}
295
296void recv_handler_unregister(uint8_t packet_type)
297{
298 recv_packet_handler[packet_type] = recv_unhandled_packet;
299}
300
301static struct bat_algo_ops *bat_algo_get(char *name)
302{
303 struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
304 struct hlist_node *node;
305
306 hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
307 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
308 continue;
309
310 bat_algo_ops = bat_algo_ops_tmp;
311 break;
312 }
313
314 return bat_algo_ops;
315}
316
317int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
318{
319 struct bat_algo_ops *bat_algo_ops_tmp;
320 int ret = -1;
321
322 bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
323 if (bat_algo_ops_tmp) {
324 pr_info("Trying to register already registered routing algorithm: %s\n",
325 bat_algo_ops->name);
326 goto out;
327 }
328
329 /* all algorithms must implement all ops (for now) */
330 if (!bat_algo_ops->bat_iface_enable ||
331 !bat_algo_ops->bat_iface_disable ||
332 !bat_algo_ops->bat_iface_update_mac ||
333 !bat_algo_ops->bat_primary_iface_set ||
334 !bat_algo_ops->bat_ogm_schedule ||
335 !bat_algo_ops->bat_ogm_emit) {
336 pr_info("Routing algo '%s' does not implement required ops\n",
337 bat_algo_ops->name);
338 goto out;
339 }
340
341 INIT_HLIST_NODE(&bat_algo_ops->list);
342 hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
343 ret = 0;
344
345out:
346 return ret;
347}
348
349int bat_algo_select(struct bat_priv *bat_priv, char *name)
350{
351 struct bat_algo_ops *bat_algo_ops;
352 int ret = -1;
353
354 bat_algo_ops = bat_algo_get(name);
355 if (!bat_algo_ops)
356 goto out;
357
358 bat_priv->bat_algo_ops = bat_algo_ops;
359 ret = 0;
360
361out:
362 return ret;
363}
364
365int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
366{
367 struct bat_algo_ops *bat_algo_ops;
368 struct hlist_node *node;
369
370 seq_printf(seq, "Available routing algorithms:\n");
371
372 hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
373 seq_printf(seq, "%s\n", bat_algo_ops->name);
374 }
375
376 return 0;
377}
378
379static int param_set_ra(const char *val, const struct kernel_param *kp)
380{
381 struct bat_algo_ops *bat_algo_ops;
173 382
383 bat_algo_ops = bat_algo_get((char *)val);
384 if (!bat_algo_ops) {
385 pr_err("Routing algorithm '%s' is not supported\n", val);
386 return -EINVAL;
387 }
388
389 return param_set_copystring(val, kp);
174} 390}
175 391
392static const struct kernel_param_ops param_ops_ra = {
393 .set = param_set_ra,
394 .get = param_get_string,
395};
396
397static struct kparam_string __param_string_ra = {
398 .maxlen = sizeof(bat_routing_algo),
399 .string = bat_routing_algo,
400};
401
402module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
176module_init(batman_init); 403module_init(batman_init);
177module_exit(batman_exit); 404module_exit(batman_exit);
178 405
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 86354e06eb48..f4a3ec003479 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,7 +28,7 @@
28#define DRIVER_DEVICE "batman-adv" 28#define DRIVER_DEVICE "batman-adv"
29 29
30#ifndef SOURCE_VERSION 30#ifndef SOURCE_VERSION
31#define SOURCE_VERSION "2012.0.0" 31#define SOURCE_VERSION "2012.2.0"
32#endif 32#endif
33 33
34/* B.A.T.M.A.N. parameters */ 34/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
41 41
42/* purge originators after time in seconds if no valid packet comes in 42/* purge originators after time in seconds if no valid packet comes in
43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ 43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
44#define PURGE_TIMEOUT 200 44#define PURGE_TIMEOUT 200000 /* 200 seconds */
45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */ 45#define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600 46#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
47/* sliding packet range of received originator messages in sequence numbers 47/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 48 * (should be a multiple of our word size) */
49#define TQ_LOCAL_WINDOW_SIZE 64 49#define TQ_LOCAL_WINDOW_SIZE 64
50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */ 50#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep
51 * pending tt_req */
51 52
52#define TQ_GLOBAL_WINDOW_SIZE 5 53#define TQ_GLOBAL_WINDOW_SIZE 5
53#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 54#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
@@ -56,15 +57,15 @@
56 57
57#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ 58#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
58 59
59#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most 60#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most
60 * ROAMING_MAX_COUNT times */ 61 * ROAMING_MAX_COUNT times in miliseconds*/
61#define ROAMING_MAX_COUNT 5 62#define ROAMING_MAX_COUNT 5
62 63
63#define NO_FLAGS 0 64#define NO_FLAGS 0
64 65
65#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ 66#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
66 67
67#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) 68#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
68 69
69#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 70#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
70 71
@@ -79,8 +80,12 @@
79#define MAX_AGGREGATION_BYTES 512 80#define MAX_AGGREGATION_BYTES 512
80#define MAX_AGGREGATION_MS 100 81#define MAX_AGGREGATION_MS 100
81 82
82#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ 83#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
84#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
85#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
83 86
87#define DUPLIST_SIZE 16
88#define DUPLIST_TIMEOUT 500 /* 500 ms */
84/* don't reset again within 30 seconds */ 89/* don't reset again within 30 seconds */
85#define RESET_PROTECTION_MS 30000 90#define RESET_PROTECTION_MS 30000
86#define EXPECTED_SEQNO_RANGE 65536 91#define EXPECTED_SEQNO_RANGE 65536
@@ -106,9 +111,7 @@ enum uev_type {
106 111
107#define GW_THRESHOLD 50 112#define GW_THRESHOLD 50
108 113
109/* 114/* Debug Messages */
110 * Debug Messages
111 */
112#ifdef pr_fmt 115#ifdef pr_fmt
113#undef pr_fmt 116#undef pr_fmt
114#endif 117#endif
@@ -120,17 +123,11 @@ enum dbg_level {
120 DBG_BATMAN = 1 << 0, 123 DBG_BATMAN = 1 << 0,
121 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ 124 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
122 DBG_TT = 1 << 2, /* translation table operations */ 125 DBG_TT = 1 << 2, /* translation table operations */
123 DBG_ALL = 7 126 DBG_BLA = 1 << 3, /* bridge loop avoidance */
127 DBG_ALL = 15
124}; 128};
125 129
126 130/* Kernel headers */
127/*
128 * Vis
129 */
130
131/*
132 * Kernel headers
133 */
134 131
135#include <linux/mutex.h> /* mutex */ 132#include <linux/mutex.h> /* mutex */
136#include <linux/module.h> /* needed by all modules */ 133#include <linux/module.h> /* needed by all modules */
@@ -147,6 +144,7 @@ enum dbg_level {
147#include <linux/seq_file.h> 144#include <linux/seq_file.h>
148#include "types.h" 145#include "types.h"
149 146
147extern char bat_routing_algo[];
150extern struct list_head hardif_list; 148extern struct list_head hardif_list;
151 149
152extern unsigned char broadcast_addr[]; 150extern unsigned char broadcast_addr[];
@@ -157,6 +155,15 @@ void mesh_free(struct net_device *soft_iface);
157void inc_module_count(void); 155void inc_module_count(void);
158void dec_module_count(void); 156void dec_module_count(void);
159int is_my_mac(const uint8_t *addr); 157int is_my_mac(const uint8_t *addr);
158int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
159 struct packet_type *ptype, struct net_device *orig_dev);
160int recv_handler_register(uint8_t packet_type,
161 int (*recv_handler)(struct sk_buff *,
162 struct hard_iface *));
163void recv_handler_unregister(uint8_t packet_type);
164int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
165int bat_algo_select(struct bat_priv *bat_priv, char *name);
166int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
160 167
161#ifdef CONFIG_BATMAN_ADV_DEBUG 168#ifdef CONFIG_BATMAN_ADV_DEBUG
162int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); 169int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
@@ -202,6 +209,17 @@ static inline int compare_eth(const void *data1, const void *data2)
202 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 209 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
203} 210}
204 211
212/**
213 * has_timed_out - compares current time (jiffies) and timestamp + timeout
214 * @timestamp: base value to compare with (in jiffies)
215 * @timeout: added to base value before comparing (in milliseconds)
216 *
217 * Returns true if current time is after timestamp + timeout
218 */
219static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
220{
221 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
222}
205 223
206#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 224#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
207 225
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0bc2045a2f2e..41147942ba53 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,13 +28,15 @@
28#include "hard-interface.h" 28#include "hard-interface.h"
29#include "unicast.h" 29#include "unicast.h"
30#include "soft-interface.h" 30#include "soft-interface.h"
31#include "bridge_loop_avoidance.h"
31 32
32static void purge_orig(struct work_struct *work); 33static void purge_orig(struct work_struct *work);
33 34
34static void start_purge_timer(struct bat_priv *bat_priv) 35static void start_purge_timer(struct bat_priv *bat_priv)
35{ 36{
36 INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); 37 INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
37 queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); 38 queue_delayed_work(bat_event_workqueue,
39 &bat_priv->orig_work, msecs_to_jiffies(1000));
38} 40}
39 41
40/* returns 1 if they are the same originator */ 42/* returns 1 if they are the same originator */
@@ -83,35 +85,30 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
83 return router; 85 return router;
84} 86}
85 87
86struct neigh_node *create_neighbor(struct orig_node *orig_node, 88struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
87 struct orig_node *orig_neigh_node, 89 const uint8_t *neigh_addr,
88 const uint8_t *neigh, 90 uint32_t seqno)
89 struct hard_iface *if_incoming)
90{ 91{
91 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 92 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
92 struct neigh_node *neigh_node; 93 struct neigh_node *neigh_node;
93 94
94 bat_dbg(DBG_BATMAN, bat_priv,
95 "Creating new last-hop neighbor of originator\n");
96
97 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); 95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
98 if (!neigh_node) 96 if (!neigh_node)
99 return NULL; 97 goto out;
100 98
101 INIT_HLIST_NODE(&neigh_node->list); 99 INIT_HLIST_NODE(&neigh_node->list);
102 INIT_LIST_HEAD(&neigh_node->bonding_list);
103 spin_lock_init(&neigh_node->tq_lock);
104 100
105 memcpy(neigh_node->addr, neigh, ETH_ALEN); 101 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
106 neigh_node->orig_node = orig_neigh_node; 102 spin_lock_init(&neigh_node->lq_update_lock);
107 neigh_node->if_incoming = if_incoming;
108 103
109 /* extra reference for return */ 104 /* extra reference for return */
110 atomic_set(&neigh_node->refcount, 2); 105 atomic_set(&neigh_node->refcount, 2);
111 106
112 spin_lock_bh(&orig_node->neigh_list_lock); 107 bat_dbg(DBG_BATMAN, bat_priv,
113 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 108 "Creating new neighbor %pM, initial seqno %d\n",
114 spin_unlock_bh(&orig_node->neigh_list_lock); 109 neigh_addr, seqno);
110
111out:
115 return neigh_node; 112 return neigh_node;
116} 113}
117 114
@@ -143,7 +140,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
143 140
144 frag_list_free(&orig_node->frag_list); 141 frag_list_free(&orig_node->frag_list);
145 tt_global_del_orig(orig_node->bat_priv, orig_node, 142 tt_global_del_orig(orig_node->bat_priv, orig_node,
146 "originator timed out"); 143 "originator timed out");
147 144
148 kfree(orig_node->tt_buff); 145 kfree(orig_node->tt_buff);
149 kfree(orig_node->bcast_own); 146 kfree(orig_node->bcast_own);
@@ -219,6 +216,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
219 /* extra reference for return */ 216 /* extra reference for return */
220 atomic_set(&orig_node->refcount, 2); 217 atomic_set(&orig_node->refcount, 2);
221 218
219 orig_node->tt_initialised = false;
222 orig_node->tt_poss_change = false; 220 orig_node->tt_poss_change = false;
223 orig_node->bat_priv = bat_priv; 221 orig_node->bat_priv = bat_priv;
224 memcpy(orig_node->orig, addr, ETH_ALEN); 222 memcpy(orig_node->orig, addr, ETH_ALEN);
@@ -272,6 +270,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
272 struct hlist_node *node, *node_tmp; 270 struct hlist_node *node, *node_tmp;
273 struct neigh_node *neigh_node; 271 struct neigh_node *neigh_node;
274 bool neigh_purged = false; 272 bool neigh_purged = false;
273 unsigned long last_seen;
275 274
276 *best_neigh_node = NULL; 275 *best_neigh_node = NULL;
277 276
@@ -281,12 +280,13 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
281 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 280 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
282 &orig_node->neigh_list, list) { 281 &orig_node->neigh_list, list) {
283 282
284 if ((time_after(jiffies, 283 if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
285 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
286 (neigh_node->if_incoming->if_status == IF_INACTIVE) || 284 (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
287 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || 285 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
288 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { 286 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
289 287
288 last_seen = neigh_node->last_seen;
289
290 if ((neigh_node->if_incoming->if_status == 290 if ((neigh_node->if_incoming->if_status ==
291 IF_INACTIVE) || 291 IF_INACTIVE) ||
292 (neigh_node->if_incoming->if_status == 292 (neigh_node->if_incoming->if_status ==
@@ -294,16 +294,14 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
294 (neigh_node->if_incoming->if_status == 294 (neigh_node->if_incoming->if_status ==
295 IF_TO_BE_REMOVED)) 295 IF_TO_BE_REMOVED))
296 bat_dbg(DBG_BATMAN, bat_priv, 296 bat_dbg(DBG_BATMAN, bat_priv,
297 "neighbor purge: originator %pM, " 297 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
298 "neighbor: %pM, iface: %s\n",
299 orig_node->orig, neigh_node->addr, 298 orig_node->orig, neigh_node->addr,
300 neigh_node->if_incoming->net_dev->name); 299 neigh_node->if_incoming->net_dev->name);
301 else 300 else
302 bat_dbg(DBG_BATMAN, bat_priv, 301 bat_dbg(DBG_BATMAN, bat_priv,
303 "neighbor timeout: originator %pM, " 302 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
304 "neighbor: %pM, last_valid: %lu\n",
305 orig_node->orig, neigh_node->addr, 303 orig_node->orig, neigh_node->addr,
306 (neigh_node->last_valid / HZ)); 304 jiffies_to_msecs(last_seen));
307 305
308 neigh_purged = true; 306 neigh_purged = true;
309 307
@@ -326,18 +324,16 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
326{ 324{
327 struct neigh_node *best_neigh_node; 325 struct neigh_node *best_neigh_node;
328 326
329 if (time_after(jiffies, 327 if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
330 orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
331
332 bat_dbg(DBG_BATMAN, bat_priv, 328 bat_dbg(DBG_BATMAN, bat_priv,
333 "Originator timeout: originator %pM, last_valid %lu\n", 329 "Originator timeout: originator %pM, last_seen %u\n",
334 orig_node->orig, (orig_node->last_valid / HZ)); 330 orig_node->orig,
331 jiffies_to_msecs(orig_node->last_seen));
335 return true; 332 return true;
336 } else { 333 } else {
337 if (purge_orig_neighbors(bat_priv, orig_node, 334 if (purge_orig_neighbors(bat_priv, orig_node,
338 &best_neigh_node)) { 335 &best_neigh_node))
339 update_route(bat_priv, orig_node, best_neigh_node); 336 update_route(bat_priv, orig_node, best_neigh_node);
340 }
341 } 337 }
342 338
343 return false; 339 return false;
@@ -371,8 +367,8 @@ static void _purge_orig(struct bat_priv *bat_priv)
371 continue; 367 continue;
372 } 368 }
373 369
374 if (time_after(jiffies, orig_node->last_frag_packet + 370 if (has_timed_out(orig_node->last_frag_packet,
375 msecs_to_jiffies(FRAG_TIMEOUT))) 371 FRAG_TIMEOUT))
376 frag_list_free(&orig_node->frag_list); 372 frag_list_free(&orig_node->frag_list);
377 } 373 }
378 spin_unlock_bh(list_lock); 374 spin_unlock_bh(list_lock);
@@ -380,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv)
380 376
381 gw_node_purge(bat_priv); 377 gw_node_purge(bat_priv);
382 gw_election(bat_priv); 378 gw_election(bat_priv);
383
384 softif_neigh_purge(bat_priv);
385} 379}
386 380
387static void purge_orig(struct work_struct *work) 381static void purge_orig(struct work_struct *work)
@@ -419,15 +413,15 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
419 primary_if = primary_if_get_selected(bat_priv); 413 primary_if = primary_if_get_selected(bat_priv);
420 414
421 if (!primary_if) { 415 if (!primary_if) {
422 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 416 ret = seq_printf(seq,
423 "please specify interfaces to enable it\n", 417 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
424 net_dev->name); 418 net_dev->name);
425 goto out; 419 goto out;
426 } 420 }
427 421
428 if (primary_if->if_status != IF_ACTIVE) { 422 if (primary_if->if_status != IF_ACTIVE) {
429 ret = seq_printf(seq, "BATMAN mesh %s " 423 ret = seq_printf(seq,
430 "disabled - primary interface not active\n", 424 "BATMAN mesh %s disabled - primary interface not active\n",
431 net_dev->name); 425 net_dev->name);
432 goto out; 426 goto out;
433 } 427 }
@@ -452,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
452 goto next; 446 goto next;
453 447
454 last_seen_secs = jiffies_to_msecs(jiffies - 448 last_seen_secs = jiffies_to_msecs(jiffies -
455 orig_node->last_valid) / 1000; 449 orig_node->last_seen) / 1000;
456 last_seen_msecs = jiffies_to_msecs(jiffies - 450 last_seen_msecs = jiffies_to_msecs(jiffies -
457 orig_node->last_valid) % 1000; 451 orig_node->last_seen) % 1000;
458 452
459 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 453 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
460 orig_node->orig, last_seen_secs, 454 orig_node->orig, last_seen_secs,
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 67765ffef731..f74d0d693359 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv);
29void purge_orig_ref(struct bat_priv *bat_priv); 29void purge_orig_ref(struct bat_priv *bat_priv);
30void orig_node_free_ref(struct orig_node *orig_node); 30void orig_node_free_ref(struct orig_node *orig_node);
31struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); 31struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
32struct neigh_node *create_neighbor(struct orig_node *orig_node, 32struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
33 struct orig_node *orig_neigh_node, 33 const uint8_t *neigh_addr,
34 const uint8_t *neigh, 34 uint32_t seqno);
35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node); 35void neigh_node_free_ref(struct neigh_node *neigh_node);
37struct neigh_node *orig_node_get_router(struct orig_node *orig_node); 36struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
38int orig_seq_print_text(struct seq_file *seq, void *offset); 37int orig_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 4d9e54c57a36..0ee1af770798 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -25,7 +25,7 @@
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 26
27enum bat_packettype { 27enum bat_packettype {
28 BAT_OGM = 0x01, 28 BAT_IV_OGM = 0x01,
29 BAT_ICMP = 0x02, 29 BAT_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 30 BAT_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 31 BAT_BCAST = 0x04,
@@ -38,7 +38,8 @@ enum bat_packettype {
38/* this file is included by batctl which needs these defines */ 38/* this file is included by batctl which needs these defines */
39#define COMPAT_VERSION 14 39#define COMPAT_VERSION 14
40 40
41enum batman_flags { 41enum batman_iv_flags {
42 NOT_BEST_NEXT_HOP = 1 << 3,
42 PRIMARIES_FIRST_HOP = 1 << 4, 43 PRIMARIES_FIRST_HOP = 1 << 4,
43 VIS_SERVER = 1 << 5, 44 VIS_SERVER = 1 << 5,
44 DIRECTLINK = 1 << 6 45 DIRECTLINK = 1 << 6
@@ -90,14 +91,35 @@ enum tt_client_flags {
90 TT_CLIENT_PENDING = 1 << 10 91 TT_CLIENT_PENDING = 1 << 10
91}; 92};
92 93
93struct batman_ogm_packet { 94/* claim frame types for the bridge loop avoidance */
95enum bla_claimframe {
96 CLAIM_TYPE_ADD = 0x00,
97 CLAIM_TYPE_DEL = 0x01,
98 CLAIM_TYPE_ANNOUNCE = 0x02,
99 CLAIM_TYPE_REQUEST = 0x03
100};
101
102/* the destination hardware field in the ARP frame is used to
103 * transport the claim type and the group id
104 */
105struct bla_claim_dst {
106 uint8_t magic[3]; /* FF:43:05 */
107 uint8_t type; /* bla_claimframe */
108 uint16_t group; /* group id */
109} __packed;
110
111struct batman_header {
94 uint8_t packet_type; 112 uint8_t packet_type;
95 uint8_t version; /* batman version field */ 113 uint8_t version; /* batman version field */
96 uint8_t ttl; 114 uint8_t ttl;
115} __packed;
116
117struct batman_ogm_packet {
118 struct batman_header header;
97 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 119 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
98 uint32_t seqno; 120 uint32_t seqno;
99 uint8_t orig[6]; 121 uint8_t orig[ETH_ALEN];
100 uint8_t prev_sender[6]; 122 uint8_t prev_sender[ETH_ALEN];
101 uint8_t gw_flags; /* flags related to gateway class */ 123 uint8_t gw_flags; /* flags related to gateway class */
102 uint8_t tq; 124 uint8_t tq;
103 uint8_t tt_num_changes; 125 uint8_t tt_num_changes;
@@ -105,15 +127,13 @@ struct batman_ogm_packet {
105 uint16_t tt_crc; 127 uint16_t tt_crc;
106} __packed; 128} __packed;
107 129
108#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) 130#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
109 131
110struct icmp_packet { 132struct icmp_packet {
111 uint8_t packet_type; 133 struct batman_header header;
112 uint8_t version; /* batman version field */
113 uint8_t ttl;
114 uint8_t msg_type; /* see ICMP message types above */ 134 uint8_t msg_type; /* see ICMP message types above */
115 uint8_t dst[6]; 135 uint8_t dst[ETH_ALEN];
116 uint8_t orig[6]; 136 uint8_t orig[ETH_ALEN];
117 uint16_t seqno; 137 uint16_t seqno;
118 uint8_t uid; 138 uint8_t uid;
119 uint8_t reserved; 139 uint8_t reserved;
@@ -124,12 +144,10 @@ struct icmp_packet {
124/* icmp_packet_rr must start with all fields from imcp_packet 144/* icmp_packet_rr must start with all fields from imcp_packet
125 * as this is assumed by code that handles ICMP packets */ 145 * as this is assumed by code that handles ICMP packets */
126struct icmp_packet_rr { 146struct icmp_packet_rr {
127 uint8_t packet_type; 147 struct batman_header header;
128 uint8_t version; /* batman version field */
129 uint8_t ttl;
130 uint8_t msg_type; /* see ICMP message types above */ 148 uint8_t msg_type; /* see ICMP message types above */
131 uint8_t dst[6]; 149 uint8_t dst[ETH_ALEN];
132 uint8_t orig[6]; 150 uint8_t orig[ETH_ALEN];
133 uint16_t seqno; 151 uint16_t seqno;
134 uint8_t uid; 152 uint8_t uid;
135 uint8_t rr_cur; 153 uint8_t rr_cur;
@@ -137,51 +155,41 @@ struct icmp_packet_rr {
137} __packed; 155} __packed;
138 156
139struct unicast_packet { 157struct unicast_packet {
140 uint8_t packet_type; 158 struct batman_header header;
141 uint8_t version; /* batman version field */
142 uint8_t ttl;
143 uint8_t ttvn; /* destination translation table version number */ 159 uint8_t ttvn; /* destination translation table version number */
144 uint8_t dest[6]; 160 uint8_t dest[ETH_ALEN];
145} __packed; 161} __packed;
146 162
147struct unicast_frag_packet { 163struct unicast_frag_packet {
148 uint8_t packet_type; 164 struct batman_header header;
149 uint8_t version; /* batman version field */
150 uint8_t ttl;
151 uint8_t ttvn; /* destination translation table version number */ 165 uint8_t ttvn; /* destination translation table version number */
152 uint8_t dest[6]; 166 uint8_t dest[ETH_ALEN];
153 uint8_t flags; 167 uint8_t flags;
154 uint8_t align; 168 uint8_t align;
155 uint8_t orig[6]; 169 uint8_t orig[ETH_ALEN];
156 uint16_t seqno; 170 uint16_t seqno;
157} __packed; 171} __packed;
158 172
159struct bcast_packet { 173struct bcast_packet {
160 uint8_t packet_type; 174 struct batman_header header;
161 uint8_t version; /* batman version field */
162 uint8_t ttl;
163 uint8_t reserved; 175 uint8_t reserved;
164 uint32_t seqno; 176 uint32_t seqno;
165 uint8_t orig[6]; 177 uint8_t orig[ETH_ALEN];
166} __packed; 178} __packed;
167 179
168struct vis_packet { 180struct vis_packet {
169 uint8_t packet_type; 181 struct batman_header header;
170 uint8_t version; /* batman version field */
171 uint8_t ttl; /* TTL */
172 uint8_t vis_type; /* which type of vis-participant sent this? */ 182 uint8_t vis_type; /* which type of vis-participant sent this? */
173 uint32_t seqno; /* sequence number */ 183 uint32_t seqno; /* sequence number */
174 uint8_t entries; /* number of entries behind this struct */ 184 uint8_t entries; /* number of entries behind this struct */
175 uint8_t reserved; 185 uint8_t reserved;
176 uint8_t vis_orig[6]; /* originator that announces its neighbors */ 186 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
177 uint8_t target_orig[6]; /* who should receive this packet */ 187 uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */
178 uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ 188 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
179} __packed; 189} __packed;
180 190
181struct tt_query_packet { 191struct tt_query_packet {
182 uint8_t packet_type; 192 struct batman_header header;
183 uint8_t version; /* batman version field */
184 uint8_t ttl;
185 /* the flag field is a combination of: 193 /* the flag field is a combination of:
186 * - TT_REQUEST or TT_RESPONSE 194 * - TT_REQUEST or TT_RESPONSE
187 * - TT_FULL_TABLE */ 195 * - TT_FULL_TABLE */
@@ -202,9 +210,7 @@ struct tt_query_packet {
202} __packed; 210} __packed;
203 211
204struct roam_adv_packet { 212struct roam_adv_packet {
205 uint8_t packet_type; 213 struct batman_header header;
206 uint8_t version;
207 uint8_t ttl;
208 uint8_t reserved; 214 uint8_t reserved;
209 uint8_t dst[ETH_ALEN]; 215 uint8_t dst[ETH_ALEN];
210 uint8_t src[ETH_ALEN]; 216 uint8_t src[ETH_ALEN];
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index f1ccfa76ce8a..fd63951d118d 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 7cdfe62b657c..8b58bd82767d 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 773e606f9702..840e2c64a301 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -29,7 +29,10 @@
29#include "originator.h" 29#include "originator.h"
30#include "vis.h" 30#include "vis.h"
31#include "unicast.h" 31#include "unicast.h"
32#include "bat_ogm.h" 32#include "bridge_loop_avoidance.h"
33
34static int route_unicast_packet(struct sk_buff *skb,
35 struct hard_iface *recv_if);
33 36
34void slide_own_bcast_window(struct hard_iface *hard_iface) 37void slide_own_bcast_window(struct hard_iface *hard_iface)
35{ 38{
@@ -53,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
53 56
54 bit_get_packet(bat_priv, word, 1, 0); 57 bit_get_packet(bat_priv, word, 1, 0);
55 orig_node->bcast_own_sum[hard_iface->if_num] = 58 orig_node->bcast_own_sum[hard_iface->if_num] =
56 bit_packet_count(word); 59 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
57 spin_unlock_bh(&orig_node->ogm_cnt_lock); 60 spin_unlock_bh(&orig_node->ogm_cnt_lock);
58 } 61 }
59 rcu_read_unlock(); 62 rcu_read_unlock();
@@ -73,7 +76,7 @@ static void _update_route(struct bat_priv *bat_priv,
73 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 76 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
74 orig_node->orig); 77 orig_node->orig);
75 tt_global_del_orig(bat_priv, orig_node, 78 tt_global_del_orig(bat_priv, orig_node,
76 "Deleted route towards originator"); 79 "Deleted route towards originator");
77 80
78 /* route added */ 81 /* route added */
79 } else if ((!curr_router) && (neigh_node)) { 82 } else if ((!curr_router) && (neigh_node)) {
@@ -84,8 +87,7 @@ static void _update_route(struct bat_priv *bat_priv,
84 /* route changed */ 87 /* route changed */
85 } else if (neigh_node && curr_router) { 88 } else if (neigh_node && curr_router) {
86 bat_dbg(DBG_ROUTES, bat_priv, 89 bat_dbg(DBG_ROUTES, bat_priv,
87 "Changing route towards: %pM " 90 "Changing route towards: %pM (now via %pM - was via %pM)\n",
88 "(now via %pM - was via %pM)\n",
89 orig_node->orig, neigh_node->addr, 91 orig_node->orig, neigh_node->addr,
90 curr_router->addr); 92 curr_router->addr);
91 } 93 }
@@ -230,54 +232,48 @@ void bonding_save_primary(const struct orig_node *orig_node,
230int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 232int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
231 unsigned long *last_reset) 233 unsigned long *last_reset)
232{ 234{
233 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 235 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
234 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 236 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
235 if (time_after(jiffies, *last_reset + 237 if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
236 msecs_to_jiffies(RESET_PROTECTION_MS))) {
237
238 *last_reset = jiffies;
239 bat_dbg(DBG_BATMAN, bat_priv,
240 "old packet received, start protection\n");
241
242 return 0;
243 } else
244 return 1; 238 return 1;
239
240 *last_reset = jiffies;
241 bat_dbg(DBG_BATMAN, bat_priv,
242 "old packet received, start protection\n");
245 } 243 }
244
246 return 0; 245 return 0;
247} 246}
248 247
249int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) 248bool check_management_packet(struct sk_buff *skb,
249 struct hard_iface *hard_iface,
250 int header_len)
250{ 251{
251 struct ethhdr *ethhdr; 252 struct ethhdr *ethhdr;
252 253
253 /* drop packet if it has not necessary minimum size */ 254 /* drop packet if it has not necessary minimum size */
254 if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) 255 if (unlikely(!pskb_may_pull(skb, header_len)))
255 return NET_RX_DROP; 256 return false;
256 257
257 ethhdr = (struct ethhdr *)skb_mac_header(skb); 258 ethhdr = (struct ethhdr *)skb_mac_header(skb);
258 259
259 /* packet with broadcast indication but unicast recipient */ 260 /* packet with broadcast indication but unicast recipient */
260 if (!is_broadcast_ether_addr(ethhdr->h_dest)) 261 if (!is_broadcast_ether_addr(ethhdr->h_dest))
261 return NET_RX_DROP; 262 return false;
262 263
263 /* packet with broadcast sender address */ 264 /* packet with broadcast sender address */
264 if (is_broadcast_ether_addr(ethhdr->h_source)) 265 if (is_broadcast_ether_addr(ethhdr->h_source))
265 return NET_RX_DROP; 266 return false;
266 267
267 /* create a copy of the skb, if needed, to modify it. */ 268 /* create a copy of the skb, if needed, to modify it. */
268 if (skb_cow(skb, 0) < 0) 269 if (skb_cow(skb, 0) < 0)
269 return NET_RX_DROP; 270 return false;
270 271
271 /* keep skb linear */ 272 /* keep skb linear */
272 if (skb_linearize(skb) < 0) 273 if (skb_linearize(skb) < 0)
273 return NET_RX_DROP; 274 return false;
274
275 ethhdr = (struct ethhdr *)skb_mac_header(skb);
276 275
277 bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface); 276 return true;
278
279 kfree_skb(skb);
280 return NET_RX_SUCCESS;
281} 277}
282 278
283static int recv_my_icmp_packet(struct bat_priv *bat_priv, 279static int recv_my_icmp_packet(struct bat_priv *bat_priv,
@@ -312,7 +308,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
312 goto out; 308 goto out;
313 309
314 /* create a copy of the skb, if needed, to modify it. */ 310 /* create a copy of the skb, if needed, to modify it. */
315 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 311 if (skb_cow(skb, ETH_HLEN) < 0)
316 goto out; 312 goto out;
317 313
318 icmp_packet = (struct icmp_packet_rr *)skb->data; 314 icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -320,7 +316,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
320 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 316 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
321 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 317 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
322 icmp_packet->msg_type = ECHO_REPLY; 318 icmp_packet->msg_type = ECHO_REPLY;
323 icmp_packet->ttl = TTL; 319 icmp_packet->header.ttl = TTL;
324 320
325 send_skb_packet(skb, router->if_incoming, router->addr); 321 send_skb_packet(skb, router->if_incoming, router->addr);
326 ret = NET_RX_SUCCESS; 322 ret = NET_RX_SUCCESS;
@@ -348,9 +344,8 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
348 344
349 /* send TTL exceeded if packet is an echo request (traceroute) */ 345 /* send TTL exceeded if packet is an echo request (traceroute) */
350 if (icmp_packet->msg_type != ECHO_REQUEST) { 346 if (icmp_packet->msg_type != ECHO_REQUEST) {
351 pr_debug("Warning - can't forward icmp packet from %pM to " 347 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
352 "%pM: ttl exceeded\n", icmp_packet->orig, 348 icmp_packet->orig, icmp_packet->dst);
353 icmp_packet->dst);
354 goto out; 349 goto out;
355 } 350 }
356 351
@@ -368,7 +363,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
368 goto out; 363 goto out;
369 364
370 /* create a copy of the skb, if needed, to modify it. */ 365 /* create a copy of the skb, if needed, to modify it. */
371 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 366 if (skb_cow(skb, ETH_HLEN) < 0)
372 goto out; 367 goto out;
373 368
374 icmp_packet = (struct icmp_packet *)skb->data; 369 icmp_packet = (struct icmp_packet *)skb->data;
@@ -376,7 +371,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
376 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 371 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
377 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 372 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
378 icmp_packet->msg_type = TTL_EXCEEDED; 373 icmp_packet->msg_type = TTL_EXCEEDED;
379 icmp_packet->ttl = TTL; 374 icmp_packet->header.ttl = TTL;
380 375
381 send_skb_packet(skb, router->if_incoming, router->addr); 376 send_skb_packet(skb, router->if_incoming, router->addr);
382 ret = NET_RX_SUCCESS; 377 ret = NET_RX_SUCCESS;
@@ -432,7 +427,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
432 if ((hdr_size == sizeof(struct icmp_packet_rr)) && 427 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
433 (icmp_packet->rr_cur < BAT_RR_LEN)) { 428 (icmp_packet->rr_cur < BAT_RR_LEN)) {
434 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), 429 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
435 ethhdr->h_dest, ETH_ALEN); 430 ethhdr->h_dest, ETH_ALEN);
436 icmp_packet->rr_cur++; 431 icmp_packet->rr_cur++;
437 } 432 }
438 433
@@ -441,7 +436,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
441 return recv_my_icmp_packet(bat_priv, skb, hdr_size); 436 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
442 437
443 /* TTL exceeded */ 438 /* TTL exceeded */
444 if (icmp_packet->ttl < 2) 439 if (icmp_packet->header.ttl < 2)
445 return recv_icmp_ttl_exceeded(bat_priv, skb); 440 return recv_icmp_ttl_exceeded(bat_priv, skb);
446 441
447 /* get routing information */ 442 /* get routing information */
@@ -454,13 +449,13 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
454 goto out; 449 goto out;
455 450
456 /* create a copy of the skb, if needed, to modify it. */ 451 /* create a copy of the skb, if needed, to modify it. */
457 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 452 if (skb_cow(skb, ETH_HLEN) < 0)
458 goto out; 453 goto out;
459 454
460 icmp_packet = (struct icmp_packet_rr *)skb->data; 455 icmp_packet = (struct icmp_packet_rr *)skb->data;
461 456
462 /* decrement ttl */ 457 /* decrement ttl */
463 icmp_packet->ttl--; 458 icmp_packet->header.ttl--;
464 459
465 /* route it */ 460 /* route it */
466 send_skb_packet(skb, router->if_incoming, router->addr); 461 send_skb_packet(skb, router->if_incoming, router->addr);
@@ -673,13 +668,20 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
673 if (!is_my_mac(roam_adv_packet->dst)) 668 if (!is_my_mac(roam_adv_packet->dst))
674 return route_unicast_packet(skb, recv_if); 669 return route_unicast_packet(skb, recv_if);
675 670
671 /* check if it is a backbone gateway. we don't accept
672 * roaming advertisement from it, as it has the same
673 * entries as we have.
674 */
675 if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
676 goto out;
677
676 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); 678 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
677 if (!orig_node) 679 if (!orig_node)
678 goto out; 680 goto out;
679 681
680 bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM " 682 bat_dbg(DBG_TT, bat_priv,
681 "(client %pM)\n", roam_adv_packet->src, 683 "Received ROAMING_ADV from %pM (client %pM)\n",
682 roam_adv_packet->client); 684 roam_adv_packet->src, roam_adv_packet->client);
683 685
684 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 686 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
685 atomic_read(&orig_node->last_ttvn) + 1, true, false); 687 atomic_read(&orig_node->last_ttvn) + 1, true, false);
@@ -802,7 +804,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
802 return 0; 804 return 0;
803} 805}
804 806
805int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 807static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
806{ 808{
807 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 809 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
808 struct orig_node *orig_node = NULL; 810 struct orig_node *orig_node = NULL;
@@ -815,10 +817,9 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
815 unicast_packet = (struct unicast_packet *)skb->data; 817 unicast_packet = (struct unicast_packet *)skb->data;
816 818
817 /* TTL exceeded */ 819 /* TTL exceeded */
818 if (unicast_packet->ttl < 2) { 820 if (unicast_packet->header.ttl < 2) {
819 pr_debug("Warning - can't forward unicast packet from %pM to " 821 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
820 "%pM: ttl exceeded\n", ethhdr->h_source, 822 ethhdr->h_source, unicast_packet->dest);
821 unicast_packet->dest);
822 goto out; 823 goto out;
823 } 824 }
824 825
@@ -835,12 +836,12 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
835 goto out; 836 goto out;
836 837
837 /* create a copy of the skb, if needed, to modify it. */ 838 /* create a copy of the skb, if needed, to modify it. */
838 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 839 if (skb_cow(skb, ETH_HLEN) < 0)
839 goto out; 840 goto out;
840 841
841 unicast_packet = (struct unicast_packet *)skb->data; 842 unicast_packet = (struct unicast_packet *)skb->data;
842 843
843 if (unicast_packet->packet_type == BAT_UNICAST && 844 if (unicast_packet->header.packet_type == BAT_UNICAST &&
844 atomic_read(&bat_priv->fragmentation) && 845 atomic_read(&bat_priv->fragmentation) &&
845 skb->len > neigh_node->if_incoming->net_dev->mtu) { 846 skb->len > neigh_node->if_incoming->net_dev->mtu) {
846 ret = frag_send_skb(skb, bat_priv, 847 ret = frag_send_skb(skb, bat_priv,
@@ -848,7 +849,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
848 goto out; 849 goto out;
849 } 850 }
850 851
851 if (unicast_packet->packet_type == BAT_UNICAST_FRAG && 852 if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
852 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { 853 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
853 854
854 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 855 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
@@ -867,7 +868,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
867 } 868 }
868 869
869 /* decrement ttl */ 870 /* decrement ttl */
870 unicast_packet->ttl--; 871 unicast_packet->header.ttl--;
871 872
872 /* route it */ 873 /* route it */
873 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 874 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -912,12 +913,20 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
912 913
913 /* Check whether I have to reroute the packet */ 914 /* Check whether I have to reroute the packet */
914 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { 915 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
915 /* Linearize the skb before accessing it */ 916 /* check if there is enough data before accessing it */
916 if (skb_linearize(skb) < 0) 917 if (pskb_may_pull(skb, sizeof(struct unicast_packet) +
918 ETH_HLEN) < 0)
917 return 0; 919 return 0;
918 920
919 ethhdr = (struct ethhdr *)(skb->data + 921 ethhdr = (struct ethhdr *)(skb->data +
920 sizeof(struct unicast_packet)); 922 sizeof(struct unicast_packet));
923
924 /* we don't have an updated route for this client, so we should
925 * not try to reroute the packet!!
926 */
927 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
928 return 1;
929
921 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); 930 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
922 931
923 if (!orig_node) { 932 if (!orig_node) {
@@ -937,10 +946,10 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
937 orig_node_free_ref(orig_node); 946 orig_node_free_ref(orig_node);
938 } 947 }
939 948
940 bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u " 949 bat_dbg(DBG_ROUTES, bat_priv,
941 "new_ttvn %u)! Rerouting unicast packet (for %pM) to " 950 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
942 "%pM\n", unicast_packet->ttvn, curr_ttvn, 951 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
943 ethhdr->h_dest, unicast_packet->dest); 952 unicast_packet->dest);
944 953
945 unicast_packet->ttvn = curr_ttvn; 954 unicast_packet->ttvn = curr_ttvn;
946 } 955 }
@@ -1041,7 +1050,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1041 if (is_my_mac(bcast_packet->orig)) 1050 if (is_my_mac(bcast_packet->orig))
1042 goto out; 1051 goto out;
1043 1052
1044 if (bcast_packet->ttl < 2) 1053 if (bcast_packet->header.ttl < 2)
1045 goto out; 1054 goto out;
1046 1055
1047 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1056 orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
@@ -1052,8 +1061,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1052 spin_lock_bh(&orig_node->bcast_seqno_lock); 1061 spin_lock_bh(&orig_node->bcast_seqno_lock);
1053 1062
1054 /* check whether the packet is a duplicate */ 1063 /* check whether the packet is a duplicate */
1055 if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, 1064 if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1056 ntohl(bcast_packet->seqno))) 1065 ntohl(bcast_packet->seqno)))
1057 goto spin_unlock; 1066 goto spin_unlock;
1058 1067
1059 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; 1068 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1070,9 +1079,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1070 1079
1071 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1080 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1072 1081
1082 /* check whether this has been sent by another originator before */
1083 if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
1084 goto out;
1085
1073 /* rebroadcast packet */ 1086 /* rebroadcast packet */
1074 add_bcast_packet_to_list(bat_priv, skb, 1); 1087 add_bcast_packet_to_list(bat_priv, skb, 1);
1075 1088
1089 /* don't hand the broadcast up if it is from an originator
1090 * from the same backbone.
1091 */
1092 if (bla_is_backbone_gw(skb, orig_node, hdr_size))
1093 goto out;
1094
1076 /* broadcast for me */ 1095 /* broadcast for me */
1077 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1096 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1078 ret = NET_RX_SUCCESS; 1097 ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 7aaee0fb0fdc..d6bbbebb6567 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -23,15 +23,16 @@
23#define _NET_BATMAN_ADV_ROUTING_H_ 23#define _NET_BATMAN_ADV_ROUTING_H_
24 24
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26bool check_management_packet(struct sk_buff *skb,
27 struct hard_iface *hard_iface,
28 int header_len);
26void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 29void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
27 struct neigh_node *neigh_node); 30 struct neigh_node *neigh_node);
28int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
29int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 31int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
30int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
31int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
32int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 34int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
33int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 35int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); 36int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
36int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); 37int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
37struct neigh_node *find_router(struct bat_priv *bat_priv, 38struct neigh_node *find_router(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 8a684eb738ad..f47299f22c68 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,7 +28,6 @@
28#include "vis.h" 28#include "vis.h"
29#include "gateway_common.h" 29#include "gateway_common.h"
30#include "originator.h" 30#include "originator.h"
31#include "bat_ogm.h"
32 31
33static void send_outstanding_bcast_packet(struct work_struct *work); 32static void send_outstanding_bcast_packet(struct work_struct *work);
34 33
@@ -46,18 +45,18 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
46 goto send_skb_err; 45 goto send_skb_err;
47 46
48 if (!(hard_iface->net_dev->flags & IFF_UP)) { 47 if (!(hard_iface->net_dev->flags & IFF_UP)) {
49 pr_warning("Interface %s is not up - can't send packet via " 48 pr_warn("Interface %s is not up - can't send packet via that interface!\n",
50 "that interface!\n", hard_iface->net_dev->name); 49 hard_iface->net_dev->name);
51 goto send_skb_err; 50 goto send_skb_err;
52 } 51 }
53 52
54 /* push to the ethernet header. */ 53 /* push to the ethernet header. */
55 if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) 54 if (my_skb_head_push(skb, ETH_HLEN) < 0)
56 goto send_skb_err; 55 goto send_skb_err;
57 56
58 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
59 58
60 ethhdr = (struct ethhdr *) skb_mac_header(skb); 59 ethhdr = (struct ethhdr *)skb_mac_header(skb);
61 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); 60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
62 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
63 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 62 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
@@ -88,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
88 /* keep old buffer if kmalloc should fail */ 87 /* keep old buffer if kmalloc should fail */
89 if (new_buff) { 88 if (new_buff) {
90 memcpy(new_buff, hard_iface->packet_buff, 89 memcpy(new_buff, hard_iface->packet_buff,
91 BATMAN_OGM_LEN); 90 BATMAN_OGM_HLEN);
92 91
93 kfree(hard_iface->packet_buff); 92 kfree(hard_iface->packet_buff);
94 hard_iface->packet_buff = new_buff; 93 hard_iface->packet_buff = new_buff;
@@ -102,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
102{ 101{
103 int new_len; 102 int new_len;
104 103
105 new_len = BATMAN_OGM_LEN + 104 new_len = BATMAN_OGM_HLEN +
106 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); 105 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
107 106
108 /* if we have too many changes for one packet don't send any 107 /* if we have too many changes for one packet don't send any
109 * and wait for the tt table request which will be fragmented */ 108 * and wait for the tt table request which will be fragmented */
110 if (new_len > hard_iface->soft_iface->mtu) 109 if (new_len > hard_iface->soft_iface->mtu)
111 new_len = BATMAN_OGM_LEN; 110 new_len = BATMAN_OGM_HLEN;
112 111
113 realloc_packet_buffer(hard_iface, new_len); 112 realloc_packet_buffer(hard_iface, new_len);
114 113
@@ -118,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
118 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); 117 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
119 118
120 return tt_changes_fill_buffer(bat_priv, 119 return tt_changes_fill_buffer(bat_priv,
121 hard_iface->packet_buff + BATMAN_OGM_LEN, 120 hard_iface->packet_buff + BATMAN_OGM_HLEN,
122 hard_iface->packet_len - BATMAN_OGM_LEN); 121 hard_iface->packet_len - BATMAN_OGM_HLEN);
123} 122}
124 123
125static int reset_packet_buffer(struct bat_priv *bat_priv, 124static int reset_packet_buffer(struct bat_priv *bat_priv,
126 struct hard_iface *hard_iface) 125 struct hard_iface *hard_iface)
127{ 126{
128 realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); 127 realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
129 return 0; 128 return 0;
130} 129}
131 130
@@ -168,7 +167,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
168 if (primary_if) 167 if (primary_if)
169 hardif_free_ref(primary_if); 168 hardif_free_ref(primary_if);
170 169
171 bat_ogm_schedule(hard_iface, tt_num_changes); 170 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
172} 171}
173 172
174static void forw_packet_free(struct forw_packet *forw_packet) 173static void forw_packet_free(struct forw_packet *forw_packet)
@@ -234,7 +233,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
234 233
235 /* as we have a copy now, it is safe to decrease the TTL */ 234 /* as we have a copy now, it is safe to decrease the TTL */
236 bcast_packet = (struct bcast_packet *)newskb->data; 235 bcast_packet = (struct bcast_packet *)newskb->data;
237 bcast_packet->ttl--; 236 bcast_packet->header.ttl--;
238 237
239 skb_reset_mac_header(newskb); 238 skb_reset_mac_header(newskb);
240 239
@@ -293,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
293 /* if we still have some more bcasts to send */ 292 /* if we still have some more bcasts to send */
294 if (forw_packet->num_packets < 3) { 293 if (forw_packet->num_packets < 3) {
295 _add_bcast_packet_to_list(bat_priv, forw_packet, 294 _add_bcast_packet_to_list(bat_priv, forw_packet,
296 ((5 * HZ) / 1000)); 295 msecs_to_jiffies(5));
297 return; 296 return;
298 } 297 }
299 298
@@ -318,7 +317,7 @@ void send_outstanding_bat_ogm_packet(struct work_struct *work)
318 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 317 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
319 goto out; 318 goto out;
320 319
321 bat_ogm_emit(forw_packet); 320 bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
322 321
323 /** 322 /**
324 * we have to have at least one packet in the queue 323 * we have to have at least one packet in the queue
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c8ca3ef7385b..824ef06f9b01 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 987c75a775f9..6e2530b02043 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -36,6 +36,7 @@
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
38#include "unicast.h" 38#include "unicast.h"
39#include "bridge_loop_avoidance.h"
39 40
40 41
41static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 42static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,440 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
73 return 0; 74 return 0;
74} 75}
75 76
76static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
77{
78 if (atomic_dec_and_test(&softif_neigh->refcount))
79 kfree_rcu(softif_neigh, rcu);
80}
81
82static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
83{
84 struct softif_neigh_vid *softif_neigh_vid;
85 struct softif_neigh *softif_neigh;
86 struct hlist_node *node, *node_tmp;
87 struct bat_priv *bat_priv;
88
89 softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
90 bat_priv = softif_neigh_vid->bat_priv;
91
92 spin_lock_bh(&bat_priv->softif_neigh_lock);
93 hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
94 &softif_neigh_vid->softif_neigh_list, list) {
95 hlist_del_rcu(&softif_neigh->list);
96 softif_neigh_free_ref(softif_neigh);
97 }
98 spin_unlock_bh(&bat_priv->softif_neigh_lock);
99
100 kfree(softif_neigh_vid);
101}
102
103static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
104{
105 if (atomic_dec_and_test(&softif_neigh_vid->refcount))
106 call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
107}
108
109static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
110 short vid)
111{
112 struct softif_neigh_vid *softif_neigh_vid;
113 struct hlist_node *node;
114
115 rcu_read_lock();
116 hlist_for_each_entry_rcu(softif_neigh_vid, node,
117 &bat_priv->softif_neigh_vids, list) {
118 if (softif_neigh_vid->vid != vid)
119 continue;
120
121 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
122 continue;
123
124 goto out;
125 }
126
127 softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
128 if (!softif_neigh_vid)
129 goto out;
130
131 softif_neigh_vid->vid = vid;
132 softif_neigh_vid->bat_priv = bat_priv;
133
134 /* initialize with 2 - caller decrements counter by one */
135 atomic_set(&softif_neigh_vid->refcount, 2);
136 INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
137 INIT_HLIST_NODE(&softif_neigh_vid->list);
138 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
139 hlist_add_head_rcu(&softif_neigh_vid->list,
140 &bat_priv->softif_neigh_vids);
141 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
142
143out:
144 rcu_read_unlock();
145 return softif_neigh_vid;
146}
147
148static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
149 const uint8_t *addr, short vid)
150{
151 struct softif_neigh_vid *softif_neigh_vid;
152 struct softif_neigh *softif_neigh = NULL;
153 struct hlist_node *node;
154
155 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
156 if (!softif_neigh_vid)
157 goto out;
158
159 rcu_read_lock();
160 hlist_for_each_entry_rcu(softif_neigh, node,
161 &softif_neigh_vid->softif_neigh_list,
162 list) {
163 if (!compare_eth(softif_neigh->addr, addr))
164 continue;
165
166 if (!atomic_inc_not_zero(&softif_neigh->refcount))
167 continue;
168
169 softif_neigh->last_seen = jiffies;
170 goto unlock;
171 }
172
173 softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
174 if (!softif_neigh)
175 goto unlock;
176
177 memcpy(softif_neigh->addr, addr, ETH_ALEN);
178 softif_neigh->last_seen = jiffies;
179 /* initialize with 2 - caller decrements counter by one */
180 atomic_set(&softif_neigh->refcount, 2);
181
182 INIT_HLIST_NODE(&softif_neigh->list);
183 spin_lock_bh(&bat_priv->softif_neigh_lock);
184 hlist_add_head_rcu(&softif_neigh->list,
185 &softif_neigh_vid->softif_neigh_list);
186 spin_unlock_bh(&bat_priv->softif_neigh_lock);
187
188unlock:
189 rcu_read_unlock();
190out:
191 if (softif_neigh_vid)
192 softif_neigh_vid_free_ref(softif_neigh_vid);
193 return softif_neigh;
194}
195
196static struct softif_neigh *softif_neigh_get_selected(
197 struct softif_neigh_vid *softif_neigh_vid)
198{
199 struct softif_neigh *softif_neigh;
200
201 rcu_read_lock();
202 softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
203
204 if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
205 softif_neigh = NULL;
206
207 rcu_read_unlock();
208 return softif_neigh;
209}
210
211static struct softif_neigh *softif_neigh_vid_get_selected(
212 struct bat_priv *bat_priv,
213 short vid)
214{
215 struct softif_neigh_vid *softif_neigh_vid;
216 struct softif_neigh *softif_neigh = NULL;
217
218 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
219 if (!softif_neigh_vid)
220 goto out;
221
222 softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
223out:
224 if (softif_neigh_vid)
225 softif_neigh_vid_free_ref(softif_neigh_vid);
226 return softif_neigh;
227}
228
229static void softif_neigh_vid_select(struct bat_priv *bat_priv,
230 struct softif_neigh *new_neigh,
231 short vid)
232{
233 struct softif_neigh_vid *softif_neigh_vid;
234 struct softif_neigh *curr_neigh;
235
236 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
237 if (!softif_neigh_vid)
238 goto out;
239
240 spin_lock_bh(&bat_priv->softif_neigh_lock);
241
242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
243 new_neigh = NULL;
244
245 curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
246 1);
247 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
248
249 if ((curr_neigh) && (!new_neigh))
250 bat_dbg(DBG_ROUTES, bat_priv,
251 "Removing mesh exit point on vid: %d (prev: %pM).\n",
252 vid, curr_neigh->addr);
253 else if ((curr_neigh) && (new_neigh))
254 bat_dbg(DBG_ROUTES, bat_priv,
255 "Changing mesh exit point on vid: %d from %pM "
256 "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
257 else if ((!curr_neigh) && (new_neigh))
258 bat_dbg(DBG_ROUTES, bat_priv,
259 "Setting mesh exit point on vid: %d to %pM.\n",
260 vid, new_neigh->addr);
261
262 if (curr_neigh)
263 softif_neigh_free_ref(curr_neigh);
264
265 spin_unlock_bh(&bat_priv->softif_neigh_lock);
266
267out:
268 if (softif_neigh_vid)
269 softif_neigh_vid_free_ref(softif_neigh_vid);
270}
271
272static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
273 struct softif_neigh_vid *softif_neigh_vid)
274{
275 struct softif_neigh *curr_neigh;
276 struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
277 struct hard_iface *primary_if = NULL;
278 struct hlist_node *node;
279
280 primary_if = primary_if_get_selected(bat_priv);
281 if (!primary_if)
282 goto out;
283
284 /* find new softif_neigh immediately to avoid temporary loops */
285 rcu_read_lock();
286 curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
287
288 hlist_for_each_entry_rcu(softif_neigh_tmp, node,
289 &softif_neigh_vid->softif_neigh_list,
290 list) {
291 if (softif_neigh_tmp == curr_neigh)
292 continue;
293
294 /* we got a neighbor but its mac is 'bigger' than ours */
295 if (memcmp(primary_if->net_dev->dev_addr,
296 softif_neigh_tmp->addr, ETH_ALEN) < 0)
297 continue;
298
299 if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
300 continue;
301
302 softif_neigh = softif_neigh_tmp;
303 goto unlock;
304 }
305
306unlock:
307 rcu_read_unlock();
308out:
309 softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
310
311 if (primary_if)
312 hardif_free_ref(primary_if);
313 if (softif_neigh)
314 softif_neigh_free_ref(softif_neigh);
315}
316
317int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
318{
319 struct net_device *net_dev = (struct net_device *)seq->private;
320 struct bat_priv *bat_priv = netdev_priv(net_dev);
321 struct softif_neigh_vid *softif_neigh_vid;
322 struct softif_neigh *softif_neigh;
323 struct hard_iface *primary_if;
324 struct hlist_node *node, *node_tmp;
325 struct softif_neigh *curr_softif_neigh;
326 int ret = 0, last_seen_secs, last_seen_msecs;
327
328 primary_if = primary_if_get_selected(bat_priv);
329 if (!primary_if) {
330 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
331 "please specify interfaces to enable it\n",
332 net_dev->name);
333 goto out;
334 }
335
336 if (primary_if->if_status != IF_ACTIVE) {
337 ret = seq_printf(seq, "BATMAN mesh %s "
338 "disabled - primary interface not active\n",
339 net_dev->name);
340 goto out;
341 }
342
343 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
344
345 rcu_read_lock();
346 hlist_for_each_entry_rcu(softif_neigh_vid, node,
347 &bat_priv->softif_neigh_vids, list) {
348 seq_printf(seq, " %-15s %s on vid: %d\n",
349 "Originator", "last-seen", softif_neigh_vid->vid);
350
351 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
352
353 hlist_for_each_entry_rcu(softif_neigh, node_tmp,
354 &softif_neigh_vid->softif_neigh_list,
355 list) {
356 last_seen_secs = jiffies_to_msecs(jiffies -
357 softif_neigh->last_seen) / 1000;
358 last_seen_msecs = jiffies_to_msecs(jiffies -
359 softif_neigh->last_seen) % 1000;
360 seq_printf(seq, "%s %pM %3i.%03is\n",
361 curr_softif_neigh == softif_neigh
362 ? "=>" : " ", softif_neigh->addr,
363 last_seen_secs, last_seen_msecs);
364 }
365
366 if (curr_softif_neigh)
367 softif_neigh_free_ref(curr_softif_neigh);
368
369 seq_printf(seq, "\n");
370 }
371 rcu_read_unlock();
372
373out:
374 if (primary_if)
375 hardif_free_ref(primary_if);
376 return ret;
377}
378
379void softif_neigh_purge(struct bat_priv *bat_priv)
380{
381 struct softif_neigh *softif_neigh, *curr_softif_neigh;
382 struct softif_neigh_vid *softif_neigh_vid;
383 struct hlist_node *node, *node_tmp, *node_tmp2;
384 int do_deselect;
385
386 rcu_read_lock();
387 hlist_for_each_entry_rcu(softif_neigh_vid, node,
388 &bat_priv->softif_neigh_vids, list) {
389 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
390 continue;
391
392 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
393 do_deselect = 0;
394
395 spin_lock_bh(&bat_priv->softif_neigh_lock);
396 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
397 &softif_neigh_vid->softif_neigh_list,
398 list) {
399 if ((!time_after(jiffies, softif_neigh->last_seen +
400 msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
401 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
402 continue;
403
404 if (curr_softif_neigh == softif_neigh) {
405 bat_dbg(DBG_ROUTES, bat_priv,
406 "Current mesh exit point on vid: %d "
407 "'%pM' vanished.\n",
408 softif_neigh_vid->vid,
409 softif_neigh->addr);
410 do_deselect = 1;
411 }
412
413 hlist_del_rcu(&softif_neigh->list);
414 softif_neigh_free_ref(softif_neigh);
415 }
416 spin_unlock_bh(&bat_priv->softif_neigh_lock);
417
418 /* soft_neigh_vid_deselect() needs to acquire the
419 * softif_neigh_lock */
420 if (do_deselect)
421 softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
422
423 if (curr_softif_neigh)
424 softif_neigh_free_ref(curr_softif_neigh);
425
426 softif_neigh_vid_free_ref(softif_neigh_vid);
427 }
428 rcu_read_unlock();
429
430 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
431 hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
432 &bat_priv->softif_neigh_vids, list) {
433 if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
434 continue;
435
436 hlist_del_rcu(&softif_neigh_vid->list);
437 softif_neigh_vid_free_ref(softif_neigh_vid);
438 }
439 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
440
441}
442
443static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
444 short vid)
445{
446 struct bat_priv *bat_priv = netdev_priv(dev);
447 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
448 struct batman_ogm_packet *batman_ogm_packet;
449 struct softif_neigh *softif_neigh = NULL;
450 struct hard_iface *primary_if = NULL;
451 struct softif_neigh *curr_softif_neigh = NULL;
452
453 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
454 batman_ogm_packet = (struct batman_ogm_packet *)
455 (skb->data + ETH_HLEN + VLAN_HLEN);
456 else
457 batman_ogm_packet = (struct batman_ogm_packet *)
458 (skb->data + ETH_HLEN);
459
460 if (batman_ogm_packet->version != COMPAT_VERSION)
461 goto out;
462
463 if (batman_ogm_packet->packet_type != BAT_OGM)
464 goto out;
465
466 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
467 goto out;
468
469 if (is_my_mac(batman_ogm_packet->orig))
470 goto out;
471
472 softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
473 if (!softif_neigh)
474 goto out;
475
476 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
477 if (curr_softif_neigh == softif_neigh)
478 goto out;
479
480 primary_if = primary_if_get_selected(bat_priv);
481 if (!primary_if)
482 goto out;
483
484 /* we got a neighbor but its mac is 'bigger' than ours */
485 if (memcmp(primary_if->net_dev->dev_addr,
486 softif_neigh->addr, ETH_ALEN) < 0)
487 goto out;
488
489 /* close own batX device and use softif_neigh as exit node */
490 if (!curr_softif_neigh) {
491 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
492 goto out;
493 }
494
495 /* switch to new 'smallest neighbor' */
496 if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
497 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
498
499out:
500 kfree_skb(skb);
501 if (softif_neigh)
502 softif_neigh_free_ref(softif_neigh);
503 if (curr_softif_neigh)
504 softif_neigh_free_ref(curr_softif_neigh);
505 if (primary_if)
506 hardif_free_ref(primary_if);
507 return;
508}
509
510static int interface_open(struct net_device *dev) 77static int interface_open(struct net_device *dev)
511{ 78{
512 netif_start_queue(dev); 79 netif_start_queue(dev);
@@ -541,6 +108,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
541 } 108 }
542 109
543 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 110 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
111 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
544 return 0; 112 return 0;
545} 113}
546 114
@@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
562 struct hard_iface *primary_if = NULL; 130 struct hard_iface *primary_if = NULL;
563 struct bcast_packet *bcast_packet; 131 struct bcast_packet *bcast_packet;
564 struct vlan_ethhdr *vhdr; 132 struct vlan_ethhdr *vhdr;
565 struct softif_neigh *curr_softif_neigh = NULL; 133 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
134 0x00};
566 unsigned int header_len = 0; 135 unsigned int header_len = 0;
567 int data_len = skb->len, ret; 136 int data_len = skb->len, ret;
568 short vid = -1; 137 short vid __maybe_unused = -1;
569 bool do_bcast = false; 138 bool do_bcast = false;
570 139
571 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 140 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
583 152
584 /* fall through */ 153 /* fall through */
585 case ETH_P_BATMAN: 154 case ETH_P_BATMAN:
586 softif_batman_recv(skb, soft_iface, vid); 155 goto dropped;
587 goto end;
588 } 156 }
589 157
590 /** 158 if (bla_tx(bat_priv, skb, vid))
591 * if we have a another chosen mesh exit node in range
592 * it will transport the packets to the mesh
593 */
594 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
595 if (curr_softif_neigh)
596 goto dropped; 159 goto dropped;
597 160
598 /* Register the client MAC in the transtable */ 161 /* Register the client MAC in the transtable */
599 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); 162 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
600 163
164 /* don't accept stp packets. STP does not help in meshes.
165 * better use the bridge loop avoidance ...
166 */
167 if (compare_eth(ethhdr->h_dest, stp_addr))
168 goto dropped;
169
601 if (is_multicast_ether_addr(ethhdr->h_dest)) { 170 if (is_multicast_ether_addr(ethhdr->h_dest)) {
602 do_bcast = true; 171 do_bcast = true;
603 172
@@ -632,11 +201,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
632 goto dropped; 201 goto dropped;
633 202
634 bcast_packet = (struct bcast_packet *)skb->data; 203 bcast_packet = (struct bcast_packet *)skb->data;
635 bcast_packet->version = COMPAT_VERSION; 204 bcast_packet->header.version = COMPAT_VERSION;
636 bcast_packet->ttl = TTL; 205 bcast_packet->header.ttl = TTL;
637 206
638 /* batman packet type: broadcast */ 207 /* batman packet type: broadcast */
639 bcast_packet->packet_type = BAT_BCAST; 208 bcast_packet->header.packet_type = BAT_BCAST;
640 209
641 /* hw address of first interface is the orig mac because only 210 /* hw address of first interface is the orig mac because only
642 * this mac is known throughout the mesh */ 211 * this mac is known throughout the mesh */
@@ -675,8 +244,6 @@ dropped:
675dropped_freed: 244dropped_freed:
676 bat_priv->stats.tx_dropped++; 245 bat_priv->stats.tx_dropped++;
677end: 246end:
678 if (curr_softif_neigh)
679 softif_neigh_free_ref(curr_softif_neigh);
680 if (primary_if) 247 if (primary_if)
681 hardif_free_ref(primary_if); 248 hardif_free_ref(primary_if);
682 return NETDEV_TX_OK; 249 return NETDEV_TX_OK;
@@ -687,12 +254,9 @@ void interface_rx(struct net_device *soft_iface,
687 int hdr_size) 254 int hdr_size)
688{ 255{
689 struct bat_priv *bat_priv = netdev_priv(soft_iface); 256 struct bat_priv *bat_priv = netdev_priv(soft_iface);
690 struct unicast_packet *unicast_packet;
691 struct ethhdr *ethhdr; 257 struct ethhdr *ethhdr;
692 struct vlan_ethhdr *vhdr; 258 struct vlan_ethhdr *vhdr;
693 struct softif_neigh *curr_softif_neigh = NULL; 259 short vid __maybe_unused = -1;
694 short vid = -1;
695 int ret;
696 260
697 /* check if enough space is available for pulling, and pull */ 261 /* check if enough space is available for pulling, and pull */
698 if (!pskb_may_pull(skb, hdr_size)) 262 if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@ void interface_rx(struct net_device *soft_iface,
716 goto dropped; 280 goto dropped;
717 } 281 }
718 282
719 /**
720 * if we have a another chosen mesh exit node in range
721 * it will transport the packets to the non-mesh network
722 */
723 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
724 if (curr_softif_neigh) {
725 skb_push(skb, hdr_size);
726 unicast_packet = (struct unicast_packet *)skb->data;
727
728 if ((unicast_packet->packet_type != BAT_UNICAST) &&
729 (unicast_packet->packet_type != BAT_UNICAST_FRAG))
730 goto dropped;
731
732 skb_reset_mac_header(skb);
733
734 memcpy(unicast_packet->dest,
735 curr_softif_neigh->addr, ETH_ALEN);
736 ret = route_unicast_packet(skb, recv_if);
737 if (ret == NET_RX_DROP)
738 goto dropped;
739
740 goto out;
741 }
742
743 /* skb->dev & skb->pkt_type are set here */ 283 /* skb->dev & skb->pkt_type are set here */
744 if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) 284 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
745 goto dropped; 285 goto dropped;
@@ -752,21 +292,25 @@ void interface_rx(struct net_device *soft_iface,
752/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ 292/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
753 293
754 bat_priv->stats.rx_packets++; 294 bat_priv->stats.rx_packets++;
755 bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); 295 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
756 296
757 soft_iface->last_rx = jiffies; 297 soft_iface->last_rx = jiffies;
758 298
759 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 299 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
760 goto dropped; 300 goto dropped;
761 301
302 /* Let the bridge loop avoidance check the packet. If will
303 * not handle it, we can safely push it up.
304 */
305 if (bla_rx(bat_priv, skb, vid))
306 goto out;
307
762 netif_rx(skb); 308 netif_rx(skb);
763 goto out; 309 goto out;
764 310
765dropped: 311dropped:
766 kfree_skb(skb); 312 kfree_skb(skb);
767out: 313out:
768 if (curr_softif_neigh)
769 softif_neigh_free_ref(curr_softif_neigh);
770 return; 314 return;
771} 315}
772 316
@@ -783,7 +327,6 @@ static const struct net_device_ops bat_netdev_ops = {
783static void interface_setup(struct net_device *dev) 327static void interface_setup(struct net_device *dev)
784{ 328{
785 struct bat_priv *priv = netdev_priv(dev); 329 struct bat_priv *priv = netdev_priv(dev);
786 char dev_addr[ETH_ALEN];
787 330
788 ether_setup(dev); 331 ether_setup(dev);
789 332
@@ -800,8 +343,7 @@ static void interface_setup(struct net_device *dev)
800 dev->hard_header_len = BAT_HEADER_LEN; 343 dev->hard_header_len = BAT_HEADER_LEN;
801 344
802 /* generate random address */ 345 /* generate random address */
803 random_ether_addr(dev_addr); 346 eth_hw_addr_random(dev);
804 memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
805 347
806 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops); 348 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
807 349
@@ -830,13 +372,14 @@ struct net_device *softif_create(const char *name)
830 372
831 atomic_set(&bat_priv->aggregated_ogms, 1); 373 atomic_set(&bat_priv->aggregated_ogms, 1);
832 atomic_set(&bat_priv->bonding, 0); 374 atomic_set(&bat_priv->bonding, 0);
375 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
833 atomic_set(&bat_priv->ap_isolation, 0); 376 atomic_set(&bat_priv->ap_isolation, 0);
834 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); 377 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
835 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); 378 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
836 atomic_set(&bat_priv->gw_sel_class, 20); 379 atomic_set(&bat_priv->gw_sel_class, 20);
837 atomic_set(&bat_priv->gw_bandwidth, 41); 380 atomic_set(&bat_priv->gw_bandwidth, 41);
838 atomic_set(&bat_priv->orig_interval, 1000); 381 atomic_set(&bat_priv->orig_interval, 1000);
839 atomic_set(&bat_priv->hop_penalty, 10); 382 atomic_set(&bat_priv->hop_penalty, 30);
840 atomic_set(&bat_priv->log_level, 0); 383 atomic_set(&bat_priv->log_level, 0);
841 atomic_set(&bat_priv->fragmentation, 1); 384 atomic_set(&bat_priv->fragmentation, 1);
842 atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); 385 atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -847,6 +390,7 @@ struct net_device *softif_create(const char *name)
847 atomic_set(&bat_priv->ttvn, 0); 390 atomic_set(&bat_priv->ttvn, 0);
848 atomic_set(&bat_priv->tt_local_changes, 0); 391 atomic_set(&bat_priv->tt_local_changes, 0);
849 atomic_set(&bat_priv->tt_ogm_append_cnt, 0); 392 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
393 atomic_set(&bat_priv->bla_num_requests, 0);
850 394
851 bat_priv->tt_buff = NULL; 395 bat_priv->tt_buff = NULL;
852 bat_priv->tt_buff_len = 0; 396 bat_priv->tt_buff_len = 0;
@@ -855,6 +399,10 @@ struct net_device *softif_create(const char *name)
855 bat_priv->primary_if = NULL; 399 bat_priv->primary_if = NULL;
856 bat_priv->num_ifaces = 0; 400 bat_priv->num_ifaces = 0;
857 401
402 ret = bat_algo_select(bat_priv, bat_routing_algo);
403 if (ret < 0)
404 goto unreg_soft_iface;
405
858 ret = sysfs_add_meshif(soft_iface); 406 ret = sysfs_add_meshif(soft_iface);
859 if (ret < 0) 407 if (ret < 0)
860 goto unreg_soft_iface; 408 goto unreg_soft_iface;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 001546fc96f1..020300673884 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -23,8 +23,6 @@
23#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 23#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
24 24
25int my_skb_head_push(struct sk_buff *skb, unsigned int len); 25int my_skb_head_push(struct sk_buff *skb, unsigned int len);
26int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
27void softif_neigh_purge(struct bat_priv *bat_priv);
28void interface_rx(struct net_device *soft_iface, 26void interface_rx(struct net_device *soft_iface,
29 struct sk_buff *skb, struct hard_iface *recv_if, 27 struct sk_buff *skb, struct hard_iface *recv_if,
30 int hdr_size); 28 int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index ab8dea8b0b2e..a66c2dcd1088 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public 7 * modify it under the terms of version 2 of the GNU General Public
@@ -27,13 +27,14 @@
27#include "hash.h" 27#include "hash.h"
28#include "originator.h" 28#include "originator.h"
29#include "routing.h" 29#include "routing.h"
30#include "bridge_loop_avoidance.h"
30 31
31#include <linux/crc16.h> 32#include <linux/crc16.h>
32 33
33static void _tt_global_del(struct bat_priv *bat_priv, 34static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
34 struct tt_global_entry *tt_global_entry, 35 struct orig_node *orig_node);
35 const char *message);
36static void tt_purge(struct work_struct *work); 36static void tt_purge(struct work_struct *work);
37static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
37 38
38/* returns 1 if they are the same mac addr */ 39/* returns 1 if they are the same mac addr */
39static int compare_tt(const struct hlist_node *node, const void *data2) 40static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -108,14 +109,6 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
108 109
109} 110}
110 111
111static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
112{
113 unsigned long deadline;
114 deadline = starting_time + msecs_to_jiffies(timeout);
115
116 return time_after(jiffies, deadline);
117}
118
119static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) 112static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
120{ 113{
121 if (atomic_dec_and_test(&tt_local_entry->common.refcount)) 114 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
@@ -131,17 +124,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
131 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
132 common); 125 common);
133 126
134 if (tt_global_entry->orig_node)
135 orig_node_free_ref(tt_global_entry->orig_node);
136
137 kfree(tt_global_entry); 127 kfree(tt_global_entry);
138} 128}
139 129
140static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) 130static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
141{ 131{
142 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) 132 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
133 tt_global_del_orig_list(tt_global_entry);
143 call_rcu(&tt_global_entry->common.rcu, 134 call_rcu(&tt_global_entry->common.rcu,
144 tt_global_entry_free_rcu); 135 tt_global_entry_free_rcu);
136 }
137}
138
139static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
140{
141 struct tt_orig_list_entry *orig_entry;
142
143 orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
144 atomic_dec(&orig_entry->orig_node->tt_size);
145 orig_node_free_ref(orig_entry->orig_node);
146 kfree(orig_entry);
147}
148
149static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
150{
151 call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
145} 152}
146 153
147static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, 154static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -190,12 +197,17 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
190 struct bat_priv *bat_priv = netdev_priv(soft_iface); 197 struct bat_priv *bat_priv = netdev_priv(soft_iface);
191 struct tt_local_entry *tt_local_entry = NULL; 198 struct tt_local_entry *tt_local_entry = NULL;
192 struct tt_global_entry *tt_global_entry = NULL; 199 struct tt_global_entry *tt_global_entry = NULL;
200 struct hlist_head *head;
201 struct hlist_node *node;
202 struct tt_orig_list_entry *orig_entry;
193 int hash_added; 203 int hash_added;
194 204
195 tt_local_entry = tt_local_hash_find(bat_priv, addr); 205 tt_local_entry = tt_local_hash_find(bat_priv, addr);
196 206
197 if (tt_local_entry) { 207 if (tt_local_entry) {
198 tt_local_entry->last_seen = jiffies; 208 tt_local_entry->last_seen = jiffies;
209 /* possibly unset the TT_CLIENT_PENDING flag */
210 tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
199 goto out; 211 goto out;
200 } 212 }
201 213
@@ -218,6 +230,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
218 if (compare_eth(addr, soft_iface->dev_addr)) 230 if (compare_eth(addr, soft_iface->dev_addr))
219 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; 231 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
220 232
233 /* The local entry has to be marked as NEW to avoid to send it in
234 * a full table response going out before the next ttvn increment
235 * (consistency check) */
236 tt_local_entry->common.flags |= TT_CLIENT_NEW;
237
221 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, 238 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
222 &tt_local_entry->common, 239 &tt_local_entry->common,
223 &tt_local_entry->common.hash_entry); 240 &tt_local_entry->common.hash_entry);
@@ -230,24 +247,26 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
230 247
231 tt_local_event(bat_priv, addr, tt_local_entry->common.flags); 248 tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
232 249
233 /* The local entry has to be marked as NEW to avoid to send it in
234 * a full table response going out before the next ttvn increment
235 * (consistency check) */
236 tt_local_entry->common.flags |= TT_CLIENT_NEW;
237
238 /* remove address from global hash if present */ 250 /* remove address from global hash if present */
239 tt_global_entry = tt_global_hash_find(bat_priv, addr); 251 tt_global_entry = tt_global_hash_find(bat_priv, addr);
240 252
241 /* Check whether it is a roaming! */ 253 /* Check whether it is a roaming! */
242 if (tt_global_entry) { 254 if (tt_global_entry) {
243 /* This node is probably going to update its tt table */ 255 /* These node are probably going to update their tt table */
244 tt_global_entry->orig_node->tt_poss_change = true; 256 head = &tt_global_entry->orig_list;
245 /* The global entry has to be marked as ROAMING and has to be 257 rcu_read_lock();
246 * kept for consistency purpose */ 258 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
259 orig_entry->orig_node->tt_poss_change = true;
260
261 send_roam_adv(bat_priv, tt_global_entry->common.addr,
262 orig_entry->orig_node);
263 }
264 rcu_read_unlock();
265 /* The global entry has to be marked as ROAMING and
266 * has to be kept for consistency purpose
267 */
247 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 268 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
248 tt_global_entry->roam_at = jiffies; 269 tt_global_entry->roam_at = jiffies;
249 send_roam_adv(bat_priv, tt_global_entry->common.addr,
250 tt_global_entry->orig_node);
251 } 270 }
252out: 271out:
253 if (tt_local_entry) 272 if (tt_local_entry)
@@ -269,7 +288,7 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
269 atomic_set(&bat_priv->tt_local_changes, 0); 288 atomic_set(&bat_priv->tt_local_changes, 0);
270 289
271 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 290 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
272 list) { 291 list) {
273 if (count < tot_changes) { 292 if (count < tot_changes) {
274 memcpy(buff + tt_len(count), 293 memcpy(buff + tt_len(count),
275 &entry->change, sizeof(struct tt_change)); 294 &entry->change, sizeof(struct tt_change));
@@ -317,21 +336,21 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
317 336
318 primary_if = primary_if_get_selected(bat_priv); 337 primary_if = primary_if_get_selected(bat_priv);
319 if (!primary_if) { 338 if (!primary_if) {
320 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 339 ret = seq_printf(seq,
321 "please specify interfaces to enable it\n", 340 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
322 net_dev->name); 341 net_dev->name);
323 goto out; 342 goto out;
324 } 343 }
325 344
326 if (primary_if->if_status != IF_ACTIVE) { 345 if (primary_if->if_status != IF_ACTIVE) {
327 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 346 ret = seq_printf(seq,
328 "primary interface not active\n", 347 "BATMAN mesh %s disabled - primary interface not active\n",
329 net_dev->name); 348 net_dev->name);
330 goto out; 349 goto out;
331 } 350 }
332 351
333 seq_printf(seq, "Locally retrieved addresses (from %s) " 352 seq_printf(seq,
334 "announced via TT (TTVN: %u):\n", 353 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
335 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); 354 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
336 355
337 for (i = 0; i < hash->size; i++) { 356 for (i = 0; i < hash->size; i++) {
@@ -341,17 +360,17 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
341 hlist_for_each_entry_rcu(tt_common_entry, node, 360 hlist_for_each_entry_rcu(tt_common_entry, node,
342 head, hash_entry) { 361 head, hash_entry) {
343 seq_printf(seq, " * %pM [%c%c%c%c%c]\n", 362 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
344 tt_common_entry->addr, 363 tt_common_entry->addr,
345 (tt_common_entry->flags & 364 (tt_common_entry->flags &
346 TT_CLIENT_ROAM ? 'R' : '.'), 365 TT_CLIENT_ROAM ? 'R' : '.'),
347 (tt_common_entry->flags & 366 (tt_common_entry->flags &
348 TT_CLIENT_NOPURGE ? 'P' : '.'), 367 TT_CLIENT_NOPURGE ? 'P' : '.'),
349 (tt_common_entry->flags & 368 (tt_common_entry->flags &
350 TT_CLIENT_NEW ? 'N' : '.'), 369 TT_CLIENT_NEW ? 'N' : '.'),
351 (tt_common_entry->flags & 370 (tt_common_entry->flags &
352 TT_CLIENT_PENDING ? 'X' : '.'), 371 TT_CLIENT_PENDING ? 'X' : '.'),
353 (tt_common_entry->flags & 372 (tt_common_entry->flags &
354 TT_CLIENT_WIFI ? 'W' : '.')); 373 TT_CLIENT_WIFI ? 'W' : '.'));
355 } 374 }
356 rcu_read_unlock(); 375 rcu_read_unlock();
357 } 376 }
@@ -363,7 +382,7 @@ out:
363 382
364static void tt_local_set_pending(struct bat_priv *bat_priv, 383static void tt_local_set_pending(struct bat_priv *bat_priv,
365 struct tt_local_entry *tt_local_entry, 384 struct tt_local_entry *tt_local_entry,
366 uint16_t flags) 385 uint16_t flags, const char *message)
367{ 386{
368 tt_local_event(bat_priv, tt_local_entry->common.addr, 387 tt_local_event(bat_priv, tt_local_entry->common.addr,
369 tt_local_entry->common.flags | flags); 388 tt_local_entry->common.flags | flags);
@@ -372,6 +391,10 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
372 * to be kept in the table in order to send it in a full table 391 * to be kept in the table in order to send it in a full table
373 * response issued before the net ttvn increment (consistency check) */ 392 * response issued before the net ttvn increment (consistency check) */
374 tt_local_entry->common.flags |= TT_CLIENT_PENDING; 393 tt_local_entry->common.flags |= TT_CLIENT_PENDING;
394
395 bat_dbg(DBG_TT, bat_priv,
396 "Local tt entry (%pM) pending to be removed: %s\n",
397 tt_local_entry->common.addr, message);
375} 398}
376 399
377void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, 400void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -384,10 +407,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
384 goto out; 407 goto out;
385 408
386 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL | 409 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
387 (roaming ? TT_CLIENT_ROAM : NO_FLAGS)); 410 (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
388
389 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
390 "%s\n", tt_local_entry->common.addr, message);
391out: 411out:
392 if (tt_local_entry) 412 if (tt_local_entry)
393 tt_local_entry_free_ref(tt_local_entry); 413 tt_local_entry_free_ref(tt_local_entry);
@@ -420,15 +440,12 @@ static void tt_local_purge(struct bat_priv *bat_priv)
420 if (tt_local_entry->common.flags & TT_CLIENT_PENDING) 440 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
421 continue; 441 continue;
422 442
423 if (!is_out_of_time(tt_local_entry->last_seen, 443 if (!has_timed_out(tt_local_entry->last_seen,
424 TT_LOCAL_TIMEOUT * 1000)) 444 TT_LOCAL_TIMEOUT))
425 continue; 445 continue;
426 446
427 tt_local_set_pending(bat_priv, tt_local_entry, 447 tt_local_set_pending(bat_priv, tt_local_entry,
428 TT_CLIENT_DEL); 448 TT_CLIENT_DEL, "timed out");
429 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
430 "pending to be removed: timed out\n",
431 tt_local_entry->common.addr);
432 } 449 }
433 spin_unlock_bh(list_lock); 450 spin_unlock_bh(list_lock);
434 } 451 }
@@ -500,33 +517,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
500 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 517 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
501} 518}
502 519
520/* find out if an orig_node is already in the list of a tt_global_entry.
521 * returns 1 if found, 0 otherwise
522 */
523static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
524 const struct orig_node *orig_node)
525{
526 struct tt_orig_list_entry *tmp_orig_entry;
527 const struct hlist_head *head;
528 struct hlist_node *node;
529 bool found = false;
530
531 rcu_read_lock();
532 head = &entry->orig_list;
533 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
534 if (tmp_orig_entry->orig_node == orig_node) {
535 found = true;
536 break;
537 }
538 }
539 rcu_read_unlock();
540 return found;
541}
542
543static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
544 struct orig_node *orig_node,
545 int ttvn)
546{
547 struct tt_orig_list_entry *orig_entry;
548
549 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
550 if (!orig_entry)
551 return;
552
553 INIT_HLIST_NODE(&orig_entry->list);
554 atomic_inc(&orig_node->refcount);
555 atomic_inc(&orig_node->tt_size);
556 orig_entry->orig_node = orig_node;
557 orig_entry->ttvn = ttvn;
558
559 spin_lock_bh(&tt_global_entry->list_lock);
560 hlist_add_head_rcu(&orig_entry->list,
561 &tt_global_entry->orig_list);
562 spin_unlock_bh(&tt_global_entry->list_lock);
563}
564
503/* caller must hold orig_node refcount */ 565/* caller must hold orig_node refcount */
504int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 566int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
505 const unsigned char *tt_addr, uint8_t ttvn, bool roaming, 567 const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
506 bool wifi) 568 bool wifi)
507{ 569{
508 struct tt_global_entry *tt_global_entry; 570 struct tt_global_entry *tt_global_entry = NULL;
509 struct orig_node *orig_node_tmp;
510 int ret = 0; 571 int ret = 0;
511 int hash_added; 572 int hash_added;
512 573
513 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); 574 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
514 575
515 if (!tt_global_entry) { 576 if (!tt_global_entry) {
516 tt_global_entry = 577 tt_global_entry = kzalloc(sizeof(*tt_global_entry),
517 kmalloc(sizeof(*tt_global_entry), 578 GFP_ATOMIC);
518 GFP_ATOMIC);
519 if (!tt_global_entry) 579 if (!tt_global_entry)
520 goto out; 580 goto out;
521 581
522 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); 582 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
583
523 tt_global_entry->common.flags = NO_FLAGS; 584 tt_global_entry->common.flags = NO_FLAGS;
524 atomic_set(&tt_global_entry->common.refcount, 2);
525 /* Assign the new orig_node */
526 atomic_inc(&orig_node->refcount);
527 tt_global_entry->orig_node = orig_node;
528 tt_global_entry->ttvn = ttvn;
529 tt_global_entry->roam_at = 0; 585 tt_global_entry->roam_at = 0;
586 atomic_set(&tt_global_entry->common.refcount, 2);
587
588 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
589 spin_lock_init(&tt_global_entry->list_lock);
530 590
531 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, 591 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
532 choose_orig, &tt_global_entry->common, 592 choose_orig, &tt_global_entry->common,
@@ -537,19 +597,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
537 tt_global_entry_free_ref(tt_global_entry); 597 tt_global_entry_free_ref(tt_global_entry);
538 goto out_remove; 598 goto out_remove;
539 } 599 }
540 atomic_inc(&orig_node->tt_size); 600
601 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
541 } else { 602 } else {
542 if (tt_global_entry->orig_node != orig_node) { 603 /* there is already a global entry, use this one. */
543 atomic_dec(&tt_global_entry->orig_node->tt_size); 604
544 orig_node_tmp = tt_global_entry->orig_node; 605 /* If there is the TT_CLIENT_ROAM flag set, there is only one
545 atomic_inc(&orig_node->refcount); 606 * originator left in the list and we previously received a
546 tt_global_entry->orig_node = orig_node; 607 * delete + roaming change for this originator.
547 orig_node_free_ref(orig_node_tmp); 608 *
548 atomic_inc(&orig_node->tt_size); 609 * We should first delete the old originator before adding the
610 * new one.
611 */
612 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
613 tt_global_del_orig_list(tt_global_entry);
614 tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
615 tt_global_entry->roam_at = 0;
549 } 616 }
550 tt_global_entry->common.flags = NO_FLAGS; 617
551 tt_global_entry->ttvn = ttvn; 618 if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
552 tt_global_entry->roam_at = 0; 619 tt_global_add_orig_entry(tt_global_entry, orig_node,
620 ttvn);
553 } 621 }
554 622
555 if (wifi) 623 if (wifi)
@@ -570,6 +638,34 @@ out:
570 return ret; 638 return ret;
571} 639}
572 640
641/* print all orig nodes who announce the address for this global entry.
642 * it is assumed that the caller holds rcu_read_lock();
643 */
644static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
645 struct seq_file *seq)
646{
647 struct hlist_head *head;
648 struct hlist_node *node;
649 struct tt_orig_list_entry *orig_entry;
650 struct tt_common_entry *tt_common_entry;
651 uint16_t flags;
652 uint8_t last_ttvn;
653
654 tt_common_entry = &tt_global_entry->common;
655
656 head = &tt_global_entry->orig_list;
657
658 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
659 flags = tt_common_entry->flags;
660 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
661 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
662 tt_global_entry->common.addr, orig_entry->ttvn,
663 orig_entry->orig_node->orig, last_ttvn,
664 (flags & TT_CLIENT_ROAM ? 'R' : '.'),
665 (flags & TT_CLIENT_WIFI ? 'W' : '.'));
666 }
667}
668
573int tt_global_seq_print_text(struct seq_file *seq, void *offset) 669int tt_global_seq_print_text(struct seq_file *seq, void *offset)
574{ 670{
575 struct net_device *net_dev = (struct net_device *)seq->private; 671 struct net_device *net_dev = (struct net_device *)seq->private;
@@ -585,15 +681,15 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
585 681
586 primary_if = primary_if_get_selected(bat_priv); 682 primary_if = primary_if_get_selected(bat_priv);
587 if (!primary_if) { 683 if (!primary_if) {
588 ret = seq_printf(seq, "BATMAN mesh %s disabled - please " 684 ret = seq_printf(seq,
589 "specify interfaces to enable it\n", 685 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
590 net_dev->name); 686 net_dev->name);
591 goto out; 687 goto out;
592 } 688 }
593 689
594 if (primary_if->if_status != IF_ACTIVE) { 690 if (primary_if->if_status != IF_ACTIVE) {
595 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 691 ret = seq_printf(seq,
596 "primary interface not active\n", 692 "BATMAN mesh %s disabled - primary interface not active\n",
597 net_dev->name); 693 net_dev->name);
598 goto out; 694 goto out;
599 } 695 }
@@ -613,20 +709,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
613 tt_global_entry = container_of(tt_common_entry, 709 tt_global_entry = container_of(tt_common_entry,
614 struct tt_global_entry, 710 struct tt_global_entry,
615 common); 711 common);
616 seq_printf(seq, " * %pM (%3u) via %pM (%3u) " 712 tt_global_print_entry(tt_global_entry, seq);
617 "[%c%c%c]\n",
618 tt_global_entry->common.addr,
619 tt_global_entry->ttvn,
620 tt_global_entry->orig_node->orig,
621 (uint8_t) atomic_read(
622 &tt_global_entry->orig_node->
623 last_ttvn),
624 (tt_global_entry->common.flags &
625 TT_CLIENT_ROAM ? 'R' : '.'),
626 (tt_global_entry->common.flags &
627 TT_CLIENT_PENDING ? 'X' : '.'),
628 (tt_global_entry->common.flags &
629 TT_CLIENT_WIFI ? 'W' : '.'));
630 } 713 }
631 rcu_read_unlock(); 714 rcu_read_unlock();
632 } 715 }
@@ -636,30 +719,107 @@ out:
636 return ret; 719 return ret;
637} 720}
638 721
639static void _tt_global_del(struct bat_priv *bat_priv, 722/* deletes the orig list of a tt_global_entry */
640 struct tt_global_entry *tt_global_entry, 723static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
641 const char *message)
642{ 724{
643 if (!tt_global_entry) 725 struct hlist_head *head;
644 goto out; 726 struct hlist_node *node, *safe;
727 struct tt_orig_list_entry *orig_entry;
645 728
646 bat_dbg(DBG_TT, bat_priv, 729 spin_lock_bh(&tt_global_entry->list_lock);
647 "Deleting global tt entry %pM (via %pM): %s\n", 730 head = &tt_global_entry->orig_list;
648 tt_global_entry->common.addr, tt_global_entry->orig_node->orig, 731 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
649 message); 732 hlist_del_rcu(node);
733 tt_orig_list_entry_free_ref(orig_entry);
734 }
735 spin_unlock_bh(&tt_global_entry->list_lock);
736
737}
738
739static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
740 struct tt_global_entry *tt_global_entry,
741 struct orig_node *orig_node,
742 const char *message)
743{
744 struct hlist_head *head;
745 struct hlist_node *node, *safe;
746 struct tt_orig_list_entry *orig_entry;
747
748 spin_lock_bh(&tt_global_entry->list_lock);
749 head = &tt_global_entry->orig_list;
750 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
751 if (orig_entry->orig_node == orig_node) {
752 bat_dbg(DBG_TT, bat_priv,
753 "Deleting %pM from global tt entry %pM: %s\n",
754 orig_node->orig, tt_global_entry->common.addr,
755 message);
756 hlist_del_rcu(node);
757 tt_orig_list_entry_free_ref(orig_entry);
758 }
759 }
760 spin_unlock_bh(&tt_global_entry->list_lock);
761}
650 762
651 atomic_dec(&tt_global_entry->orig_node->tt_size); 763static void tt_global_del_struct(struct bat_priv *bat_priv,
764 struct tt_global_entry *tt_global_entry,
765 const char *message)
766{
767 bat_dbg(DBG_TT, bat_priv,
768 "Deleting global tt entry %pM: %s\n",
769 tt_global_entry->common.addr, message);
652 770
653 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, 771 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
654 tt_global_entry->common.addr); 772 tt_global_entry->common.addr);
655out: 773 tt_global_entry_free_ref(tt_global_entry);
656 if (tt_global_entry) 774
657 tt_global_entry_free_ref(tt_global_entry); 775}
776
777/* If the client is to be deleted, we check if it is the last origantor entry
778 * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
779 * otherwise we simply remove the originator scheduled for deletion.
780 */
781static void tt_global_del_roaming(struct bat_priv *bat_priv,
782 struct tt_global_entry *tt_global_entry,
783 struct orig_node *orig_node,
784 const char *message)
785{
786 bool last_entry = true;
787 struct hlist_head *head;
788 struct hlist_node *node;
789 struct tt_orig_list_entry *orig_entry;
790
791 /* no local entry exists, case 1:
792 * Check if this is the last one or if other entries exist.
793 */
794
795 rcu_read_lock();
796 head = &tt_global_entry->orig_list;
797 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
798 if (orig_entry->orig_node != orig_node) {
799 last_entry = false;
800 break;
801 }
802 }
803 rcu_read_unlock();
804
805 if (last_entry) {
806 /* its the last one, mark for roaming. */
807 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
808 tt_global_entry->roam_at = jiffies;
809 } else
810 /* there is another entry, we can simply delete this
811 * one and can still use the other one.
812 */
813 tt_global_del_orig_entry(bat_priv, tt_global_entry,
814 orig_node, message);
658} 815}
659 816
660void tt_global_del(struct bat_priv *bat_priv, 817
661 struct orig_node *orig_node, const unsigned char *addr, 818
662 const char *message, bool roaming) 819static void tt_global_del(struct bat_priv *bat_priv,
820 struct orig_node *orig_node,
821 const unsigned char *addr,
822 const char *message, bool roaming)
663{ 823{
664 struct tt_global_entry *tt_global_entry = NULL; 824 struct tt_global_entry *tt_global_entry = NULL;
665 struct tt_local_entry *tt_local_entry = NULL; 825 struct tt_local_entry *tt_local_entry = NULL;
@@ -668,26 +828,42 @@ void tt_global_del(struct bat_priv *bat_priv,
668 if (!tt_global_entry) 828 if (!tt_global_entry)
669 goto out; 829 goto out;
670 830
671 if (tt_global_entry->orig_node == orig_node) { 831 if (!roaming) {
672 if (roaming) { 832 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
673 /* if we are deleting a global entry due to a roam 833 message);
674 * event, there are two possibilities: 834
675 * 1) the client roamed from node A to node B => we mark 835 if (hlist_empty(&tt_global_entry->orig_list))
676 * it with TT_CLIENT_ROAM, we start a timer and we 836 tt_global_del_struct(bat_priv, tt_global_entry,
677 * wait for node B to claim it. In case of timeout 837 message);
678 * the entry is purged. 838
679 * 2) the client roamed to us => we can directly delete 839 goto out;
680 * the global entry, since it is useless now. */
681 tt_local_entry = tt_local_hash_find(bat_priv,
682 tt_global_entry->common.addr);
683 if (!tt_local_entry) {
684 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
685 tt_global_entry->roam_at = jiffies;
686 goto out;
687 }
688 }
689 _tt_global_del(bat_priv, tt_global_entry, message);
690 } 840 }
841
842 /* if we are deleting a global entry due to a roam
843 * event, there are two possibilities:
844 * 1) the client roamed from node A to node B => if there
845 * is only one originator left for this client, we mark
846 * it with TT_CLIENT_ROAM, we start a timer and we
847 * wait for node B to claim it. In case of timeout
848 * the entry is purged.
849 *
850 * If there are other originators left, we directly delete
851 * the originator.
852 * 2) the client roamed to us => we can directly delete
853 * the global entry, since it is useless now. */
854
855 tt_local_entry = tt_local_hash_find(bat_priv,
856 tt_global_entry->common.addr);
857 if (tt_local_entry) {
858 /* local entry exists, case 2: client roamed to us. */
859 tt_global_del_orig_list(tt_global_entry);
860 tt_global_del_struct(bat_priv, tt_global_entry, message);
861 } else
862 /* no local entry exists, case 1: check for roaming */
863 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
864 message);
865
866
691out: 867out:
692 if (tt_global_entry) 868 if (tt_global_entry)
693 tt_global_entry_free_ref(tt_global_entry); 869 tt_global_entry_free_ref(tt_global_entry);
@@ -715,16 +891,18 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
715 891
716 spin_lock_bh(list_lock); 892 spin_lock_bh(list_lock);
717 hlist_for_each_entry_safe(tt_common_entry, node, safe, 893 hlist_for_each_entry_safe(tt_common_entry, node, safe,
718 head, hash_entry) { 894 head, hash_entry) {
719 tt_global_entry = container_of(tt_common_entry, 895 tt_global_entry = container_of(tt_common_entry,
720 struct tt_global_entry, 896 struct tt_global_entry,
721 common); 897 common);
722 if (tt_global_entry->orig_node == orig_node) { 898
899 tt_global_del_orig_entry(bat_priv, tt_global_entry,
900 orig_node, message);
901
902 if (hlist_empty(&tt_global_entry->orig_list)) {
723 bat_dbg(DBG_TT, bat_priv, 903 bat_dbg(DBG_TT, bat_priv,
724 "Deleting global tt entry %pM " 904 "Deleting global tt entry %pM: %s\n",
725 "(via %pM): %s\n",
726 tt_global_entry->common.addr, 905 tt_global_entry->common.addr,
727 tt_global_entry->orig_node->orig,
728 message); 906 message);
729 hlist_del_rcu(node); 907 hlist_del_rcu(node);
730 tt_global_entry_free_ref(tt_global_entry); 908 tt_global_entry_free_ref(tt_global_entry);
@@ -733,6 +911,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
733 spin_unlock_bh(list_lock); 911 spin_unlock_bh(list_lock);
734 } 912 }
735 atomic_set(&orig_node->tt_size, 0); 913 atomic_set(&orig_node->tt_size, 0);
914 orig_node->tt_initialised = false;
736} 915}
737 916
738static void tt_global_roam_purge(struct bat_priv *bat_priv) 917static void tt_global_roam_purge(struct bat_priv *bat_priv)
@@ -757,14 +936,14 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
757 common); 936 common);
758 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM)) 937 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
759 continue; 938 continue;
760 if (!is_out_of_time(tt_global_entry->roam_at, 939 if (!has_timed_out(tt_global_entry->roam_at,
761 TT_CLIENT_ROAM_TIMEOUT * 1000)) 940 TT_CLIENT_ROAM_TIMEOUT))
762 continue; 941 continue;
763 942
764 bat_dbg(DBG_TT, bat_priv, "Deleting global " 943 bat_dbg(DBG_TT, bat_priv,
765 "tt entry (%pM): Roaming timeout\n", 944 "Deleting global tt entry (%pM): Roaming timeout\n",
766 tt_global_entry->common.addr); 945 tt_global_entry->common.addr);
767 atomic_dec(&tt_global_entry->orig_node->tt_size); 946
768 hlist_del_rcu(node); 947 hlist_del_rcu(node);
769 tt_global_entry_free_ref(tt_global_entry); 948 tt_global_entry_free_ref(tt_global_entry);
770 } 949 }
@@ -827,6 +1006,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
827 struct tt_local_entry *tt_local_entry = NULL; 1006 struct tt_local_entry *tt_local_entry = NULL;
828 struct tt_global_entry *tt_global_entry = NULL; 1007 struct tt_global_entry *tt_global_entry = NULL;
829 struct orig_node *orig_node = NULL; 1008 struct orig_node *orig_node = NULL;
1009 struct neigh_node *router = NULL;
1010 struct hlist_head *head;
1011 struct hlist_node *node;
1012 struct tt_orig_list_entry *orig_entry;
1013 int best_tq;
830 1014
831 if (src && atomic_read(&bat_priv->ap_isolation)) { 1015 if (src && atomic_read(&bat_priv->ap_isolation)) {
832 tt_local_entry = tt_local_hash_find(bat_priv, src); 1016 tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -843,16 +1027,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
843 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) 1027 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
844 goto out; 1028 goto out;
845 1029
846 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 1030 best_tq = 0;
847 goto out;
848
849 /* A global client marked as PENDING has already moved from that
850 * originator */
851 if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
852 goto out;
853 1031
854 orig_node = tt_global_entry->orig_node; 1032 rcu_read_lock();
1033 head = &tt_global_entry->orig_list;
1034 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1035 router = orig_node_get_router(orig_entry->orig_node);
1036 if (!router)
1037 continue;
855 1038
1039 if (router->tq_avg > best_tq) {
1040 orig_node = orig_entry->orig_node;
1041 best_tq = router->tq_avg;
1042 }
1043 neigh_node_free_ref(router);
1044 }
1045 /* found anything? */
1046 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1047 orig_node = NULL;
1048 rcu_read_unlock();
856out: 1049out:
857 if (tt_global_entry) 1050 if (tt_global_entry)
858 tt_global_entry_free_ref(tt_global_entry); 1051 tt_global_entry_free_ref(tt_global_entry);
@@ -863,7 +1056,8 @@ out:
863} 1056}
864 1057
865/* Calculates the checksum of the local table of a given orig_node */ 1058/* Calculates the checksum of the local table of a given orig_node */
866uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) 1059static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1060 struct orig_node *orig_node)
867{ 1061{
868 uint16_t total = 0, total_one; 1062 uint16_t total = 0, total_one;
869 struct hashtable_t *hash = bat_priv->tt_global_hash; 1063 struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -883,20 +1077,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
883 tt_global_entry = container_of(tt_common_entry, 1077 tt_global_entry = container_of(tt_common_entry,
884 struct tt_global_entry, 1078 struct tt_global_entry,
885 common); 1079 common);
886 if (compare_eth(tt_global_entry->orig_node, 1080 /* Roaming clients are in the global table for
887 orig_node)) { 1081 * consistency only. They don't have to be
888 /* Roaming clients are in the global table for 1082 * taken into account while computing the
889 * consistency only. They don't have to be 1083 * global crc
890 * taken into account while computing the 1084 */
891 * global crc */ 1085 if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
892 if (tt_common_entry->flags & TT_CLIENT_ROAM) 1086 continue;
893 continue; 1087
894 total_one = 0; 1088 /* find out if this global entry is announced by this
895 for (j = 0; j < ETH_ALEN; j++) 1089 * originator
896 total_one = crc16_byte(total_one, 1090 */
897 tt_common_entry->addr[j]); 1091 if (!tt_global_entry_has_orig(tt_global_entry,
898 total ^= total_one; 1092 orig_node))
899 } 1093 continue;
1094
1095 total_one = 0;
1096 for (j = 0; j < ETH_ALEN; j++)
1097 total_one = crc16_byte(total_one,
1098 tt_global_entry->common.addr[j]);
1099 total ^= total_one;
900 } 1100 }
901 rcu_read_unlock(); 1101 rcu_read_unlock();
902 } 1102 }
@@ -951,8 +1151,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
951 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1151 spin_unlock_bh(&bat_priv->tt_req_list_lock);
952} 1152}
953 1153
954void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, 1154static void tt_save_orig_buffer(struct bat_priv *bat_priv,
955 const unsigned char *tt_buff, uint8_t tt_num_changes) 1155 struct orig_node *orig_node,
1156 const unsigned char *tt_buff,
1157 uint8_t tt_num_changes)
956{ 1158{
957 uint16_t tt_buff_len = tt_len(tt_num_changes); 1159 uint16_t tt_buff_len = tt_len(tt_num_changes);
958 1160
@@ -977,8 +1179,7 @@ static void tt_req_purge(struct bat_priv *bat_priv)
977 1179
978 spin_lock_bh(&bat_priv->tt_req_list_lock); 1180 spin_lock_bh(&bat_priv->tt_req_list_lock);
979 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1181 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
980 if (is_out_of_time(node->issued_at, 1182 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
981 TT_REQUEST_TIMEOUT * 1000)) {
982 list_del(&node->list); 1183 list_del(&node->list);
983 kfree(node); 1184 kfree(node);
984 } 1185 }
@@ -996,8 +1197,8 @@ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
996 spin_lock_bh(&bat_priv->tt_req_list_lock); 1197 spin_lock_bh(&bat_priv->tt_req_list_lock);
997 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { 1198 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
998 if (compare_eth(tt_req_node_tmp, orig_node) && 1199 if (compare_eth(tt_req_node_tmp, orig_node) &&
999 !is_out_of_time(tt_req_node_tmp->issued_at, 1200 !has_timed_out(tt_req_node_tmp->issued_at,
1000 TT_REQUEST_TIMEOUT * 1000)) 1201 TT_REQUEST_TIMEOUT))
1001 goto unlock; 1202 goto unlock;
1002 } 1203 }
1003 1204
@@ -1036,7 +1237,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1036 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 1237 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1037 common); 1238 common);
1038 1239
1039 return (tt_global_entry->orig_node == orig_node); 1240 return tt_global_entry_has_orig(tt_global_entry, orig_node);
1040} 1241}
1041 1242
1042static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, 1243static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1134,13 +1335,13 @@ static int send_tt_request(struct bat_priv *bat_priv,
1134 tt_request = (struct tt_query_packet *)skb_put(skb, 1335 tt_request = (struct tt_query_packet *)skb_put(skb,
1135 sizeof(struct tt_query_packet)); 1336 sizeof(struct tt_query_packet));
1136 1337
1137 tt_request->packet_type = BAT_TT_QUERY; 1338 tt_request->header.packet_type = BAT_TT_QUERY;
1138 tt_request->version = COMPAT_VERSION; 1339 tt_request->header.version = COMPAT_VERSION;
1139 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1340 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1140 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); 1341 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1141 tt_request->ttl = TTL; 1342 tt_request->header.ttl = TTL;
1142 tt_request->ttvn = ttvn; 1343 tt_request->ttvn = ttvn;
1143 tt_request->tt_data = tt_crc; 1344 tt_request->tt_data = htons(tt_crc);
1144 tt_request->flags = TT_REQUEST; 1345 tt_request->flags = TT_REQUEST;
1145 1346
1146 if (full_table) 1347 if (full_table)
@@ -1150,8 +1351,9 @@ static int send_tt_request(struct bat_priv *bat_priv,
1150 if (!neigh_node) 1351 if (!neigh_node)
1151 goto out; 1352 goto out;
1152 1353
1153 bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM " 1354 bat_dbg(DBG_TT, bat_priv,
1154 "[%c]\n", dst_orig_node->orig, neigh_node->addr, 1355 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1356 dst_orig_node->orig, neigh_node->addr,
1155 (full_table ? 'F' : '.')); 1357 (full_table ? 'F' : '.'));
1156 1358
1157 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1359 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -1188,9 +1390,8 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1188 struct tt_query_packet *tt_response; 1390 struct tt_query_packet *tt_response;
1189 1391
1190 bat_dbg(DBG_TT, bat_priv, 1392 bat_dbg(DBG_TT, bat_priv,
1191 "Received TT_REQUEST from %pM for " 1393 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1192 "ttvn: %u (%pM) [%c]\n", tt_request->src, 1394 tt_request->src, tt_request->ttvn, tt_request->dst,
1193 tt_request->ttvn, tt_request->dst,
1194 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1395 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1195 1396
1196 /* Let's get the orig node of the REAL destination */ 1397 /* Let's get the orig node of the REAL destination */
@@ -1264,9 +1465,9 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1264 tt_response = (struct tt_query_packet *)skb->data; 1465 tt_response = (struct tt_query_packet *)skb->data;
1265 } 1466 }
1266 1467
1267 tt_response->packet_type = BAT_TT_QUERY; 1468 tt_response->header.packet_type = BAT_TT_QUERY;
1268 tt_response->version = COMPAT_VERSION; 1469 tt_response->header.version = COMPAT_VERSION;
1269 tt_response->ttl = TTL; 1470 tt_response->header.ttl = TTL;
1270 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); 1471 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1271 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1472 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1272 tt_response->flags = TT_RESPONSE; 1473 tt_response->flags = TT_RESPONSE;
@@ -1315,9 +1516,8 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
1315 struct tt_query_packet *tt_response; 1516 struct tt_query_packet *tt_response;
1316 1517
1317 bat_dbg(DBG_TT, bat_priv, 1518 bat_dbg(DBG_TT, bat_priv,
1318 "Received TT_REQUEST from %pM for " 1519 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1319 "ttvn: %u (me) [%c]\n", tt_request->src, 1520 tt_request->src, tt_request->ttvn,
1320 tt_request->ttvn,
1321 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1521 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1322 1522
1323 1523
@@ -1381,9 +1581,9 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
1381 tt_response = (struct tt_query_packet *)skb->data; 1581 tt_response = (struct tt_query_packet *)skb->data;
1382 } 1582 }
1383 1583
1384 tt_response->packet_type = BAT_TT_QUERY; 1584 tt_response->header.packet_type = BAT_TT_QUERY;
1385 tt_response->version = COMPAT_VERSION; 1585 tt_response->header.version = COMPAT_VERSION;
1386 tt_response->ttl = TTL; 1586 tt_response->header.ttl = TTL;
1387 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1587 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1388 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1588 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1389 tt_response->flags = TT_RESPONSE; 1589 tt_response->flags = TT_RESPONSE;
@@ -1418,10 +1618,15 @@ out:
1418bool send_tt_response(struct bat_priv *bat_priv, 1618bool send_tt_response(struct bat_priv *bat_priv,
1419 struct tt_query_packet *tt_request) 1619 struct tt_query_packet *tt_request)
1420{ 1620{
1421 if (is_my_mac(tt_request->dst)) 1621 if (is_my_mac(tt_request->dst)) {
1622 /* don't answer backbone gws! */
1623 if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1624 return true;
1625
1422 return send_my_tt_response(bat_priv, tt_request); 1626 return send_my_tt_response(bat_priv, tt_request);
1423 else 1627 } else {
1424 return send_other_tt_response(bat_priv, tt_request); 1628 return send_other_tt_response(bat_priv, tt_request);
1629 }
1425} 1630}
1426 1631
1427static void _tt_update_changes(struct bat_priv *bat_priv, 1632static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1450,6 +1655,7 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
1450 */ 1655 */
1451 return; 1656 return;
1452 } 1657 }
1658 orig_node->tt_initialised = true;
1453} 1659}
1454 1660
1455static void tt_fill_gtable(struct bat_priv *bat_priv, 1661static void tt_fill_gtable(struct bat_priv *bat_priv,
@@ -1519,12 +1725,15 @@ void handle_tt_response(struct bat_priv *bat_priv,
1519 struct tt_req_node *node, *safe; 1725 struct tt_req_node *node, *safe;
1520 struct orig_node *orig_node = NULL; 1726 struct orig_node *orig_node = NULL;
1521 1727
1522 bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for " 1728 bat_dbg(DBG_TT, bat_priv,
1523 "ttvn %d t_size: %d [%c]\n", 1729 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1524 tt_response->src, tt_response->ttvn, 1730 tt_response->src, tt_response->ttvn, tt_response->tt_data,
1525 tt_response->tt_data,
1526 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); 1731 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1527 1732
1733 /* we should have never asked a backbone gw */
1734 if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1735 goto out;
1736
1528 orig_node = orig_hash_find(bat_priv, tt_response->src); 1737 orig_node = orig_hash_find(bat_priv, tt_response->src);
1529 if (!orig_node) 1738 if (!orig_node)
1530 goto out; 1739 goto out;
@@ -1589,8 +1798,7 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
1589 1798
1590 spin_lock_bh(&bat_priv->tt_roam_list_lock); 1799 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1591 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 1800 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1592 if (!is_out_of_time(node->first_time, 1801 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1593 ROAMING_MAX_TIME * 1000))
1594 continue; 1802 continue;
1595 1803
1596 list_del(&node->list); 1804 list_del(&node->list);
@@ -1617,8 +1825,7 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
1617 if (!compare_eth(tt_roam_node->addr, client)) 1825 if (!compare_eth(tt_roam_node->addr, client))
1618 continue; 1826 continue;
1619 1827
1620 if (is_out_of_time(tt_roam_node->first_time, 1828 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
1621 ROAMING_MAX_TIME * 1000))
1622 continue; 1829 continue;
1623 1830
1624 if (!atomic_dec_not_zero(&tt_roam_node->counter)) 1831 if (!atomic_dec_not_zero(&tt_roam_node->counter))
@@ -1646,8 +1853,8 @@ unlock:
1646 return ret; 1853 return ret;
1647} 1854}
1648 1855
1649void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 1856static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1650 struct orig_node *orig_node) 1857 struct orig_node *orig_node)
1651{ 1858{
1652 struct neigh_node *neigh_node = NULL; 1859 struct neigh_node *neigh_node = NULL;
1653 struct sk_buff *skb = NULL; 1860 struct sk_buff *skb = NULL;
@@ -1669,9 +1876,9 @@ void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1669 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, 1876 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1670 sizeof(struct roam_adv_packet)); 1877 sizeof(struct roam_adv_packet));
1671 1878
1672 roam_adv_packet->packet_type = BAT_ROAM_ADV; 1879 roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1673 roam_adv_packet->version = COMPAT_VERSION; 1880 roam_adv_packet->header.version = COMPAT_VERSION;
1674 roam_adv_packet->ttl = TTL; 1881 roam_adv_packet->header.ttl = TTL;
1675 primary_if = primary_if_get_selected(bat_priv); 1882 primary_if = primary_if_get_selected(bat_priv);
1676 if (!primary_if) 1883 if (!primary_if)
1677 goto out; 1884 goto out;
@@ -1788,8 +1995,9 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1788 if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) 1995 if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
1789 continue; 1996 continue;
1790 1997
1791 bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry " 1998 bat_dbg(DBG_TT, bat_priv,
1792 "(%pM): pending\n", tt_common_entry->addr); 1999 "Deleting local tt entry (%pM): pending\n",
2000 tt_common_entry->addr);
1793 2001
1794 atomic_dec(&bat_priv->num_local_tt); 2002 atomic_dec(&bat_priv->num_local_tt);
1795 hlist_del_rcu(node); 2003 hlist_del_rcu(node);
@@ -1814,6 +2022,8 @@ void tt_commit_changes(struct bat_priv *bat_priv)
1814 2022
1815 /* Increment the TTVN only once per OGM interval */ 2023 /* Increment the TTVN only once per OGM interval */
1816 atomic_inc(&bat_priv->ttvn); 2024 atomic_inc(&bat_priv->ttvn);
2025 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2026 (uint8_t)atomic_read(&bat_priv->ttvn));
1817 bat_priv->tt_poss_change = false; 2027 bat_priv->tt_poss_change = false;
1818} 2028}
1819 2029
@@ -1854,8 +2064,14 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1854 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 2064 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1855 bool full_table = true; 2065 bool full_table = true;
1856 2066
1857 /* the ttvn increased by one -> we can apply the attached changes */ 2067 /* don't care about a backbone gateways updates. */
1858 if (ttvn - orig_ttvn == 1) { 2068 if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2069 return;
2070
2071 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2072 * increased by one -> we can apply the attached changes */
2073 if ((!orig_node->tt_initialised && ttvn == 1) ||
2074 ttvn - orig_ttvn == 1) {
1859 /* the OGM could not contain the changes due to their size or 2075 /* the OGM could not contain the changes due to their size or
1860 * because they have already been sent TT_OGM_APPEND_MAX times. 2076 * because they have already been sent TT_OGM_APPEND_MAX times.
1861 * In this case send a tt request */ 2077 * In this case send a tt request */
@@ -1889,17 +2105,36 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1889 } else { 2105 } else {
1890 /* if we missed more than one change or our tables are not 2106 /* if we missed more than one change or our tables are not
1891 * in sync anymore -> request fresh tt data */ 2107 * in sync anymore -> request fresh tt data */
1892 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { 2108
2109 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2110 orig_node->tt_crc != tt_crc) {
1893request_table: 2111request_table:
1894 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. " 2112 bat_dbg(DBG_TT, bat_priv,
1895 "Need to retrieve the correct information " 2113 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
1896 "(ttvn: %u last_ttvn: %u crc: %u last_crc: " 2114 orig_node->orig, ttvn, orig_ttvn, tt_crc,
1897 "%u num_changes: %u)\n", orig_node->orig, ttvn, 2115 orig_node->tt_crc, tt_num_changes);
1898 orig_ttvn, tt_crc, orig_node->tt_crc,
1899 tt_num_changes);
1900 send_tt_request(bat_priv, orig_node, ttvn, tt_crc, 2116 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
1901 full_table); 2117 full_table);
1902 return; 2118 return;
1903 } 2119 }
1904 } 2120 }
1905} 2121}
2122
2123/* returns true whether we know that the client has moved from its old
2124 * originator to another one. This entry is kept is still kept for consistency
2125 * purposes
2126 */
2127bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
2128{
2129 struct tt_global_entry *tt_global_entry;
2130 bool ret = false;
2131
2132 tt_global_entry = tt_global_hash_find(bat_priv, addr);
2133 if (!tt_global_entry)
2134 goto out;
2135
2136 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2137 tt_global_entry_free_ref(tt_global_entry);
2138out:
2139 return ret;
2140}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 30efd49881a3..c43374dc364d 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public 7 * modify it under the terms of version 2 of the GNU General Public
@@ -39,27 +39,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
39int tt_global_seq_print_text(struct seq_file *seq, void *offset); 39int tt_global_seq_print_text(struct seq_file *seq, void *offset);
40void tt_global_del_orig(struct bat_priv *bat_priv, 40void tt_global_del_orig(struct bat_priv *bat_priv,
41 struct orig_node *orig_node, const char *message); 41 struct orig_node *orig_node, const char *message);
42void tt_global_del(struct bat_priv *bat_priv,
43 struct orig_node *orig_node, const unsigned char *addr,
44 const char *message, bool roaming);
45struct orig_node *transtable_search(struct bat_priv *bat_priv, 42struct orig_node *transtable_search(struct bat_priv *bat_priv,
46 const uint8_t *src, const uint8_t *addr); 43 const uint8_t *src, const uint8_t *addr);
47void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
48 const unsigned char *tt_buff, uint8_t tt_num_changes);
49uint16_t tt_local_crc(struct bat_priv *bat_priv); 44uint16_t tt_local_crc(struct bat_priv *bat_priv);
50uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
51void tt_free(struct bat_priv *bat_priv); 45void tt_free(struct bat_priv *bat_priv);
52bool send_tt_response(struct bat_priv *bat_priv, 46bool send_tt_response(struct bat_priv *bat_priv,
53 struct tt_query_packet *tt_request); 47 struct tt_query_packet *tt_request);
54bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 48bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
55void handle_tt_response(struct bat_priv *bat_priv, 49void handle_tt_response(struct bat_priv *bat_priv,
56 struct tt_query_packet *tt_response); 50 struct tt_query_packet *tt_response);
57void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
58 struct orig_node *orig_node);
59void tt_commit_changes(struct bat_priv *bat_priv); 51void tt_commit_changes(struct bat_priv *bat_priv);
60bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); 52bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
61void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 53void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
62 const unsigned char *tt_buff, uint8_t tt_num_changes, 54 const unsigned char *tt_buff, uint8_t tt_num_changes,
63 uint8_t ttvn, uint16_t tt_crc); 55 uint8_t ttvn, uint16_t tt_crc);
56bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
57
64 58
65#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 59#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index e9eb043719ac..61308e8016ff 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -27,7 +27,7 @@
27#include "packet.h" 27#include "packet.h"
28#include "bitarray.h" 28#include "bitarray.h"
29 29
30#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ 30#define BAT_HEADER_LEN (ETH_HLEN + \
31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ 31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
32 sizeof(struct unicast_packet) : \ 32 sizeof(struct unicast_packet) : \
33 sizeof(struct bcast_packet)))) 33 sizeof(struct bcast_packet))))
@@ -52,7 +52,7 @@ struct hard_iface {
52/** 52/**
53 * orig_node - structure for orig_list maintaining nodes of mesh 53 * orig_node - structure for orig_list maintaining nodes of mesh
54 * @primary_addr: hosts primary interface address 54 * @primary_addr: hosts primary interface address
55 * @last_valid: when last packet from this node was received 55 * @last_seen: when last packet from this node was received
56 * @bcast_seqno_reset: time when the broadcast seqno window was reset 56 * @bcast_seqno_reset: time when the broadcast seqno window was reset
57 * @batman_seqno_reset: time when the batman seqno window was reset 57 * @batman_seqno_reset: time when the batman seqno window was reset
58 * @gw_flags: flags related to gateway class 58 * @gw_flags: flags related to gateway class
@@ -70,7 +70,7 @@ struct orig_node {
70 struct neigh_node __rcu *router; /* rcu protected pointer */ 70 struct neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 71 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 72 uint8_t *bcast_own_sum;
73 unsigned long last_valid; 73 unsigned long last_seen;
74 unsigned long bcast_seqno_reset; 74 unsigned long bcast_seqno_reset;
75 unsigned long batman_seqno_reset; 75 unsigned long batman_seqno_reset;
76 uint8_t gw_flags; 76 uint8_t gw_flags;
@@ -81,6 +81,7 @@ struct orig_node {
81 int16_t tt_buff_len; 81 int16_t tt_buff_len;
82 spinlock_t tt_buff_lock; /* protects tt_buff */ 82 spinlock_t tt_buff_lock; /* protects tt_buff */
83 atomic_t tt_size; 83 atomic_t tt_size;
84 bool tt_initialised;
84 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 85 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
85 * If true, then I sent a Roaming_adv to this orig_node and I have to 86 * If true, then I sent a Roaming_adv to this orig_node and I have to
86 * inspect every packet directed to it to check whether it is still 87 * inspect every packet directed to it to check whether it is still
@@ -89,7 +90,7 @@ struct orig_node {
89 bool tt_poss_change; 90 bool tt_poss_change;
90 uint32_t last_real_seqno; 91 uint32_t last_real_seqno;
91 uint8_t last_ttl; 92 uint8_t last_ttl;
92 unsigned long bcast_bits[NUM_WORDS]; 93 DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
93 uint32_t last_bcast_seqno; 94 uint32_t last_bcast_seqno;
94 struct hlist_head neigh_list; 95 struct hlist_head neigh_list;
95 struct list_head frag_list; 96 struct list_head frag_list;
@@ -119,7 +120,7 @@ struct gw_node {
119 120
120/** 121/**
121 * neigh_node 122 * neigh_node
122 * @last_valid: when last packet via this neighbor was received 123 * @last_seen: when last packet via this neighbor was received
123 */ 124 */
124struct neigh_node { 125struct neigh_node {
125 struct hlist_node list; 126 struct hlist_node list;
@@ -130,15 +131,22 @@ struct neigh_node {
130 uint8_t tq_avg; 131 uint8_t tq_avg;
131 uint8_t last_ttl; 132 uint8_t last_ttl;
132 struct list_head bonding_list; 133 struct list_head bonding_list;
133 unsigned long last_valid; 134 unsigned long last_seen;
134 unsigned long real_bits[NUM_WORDS]; 135 DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
135 atomic_t refcount; 136 atomic_t refcount;
136 struct rcu_head rcu; 137 struct rcu_head rcu;
137 struct orig_node *orig_node; 138 struct orig_node *orig_node;
138 struct hard_iface *if_incoming; 139 struct hard_iface *if_incoming;
139 spinlock_t tq_lock; /* protects: tq_recv, tq_index */ 140 spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
140}; 141};
141 142
143#ifdef CONFIG_BATMAN_ADV_BLA
144struct bcast_duplist_entry {
145 uint8_t orig[ETH_ALEN];
146 uint16_t crc;
147 unsigned long entrytime;
148};
149#endif
142 150
143struct bat_priv { 151struct bat_priv {
144 atomic_t mesh_state; 152 atomic_t mesh_state;
@@ -147,6 +155,7 @@ struct bat_priv {
147 atomic_t bonding; /* boolean */ 155 atomic_t bonding; /* boolean */
148 atomic_t fragmentation; /* boolean */ 156 atomic_t fragmentation; /* boolean */
149 atomic_t ap_isolation; /* boolean */ 157 atomic_t ap_isolation; /* boolean */
158 atomic_t bridge_loop_avoidance; /* boolean */
150 atomic_t vis_mode; /* VIS_TYPE_* */ 159 atomic_t vis_mode; /* VIS_TYPE_* */
151 atomic_t gw_mode; /* GW_MODE_* */ 160 atomic_t gw_mode; /* GW_MODE_* */
152 atomic_t gw_sel_class; /* uint */ 161 atomic_t gw_sel_class; /* uint */
@@ -160,6 +169,7 @@ struct bat_priv {
160 atomic_t ttvn; /* translation table version number */ 169 atomic_t ttvn; /* translation table version number */
161 atomic_t tt_ogm_append_cnt; 170 atomic_t tt_ogm_append_cnt;
162 atomic_t tt_local_changes; /* changes registered in a OGM interval */ 171 atomic_t tt_local_changes; /* changes registered in a OGM interval */
172 atomic_t bla_num_requests; /* number of bla requests in flight */
163 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 173 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
164 * If true, then I received a Roaming_adv and I have to inspect every 174 * If true, then I received a Roaming_adv and I have to inspect every
165 * packet directed to me to check whether I am still the true 175 * packet directed to me to check whether I am still the true
@@ -173,15 +183,23 @@ struct bat_priv {
173 struct hlist_head forw_bat_list; 183 struct hlist_head forw_bat_list;
174 struct hlist_head forw_bcast_list; 184 struct hlist_head forw_bcast_list;
175 struct hlist_head gw_list; 185 struct hlist_head gw_list;
176 struct hlist_head softif_neigh_vids;
177 struct list_head tt_changes_list; /* tracks changes in a OGM int */ 186 struct list_head tt_changes_list; /* tracks changes in a OGM int */
178 struct list_head vis_send_list; 187 struct list_head vis_send_list;
179 struct hashtable_t *orig_hash; 188 struct hashtable_t *orig_hash;
180 struct hashtable_t *tt_local_hash; 189 struct hashtable_t *tt_local_hash;
181 struct hashtable_t *tt_global_hash; 190 struct hashtable_t *tt_global_hash;
191#ifdef CONFIG_BATMAN_ADV_BLA
192 struct hashtable_t *claim_hash;
193 struct hashtable_t *backbone_hash;
194#endif
182 struct list_head tt_req_list; /* list of pending tt_requests */ 195 struct list_head tt_req_list; /* list of pending tt_requests */
183 struct list_head tt_roam_list; 196 struct list_head tt_roam_list;
184 struct hashtable_t *vis_hash; 197 struct hashtable_t *vis_hash;
198#ifdef CONFIG_BATMAN_ADV_BLA
199 struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
200 int bcast_duplist_curr;
201 struct bla_claim_dst claim_dest;
202#endif
185 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 203 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
186 spinlock_t forw_bcast_list_lock; /* protects */ 204 spinlock_t forw_bcast_list_lock; /* protects */
187 spinlock_t tt_changes_list_lock; /* protects tt_changes */ 205 spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -190,8 +208,6 @@ struct bat_priv {
190 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ 208 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
191 spinlock_t vis_hash_lock; /* protects vis_hash */ 209 spinlock_t vis_hash_lock; /* protects vis_hash */
192 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 210 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
193 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
194 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
195 atomic_t num_local_tt; 211 atomic_t num_local_tt;
196 /* Checksum of the local table, recomputed before sending a new OGM */ 212 /* Checksum of the local table, recomputed before sending a new OGM */
197 atomic_t tt_crc; 213 atomic_t tt_crc;
@@ -201,10 +217,12 @@ struct bat_priv {
201 struct delayed_work tt_work; 217 struct delayed_work tt_work;
202 struct delayed_work orig_work; 218 struct delayed_work orig_work;
203 struct delayed_work vis_work; 219 struct delayed_work vis_work;
220 struct delayed_work bla_work;
204 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 221 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
205 atomic_t gw_reselect; 222 atomic_t gw_reselect;
206 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 223 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
207 struct vis_info *my_vis_info; 224 struct vis_info *my_vis_info;
225 struct bat_algo_ops *bat_algo_ops;
208}; 226};
209 227
210struct socket_client { 228struct socket_client {
@@ -237,11 +255,42 @@ struct tt_local_entry {
237 255
238struct tt_global_entry { 256struct tt_global_entry {
239 struct tt_common_entry common; 257 struct tt_common_entry common;
258 struct hlist_head orig_list;
259 spinlock_t list_lock; /* protects the list */
260 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
261};
262
263struct tt_orig_list_entry {
240 struct orig_node *orig_node; 264 struct orig_node *orig_node;
241 uint8_t ttvn; 265 uint8_t ttvn;
242 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 266 struct rcu_head rcu;
267 struct hlist_node list;
243}; 268};
244 269
270#ifdef CONFIG_BATMAN_ADV_BLA
271struct backbone_gw {
272 uint8_t orig[ETH_ALEN];
273 short vid; /* used VLAN ID */
274 struct hlist_node hash_entry;
275 struct bat_priv *bat_priv;
276 unsigned long lasttime; /* last time we heard of this backbone gw */
277 atomic_t request_sent;
278 atomic_t refcount;
279 struct rcu_head rcu;
280 uint16_t crc; /* crc checksum over all claims */
281};
282
283struct claim {
284 uint8_t addr[ETH_ALEN];
285 short vid;
286 struct backbone_gw *backbone_gw;
287 unsigned long lasttime; /* last time we heard of claim (locals only) */
288 struct rcu_head rcu;
289 atomic_t refcount;
290 struct hlist_node hash_entry;
291};
292#endif
293
245struct tt_change_node { 294struct tt_change_node {
246 struct list_head list; 295 struct list_head list;
247 struct tt_change change; 296 struct tt_change change;
@@ -325,22 +374,24 @@ struct recvlist_node {
325 uint8_t mac[ETH_ALEN]; 374 uint8_t mac[ETH_ALEN];
326}; 375};
327 376
328struct softif_neigh_vid { 377struct bat_algo_ops {
329 struct hlist_node list; 378 struct hlist_node list;
330 struct bat_priv *bat_priv; 379 char *name;
331 short vid; 380 /* init routing info when hard-interface is enabled */
332 atomic_t refcount; 381 int (*bat_iface_enable)(struct hard_iface *hard_iface);
333 struct softif_neigh __rcu *softif_neigh; 382 /* de-init routing info when hard-interface is disabled */
334 struct rcu_head rcu; 383 void (*bat_iface_disable)(struct hard_iface *hard_iface);
335 struct hlist_head softif_neigh_list; 384 /* (re-)init mac addresses of the protocol information
336}; 385 * belonging to this hard-interface
337 386 */
338struct softif_neigh { 387 void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
339 struct hlist_node list; 388 /* called when primary interface is selected / changed */
340 uint8_t addr[ETH_ALEN]; 389 void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
341 unsigned long last_seen; 390 /* prepare a new outgoing OGM for the send queue */
342 atomic_t refcount; 391 void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
343 struct rcu_head rcu; 392 int tt_num_changes);
393 /* send scheduled OGM */
394 void (*bat_ogm_emit)(struct forw_packet *forw_packet);
344}; 395};
345 396
346#endif /* _NET_BATMAN_ADV_TYPES_H_ */ 397#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 07d1c1da89dd..74175c210858 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Andreas Langer 4 * Andreas Langer
5 * 5 *
@@ -66,8 +66,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
66 kfree_skb(tmp_skb); 66 kfree_skb(tmp_skb);
67 67
68 memmove(skb->data + uni_diff, skb->data, hdr_len); 68 memmove(skb->data + uni_diff, skb->data, hdr_len);
69 unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff); 69 unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
70 unicast_packet->packet_type = BAT_UNICAST; 70 unicast_packet->header.packet_type = BAT_UNICAST;
71 71
72 return skb; 72 return skb;
73 73
@@ -238,7 +238,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
238 goto dropped; 238 goto dropped;
239 skb_reserve(frag_skb, ucf_hdr_len); 239 skb_reserve(frag_skb, ucf_hdr_len);
240 240
241 unicast_packet = (struct unicast_packet *) skb->data; 241 unicast_packet = (struct unicast_packet *)skb->data;
242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len); 242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); 243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
244 244
@@ -251,9 +251,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
251 251
252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc)); 252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
253 253
254 frag1->ttl--; 254 frag1->header.ttl--;
255 frag1->version = COMPAT_VERSION; 255 frag1->header.version = COMPAT_VERSION;
256 frag1->packet_type = BAT_UNICAST_FRAG; 256 frag1->header.packet_type = BAT_UNICAST_FRAG;
257 257
258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
259 memcpy(frag2, frag1, sizeof(*frag2)); 259 memcpy(frag2, frag1, sizeof(*frag2));
@@ -320,22 +320,30 @@ find_router:
320 320
321 unicast_packet = (struct unicast_packet *)skb->data; 321 unicast_packet = (struct unicast_packet *)skb->data;
322 322
323 unicast_packet->version = COMPAT_VERSION; 323 unicast_packet->header.version = COMPAT_VERSION;
324 /* batman packet type: unicast */ 324 /* batman packet type: unicast */
325 unicast_packet->packet_type = BAT_UNICAST; 325 unicast_packet->header.packet_type = BAT_UNICAST;
326 /* set unicast ttl */ 326 /* set unicast ttl */
327 unicast_packet->ttl = TTL; 327 unicast_packet->header.ttl = TTL;
328 /* copy the destination for faster routing */ 328 /* copy the destination for faster routing */
329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
330 /* set the destination tt version number */ 330 /* set the destination tt version number */
331 unicast_packet->ttvn = 331 unicast_packet->ttvn =
332 (uint8_t)atomic_read(&orig_node->last_ttvn); 332 (uint8_t)atomic_read(&orig_node->last_ttvn);
333 333
334 /* inform the destination node that we are still missing a correct route
335 * for this client. The destination will receive this packet and will
336 * try to reroute it because the ttvn contained in the header is less
337 * than the current one
338 */
339 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
340 unicast_packet->ttvn = unicast_packet->ttvn - 1;
341
334 if (atomic_read(&bat_priv->fragmentation) && 342 if (atomic_read(&bat_priv->fragmentation) &&
335 data_len + sizeof(*unicast_packet) > 343 data_len + sizeof(*unicast_packet) >
336 neigh_node->if_incoming->net_dev->mtu) { 344 neigh_node->if_incoming->net_dev->mtu) {
337 /* send frag skb decreases ttl */ 345 /* send frag skb decreases ttl */
338 unicast_packet->ttl++; 346 unicast_packet->header.ttl++;
339 ret = frag_send_skb(skb, bat_priv, 347 ret = frag_send_skb(skb, bat_priv,
340 neigh_node->if_incoming, neigh_node->addr); 348 neigh_node->if_incoming, neigh_node->addr);
341 goto out; 349 goto out;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 8fd5535544b9..a9faf6b1db19 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Andreas Langer 4 * Andreas Langer
5 * 5 *
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cc3b9f2f3b5d..cec216fb77c7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich 4 * Simon Wunderlich
5 * 5 *
@@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
434 return NULL; 434 return NULL;
435 435
436 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + 436 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
437 sizeof(struct ethhdr)); 437 ETH_HLEN);
438 if (!info->skb_packet) { 438 if (!info->skb_packet) {
439 kfree(info); 439 kfree(info);
440 return NULL; 440 return NULL;
441 } 441 }
442 skb_reserve(info->skb_packet, sizeof(struct ethhdr)); 442 skb_reserve(info->skb_packet, ETH_HLEN);
443 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) 443 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
444 + vis_info_len); 444 + vis_info_len);
445 445
@@ -617,7 +617,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
617 packet->vis_type = atomic_read(&bat_priv->vis_mode); 617 packet->vis_type = atomic_read(&bat_priv->vis_mode);
618 618
619 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); 619 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
620 packet->ttl = TTL; 620 packet->header.ttl = TTL;
621 packet->seqno = htonl(ntohl(packet->seqno) + 1); 621 packet->seqno = htonl(ntohl(packet->seqno) + 1);
622 packet->entries = 0; 622 packet->entries = 0;
623 skb_trim(info->skb_packet, sizeof(*packet)); 623 skb_trim(info->skb_packet, sizeof(*packet));
@@ -714,8 +714,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
714 if (info == bat_priv->my_vis_info) 714 if (info == bat_priv->my_vis_info)
715 continue; 715 continue;
716 716
717 if (time_after(jiffies, 717 if (has_timed_out(info->first_seen, VIS_TIMEOUT)) {
718 info->first_seen + VIS_TIMEOUT * HZ)) {
719 hlist_del(node); 718 hlist_del(node);
720 send_list_del(info); 719 send_list_del(info);
721 kref_put(&info->refcount, free_info); 720 kref_put(&info->refcount, free_info);
@@ -818,19 +817,19 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
818 goto out; 817 goto out;
819 818
820 packet = (struct vis_packet *)info->skb_packet->data; 819 packet = (struct vis_packet *)info->skb_packet->data;
821 if (packet->ttl < 2) { 820 if (packet->header.ttl < 2) {
822 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 821 pr_debug("Error - can't send vis packet: ttl exceeded\n");
823 goto out; 822 goto out;
824 } 823 }
825 824
826 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 825 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
827 packet->ttl--; 826 packet->header.ttl--;
828 827
829 if (is_broadcast_ether_addr(packet->target_orig)) 828 if (is_broadcast_ether_addr(packet->target_orig))
830 broadcast_vis_packet(bat_priv, info); 829 broadcast_vis_packet(bat_priv, info);
831 else 830 else
832 unicast_vis_packet(bat_priv, info); 831 unicast_vis_packet(bat_priv, info);
833 packet->ttl++; /* restore TTL */ 832 packet->header.ttl++; /* restore TTL */
834 833
835out: 834out:
836 if (primary_if) 835 if (primary_if)
@@ -895,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv)
895 894
896 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + 895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
897 MAX_VIS_PACKET_SIZE + 896 MAX_VIS_PACKET_SIZE +
898 sizeof(struct ethhdr)); 897 ETH_HLEN);
899 if (!bat_priv->my_vis_info->skb_packet) 898 if (!bat_priv->my_vis_info->skb_packet)
900 goto free_info; 899 goto free_info;
901 900
902 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); 901 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
903 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, 902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
904 sizeof(*packet)); 903 sizeof(*packet));
905 904
@@ -910,9 +909,9 @@ int vis_init(struct bat_priv *bat_priv)
910 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); 909 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
911 kref_init(&bat_priv->my_vis_info->refcount); 910 kref_init(&bat_priv->my_vis_info->refcount);
912 bat_priv->my_vis_info->bat_priv = bat_priv; 911 bat_priv->my_vis_info->bat_priv = bat_priv;
913 packet->version = COMPAT_VERSION; 912 packet->header.version = COMPAT_VERSION;
914 packet->packet_type = BAT_VIS; 913 packet->header.packet_type = BAT_VIS;
915 packet->ttl = TTL; 914 packet->header.ttl = TTL;
916 packet->seqno = 0; 915 packet->seqno = 0;
917 packet->entries = 0; 916 packet->entries = 0;
918 917
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 31b820d07f23..ee2e46e5347b 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
@@ -22,7 +22,8 @@
22#ifndef _NET_BATMAN_ADV_VIS_H_ 22#ifndef _NET_BATMAN_ADV_VIS_H_
23#define _NET_BATMAN_ADV_VIS_H_ 23#define _NET_BATMAN_ADV_VIS_H_
24 24
25#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */ 25#define VIS_TIMEOUT 200000 /* timeout of vis packets
26 * in miliseconds */
26 27
27int vis_seq_print_text(struct seq_file *seq, void *offset); 28int vis_seq_print_text(struct seq_file *seq, void *offset);
28void receive_server_sync_packet(struct bat_priv *bat_priv, 29void receive_server_sync_packet(struct bat_priv *bat_priv,
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 9ec85eb8853d..3537d385035e 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -29,7 +29,6 @@ menuconfig BT
29 BNEP Module (Bluetooth Network Encapsulation Protocol) 29 BNEP Module (Bluetooth Network Encapsulation Protocol)
30 CMTP Module (CAPI Message Transport Protocol) 30 CMTP Module (CAPI Message Transport Protocol)
31 HIDP Module (Human Interface Device Protocol) 31 HIDP Module (Human Interface Device Protocol)
32 SMP Module (Security Manager Protocol)
33 32
34 Say Y here to compile Bluetooth support into the kernel or say M to 33 Say Y here to compile Bluetooth support into the kernel or say M to
35 compile it as module (bluetooth). 34 compile it as module (bluetooth).
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 72eb187a5f60..46e7f86acfc9 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
210 } 210 }
211 211
212 if (sk->sk_state == BT_CONNECTED || !newsock || 212 if (sk->sk_state == BT_CONNECTED || !newsock ||
213 bt_sk(parent)->defer_setup) { 213 test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
214 bt_accept_unlink(sk); 214 bt_accept_unlink(sk);
215 if (newsock) 215 if (newsock)
216 sock_graft(sk, newsock); 216 sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
410 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 410 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
411 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 411 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
412 if (sk->sk_state == BT_CONNECTED || 412 if (sk->sk_state == BT_CONNECTED ||
413 (bt_sk(parent)->defer_setup && 413 (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
414 sk->sk_state == BT_CONNECT2)) 414 sk->sk_state == BT_CONNECT2))
415 return POLLIN | POLLRDNORM; 415 return POLLIN | POLLRDNORM;
416 } 416 }
417 417
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
450 sk->sk_state == BT_CONFIG) 450 sk->sk_state == BT_CONFIG)
451 return mask; 451 return mask;
452 452
453 if (sock_writeable(sk)) 453 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
455 else 455 else
456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a779ec703323..031d7d656754 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
69 BT_DBG(""); 69 BT_DBG("");
70 70
71 list_for_each_entry(s, &bnep_session_list, list) 71 list_for_each_entry(s, &bnep_session_list, list)
72 if (!compare_ether_addr(dst, s->eh.h_source)) 72 if (ether_addr_equal(dst, s->eh.h_source))
73 return s; 73 return s;
74 74
75 return NULL; 75 return NULL;
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
340 } 340 }
341 341
342 /* Strip 802.1p header */ 342 /* Strip 802.1p header */
343 if (ntohs(s->eh.h_proto) == 0x8100) { 343 if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
344 if (!skb_pull(skb, 4)) 344 if (!skb_pull(skb, 4))
345 goto badframe; 345 goto badframe;
346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); 346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
@@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
422 iv[il++] = (struct kvec) { &type, 1 }; 422 iv[il++] = (struct kvec) { &type, 1 };
423 len++; 423 len++;
424 424
425 if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source)) 425 if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
426 type |= 0x01; 426 type |= 0x01;
427 427
428 if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest)) 428 if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
429 type |= 0x02; 429 type |= 0x02;
430 430
431 if (type) 431 if (type)
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 17800b1d28ea..180bfc45810d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -42,7 +42,6 @@
42#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <net/sock.h> 43#include <net/sock.h>
44 44
45#include <asm/system.h>
46 45
47#include "bnep.h" 46#include "bnep.h"
48 47
@@ -143,10 +142,10 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
143{ 142{
144 if (cmd == BNEPGETCONNLIST) { 143 if (cmd == BNEPGETCONNLIST) {
145 struct bnep_connlist_req cl; 144 struct bnep_connlist_req cl;
146 uint32_t uci; 145 u32 uci;
147 int err; 146 int err;
148 147
149 if (get_user(cl.cnum, (uint32_t __user *) arg) || 148 if (get_user(cl.cnum, (u32 __user *) arg) ||
150 get_user(uci, (u32 __user *) (arg + 4))) 149 get_user(uci, (u32 __user *) (arg + 4)))
151 return -EFAULT; 150 return -EFAULT;
152 151
@@ -157,7 +156,7 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
157 156
158 err = bnep_get_connlist(&cl); 157 err = bnep_get_connlist(&cl);
159 158
160 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 159 if (!err && put_user(cl.cnum, (u32 __user *) arg))
161 err = -EFAULT; 160 err = -EFAULT;
162 161
163 return err; 162 return err;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 3f2dd5c25ae5..311668d14571 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -39,7 +39,6 @@
39 39
40#include <linux/isdn/capilli.h> 40#include <linux/isdn/capilli.h>
41 41
42#include <asm/system.h>
43 42
44#include "cmtp.h" 43#include "cmtp.h"
45 44
@@ -137,10 +136,10 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
137{ 136{
138 if (cmd == CMTPGETCONNLIST) { 137 if (cmd == CMTPGETCONNLIST) {
139 struct cmtp_connlist_req cl; 138 struct cmtp_connlist_req cl;
140 uint32_t uci; 139 u32 uci;
141 int err; 140 int err;
142 141
143 if (get_user(cl.cnum, (uint32_t __user *) arg) || 142 if (get_user(cl.cnum, (u32 __user *) arg) ||
144 get_user(uci, (u32 __user *) (arg + 4))) 143 get_user(uci, (u32 __user *) (arg + 4)))
145 return -EFAULT; 144 return -EFAULT;
146 145
@@ -151,7 +150,7 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
151 150
152 err = cmtp_get_connlist(&cl); 151 err = cmtp_get_connlist(&cl);
153 152
154 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 153 if (!err && put_user(cl.cnum, (u32 __user *) arg))
155 err = -EFAULT; 154 err = -EFAULT;
156 155
157 return err; 156 return err;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 07bc69ed9498..3f18a6ed9731 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -35,10 +35,8 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include <asm/system.h>
42#include <linux/uaccess.h> 40#include <linux/uaccess.h>
43#include <asm/unaligned.h> 41#include <asm/unaligned.h>
44 42
@@ -51,7 +49,7 @@ static void hci_le_connect(struct hci_conn *conn)
51 struct hci_cp_le_create_conn cp; 49 struct hci_cp_le_create_conn cp;
52 50
53 conn->state = BT_CONNECT; 51 conn->state = BT_CONNECT;
54 conn->out = 1; 52 conn->out = true;
55 conn->link_mode |= HCI_LM_MASTER; 53 conn->link_mode |= HCI_LM_MASTER;
56 conn->sec_level = BT_SECURITY_LOW; 54 conn->sec_level = BT_SECURITY_LOW;
57 55
@@ -80,10 +78,10 @@ void hci_acl_connect(struct hci_conn *conn)
80 struct inquiry_entry *ie; 78 struct inquiry_entry *ie;
81 struct hci_cp_create_conn cp; 79 struct hci_cp_create_conn cp;
82 80
83 BT_DBG("%p", conn); 81 BT_DBG("hcon %p", conn);
84 82
85 conn->state = BT_CONNECT; 83 conn->state = BT_CONNECT;
86 conn->out = 1; 84 conn->out = true;
87 85
88 conn->link_mode = HCI_LM_MASTER; 86 conn->link_mode = HCI_LM_MASTER;
89 87
@@ -105,7 +103,8 @@ void hci_acl_connect(struct hci_conn *conn)
105 } 103 }
106 104
107 memcpy(conn->dev_class, ie->data.dev_class, 3); 105 memcpy(conn->dev_class, ie->data.dev_class, 3);
108 conn->ssp_mode = ie->data.ssp_mode; 106 if (ie->data.ssp_mode > 0)
107 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
109 } 108 }
110 109
111 cp.pkt_type = cpu_to_le16(conn->pkt_type); 110 cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -151,7 +150,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
151 BT_DBG("%p", conn); 150 BT_DBG("%p", conn);
152 151
153 conn->state = BT_CONNECT; 152 conn->state = BT_CONNECT;
154 conn->out = 1; 153 conn->out = true;
155 154
156 conn->attempt++; 155 conn->attempt++;
157 156
@@ -169,7 +168,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
169 BT_DBG("%p", conn); 168 BT_DBG("%p", conn);
170 169
171 conn->state = BT_CONNECT; 170 conn->state = BT_CONNECT;
172 conn->out = 1; 171 conn->out = true;
173 172
174 conn->attempt++; 173 conn->attempt++;
175 174
@@ -224,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
224} 223}
225EXPORT_SYMBOL(hci_le_start_enc); 224EXPORT_SYMBOL(hci_le_start_enc);
226 225
227void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
228{
229 struct hci_dev *hdev = conn->hdev;
230 struct hci_cp_le_ltk_reply cp;
231
232 BT_DBG("%p", conn);
233
234 memset(&cp, 0, sizeof(cp));
235
236 cp.handle = cpu_to_le16(conn->handle);
237 memcpy(cp.ltk, ltk, sizeof(ltk));
238
239 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
240}
241EXPORT_SYMBOL(hci_le_ltk_reply);
242
243void hci_le_ltk_neg_reply(struct hci_conn *conn)
244{
245 struct hci_dev *hdev = conn->hdev;
246 struct hci_cp_le_ltk_neg_reply cp;
247
248 BT_DBG("%p", conn);
249
250 memset(&cp, 0, sizeof(cp));
251
252 cp.handle = cpu_to_le16(conn->handle);
253
254 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
255}
256
257/* Device _must_ be locked */ 226/* Device _must_ be locked */
258void hci_sco_setup(struct hci_conn *conn, __u8 status) 227void hci_sco_setup(struct hci_conn *conn, __u8 status)
259{ 228{
@@ -279,16 +248,13 @@ static void hci_conn_timeout(struct work_struct *work)
279{ 248{
280 struct hci_conn *conn = container_of(work, struct hci_conn, 249 struct hci_conn *conn = container_of(work, struct hci_conn,
281 disc_work.work); 250 disc_work.work);
282 struct hci_dev *hdev = conn->hdev;
283 __u8 reason; 251 __u8 reason;
284 252
285 BT_DBG("conn %p state %d", conn, conn->state); 253 BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
286 254
287 if (atomic_read(&conn->refcnt)) 255 if (atomic_read(&conn->refcnt))
288 return; 256 return;
289 257
290 hci_dev_lock(hdev);
291
292 switch (conn->state) { 258 switch (conn->state) {
293 case BT_CONNECT: 259 case BT_CONNECT:
294 case BT_CONNECT2: 260 case BT_CONNECT2:
@@ -308,8 +274,6 @@ static void hci_conn_timeout(struct work_struct *work)
308 conn->state = BT_CLOSED; 274 conn->state = BT_CLOSED;
309 break; 275 break;
310 } 276 }
311
312 hci_dev_unlock(hdev);
313} 277}
314 278
315/* Enter sniff mode */ 279/* Enter sniff mode */
@@ -337,7 +301,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
337 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 301 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
338 } 302 }
339 303
340 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 304 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
341 struct hci_cp_sniff_mode cp; 305 struct hci_cp_sniff_mode cp;
342 cp.handle = cpu_to_le16(conn->handle); 306 cp.handle = cpu_to_le16(conn->handle);
343 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 307 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
@@ -372,7 +336,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
372 336
373 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 337 BT_DBG("%s dst %s", hdev->name, batostr(dst));
374 338
375 conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC); 339 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
376 if (!conn) 340 if (!conn)
377 return NULL; 341 return NULL;
378 342
@@ -386,7 +350,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
386 conn->remote_auth = 0xff; 350 conn->remote_auth = 0xff;
387 conn->key_type = 0xff; 351 conn->key_type = 0xff;
388 352
389 conn->power_save = 1; 353 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
390 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 354 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
391 355
392 switch (type) { 356 switch (type) {
@@ -407,7 +371,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
407 371
408 skb_queue_head_init(&conn->data_q); 372 skb_queue_head_init(&conn->data_q);
409 373
410 INIT_LIST_HEAD(&conn->chan_list);; 374 INIT_LIST_HEAD(&conn->chan_list);
411 375
412 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 376 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
413 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 377 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
@@ -519,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
519 483
520/* Create SCO, ACL or LE connection. 484/* Create SCO, ACL or LE connection.
521 * Device _must_ be locked */ 485 * Device _must_ be locked */
522struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 486struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
487 __u8 dst_type, __u8 sec_level, __u8 auth_type)
523{ 488{
524 struct hci_conn *acl; 489 struct hci_conn *acl;
525 struct hci_conn *sco; 490 struct hci_conn *sco;
@@ -528,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
528 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 493 BT_DBG("%s dst %s", hdev->name, batostr(dst));
529 494
530 if (type == LE_LINK) { 495 if (type == LE_LINK) {
531 struct adv_entry *entry;
532
533 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
534 if (le) 497 if (!le) {
535 return ERR_PTR(-EBUSY); 498 le = hci_conn_add(hdev, LE_LINK, dst);
536 499 if (!le)
537 entry = hci_find_adv_entry(hdev, dst); 500 return ERR_PTR(-ENOMEM);
538 if (!entry)
539 return ERR_PTR(-EHOSTUNREACH);
540
541 le = hci_conn_add(hdev, LE_LINK, dst);
542 if (!le)
543 return ERR_PTR(-ENOMEM);
544 501
545 le->dst_type = entry->bdaddr_type; 502 le->dst_type = bdaddr_to_le(dst_type);
503 hci_le_connect(le);
504 }
546 505
547 hci_le_connect(le); 506 le->pending_sec_level = sec_level;
507 le->auth_type = auth_type;
548 508
549 hci_conn_hold(le); 509 hci_conn_hold(le);
550 510
@@ -555,7 +515,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
555 if (!acl) { 515 if (!acl) {
556 acl = hci_conn_add(hdev, ACL_LINK, dst); 516 acl = hci_conn_add(hdev, ACL_LINK, dst);
557 if (!acl) 517 if (!acl)
558 return NULL; 518 return ERR_PTR(-ENOMEM);
559 } 519 }
560 520
561 hci_conn_hold(acl); 521 hci_conn_hold(acl);
@@ -575,7 +535,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
575 sco = hci_conn_add(hdev, type, dst); 535 sco = hci_conn_add(hdev, type, dst);
576 if (!sco) { 536 if (!sco) {
577 hci_conn_put(acl); 537 hci_conn_put(acl);
578 return NULL; 538 return ERR_PTR(-ENOMEM);
579 } 539 }
580 } 540 }
581 541
@@ -586,12 +546,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
586 546
587 if (acl->state == BT_CONNECTED && 547 if (acl->state == BT_CONNECTED &&
588 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 548 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
589 acl->power_save = 1; 549 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
590 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 550 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
591 551
592 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) { 552 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
593 /* defer SCO setup until mode change completed */ 553 /* defer SCO setup until mode change completed */
594 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend); 554 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
595 return sco; 555 return sco;
596 } 556 }
597 557
@@ -607,8 +567,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
607{ 567{
608 BT_DBG("conn %p", conn); 568 BT_DBG("conn %p", conn);
609 569
610 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 && 570 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
611 !(conn->link_mode & HCI_LM_ENCRYPT))
612 return 0; 571 return 0;
613 572
614 return 1; 573 return 1;
@@ -633,17 +592,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
633 592
634 conn->auth_type = auth_type; 593 conn->auth_type = auth_type;
635 594
636 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 595 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
637 struct hci_cp_auth_requested cp; 596 struct hci_cp_auth_requested cp;
638 597
639 /* encrypt must be pending if auth is also pending */ 598 /* encrypt must be pending if auth is also pending */
640 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 599 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
641 600
642 cp.handle = cpu_to_le16(conn->handle); 601 cp.handle = cpu_to_le16(conn->handle);
643 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 602 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
644 sizeof(cp), &cp); 603 sizeof(cp), &cp);
645 if (conn->key_type != 0xff) 604 if (conn->key_type != 0xff)
646 set_bit(HCI_CONN_REAUTH_PEND, &conn->pend); 605 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
647 } 606 }
648 607
649 return 0; 608 return 0;
@@ -654,7 +613,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
654{ 613{
655 BT_DBG("conn %p", conn); 614 BT_DBG("conn %p", conn);
656 615
657 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 616 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
658 struct hci_cp_set_conn_encrypt cp; 617 struct hci_cp_set_conn_encrypt cp;
659 cp.handle = cpu_to_le16(conn->handle); 618 cp.handle = cpu_to_le16(conn->handle);
660 cp.encrypt = 0x01; 619 cp.encrypt = 0x01;
@@ -674,8 +633,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
674 633
675 /* For non 2.1 devices and low security level we don't need the link 634 /* For non 2.1 devices and low security level we don't need the link
676 key. */ 635 key. */
677 if (sec_level == BT_SECURITY_LOW && 636 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
678 (!conn->ssp_mode || !conn->hdev->ssp_mode))
679 return 1; 637 return 1;
680 638
681 /* For other security levels we need the link key. */ 639 /* For other security levels we need the link key. */
@@ -704,7 +662,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
704 goto encrypt; 662 goto encrypt;
705 663
706auth: 664auth:
707 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 665 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
708 return 0; 666 return 0;
709 667
710 if (!hci_conn_auth(conn, sec_level, auth_type)) 668 if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -739,7 +697,7 @@ int hci_conn_change_link_key(struct hci_conn *conn)
739{ 697{
740 BT_DBG("conn %p", conn); 698 BT_DBG("conn %p", conn);
741 699
742 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 700 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
743 struct hci_cp_change_conn_link_key cp; 701 struct hci_cp_change_conn_link_key cp;
744 cp.handle = cpu_to_le16(conn->handle); 702 cp.handle = cpu_to_le16(conn->handle);
745 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 703 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
@@ -758,7 +716,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
758 if (!role && conn->link_mode & HCI_LM_MASTER) 716 if (!role && conn->link_mode & HCI_LM_MASTER)
759 return 1; 717 return 1;
760 718
761 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) { 719 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
762 struct hci_cp_switch_role cp; 720 struct hci_cp_switch_role cp;
763 bacpy(&cp.bdaddr, &conn->dst); 721 bacpy(&cp.bdaddr, &conn->dst);
764 cp.role = role; 722 cp.role = role;
@@ -782,10 +740,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
782 if (conn->mode != HCI_CM_SNIFF) 740 if (conn->mode != HCI_CM_SNIFF)
783 goto timer; 741 goto timer;
784 742
785 if (!conn->power_save && !force_active) 743 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
786 goto timer; 744 goto timer;
787 745
788 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 746 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
789 struct hci_cp_exit_sniff_mode cp; 747 struct hci_cp_exit_sniff_mode cp;
790 cp.handle = cpu_to_le16(conn->handle); 748 cp.handle = cpu_to_le16(conn->handle);
791 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); 749 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
@@ -801,11 +759,11 @@ timer:
801void hci_conn_hash_flush(struct hci_dev *hdev) 759void hci_conn_hash_flush(struct hci_dev *hdev)
802{ 760{
803 struct hci_conn_hash *h = &hdev->conn_hash; 761 struct hci_conn_hash *h = &hdev->conn_hash;
804 struct hci_conn *c; 762 struct hci_conn *c, *n;
805 763
806 BT_DBG("hdev %s", hdev->name); 764 BT_DBG("hdev %s", hdev->name);
807 765
808 list_for_each_entry_rcu(c, &h->list, list) { 766 list_for_each_entry_safe(c, n, &h->list, list) {
809 c->state = BT_CLOSED; 767 c->state = BT_CLOSED;
810 768
811 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); 769 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
@@ -950,7 +908,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
950 908
951 BT_DBG("%s conn %p", hdev->name, conn); 909 BT_DBG("%s conn %p", hdev->name, conn);
952 910
953 chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC); 911 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
954 if (!chan) 912 if (!chan)
955 return NULL; 913 return NULL;
956 914
@@ -981,10 +939,10 @@ int hci_chan_del(struct hci_chan *chan)
981 939
982void hci_chan_list_flush(struct hci_conn *conn) 940void hci_chan_list_flush(struct hci_conn *conn)
983{ 941{
984 struct hci_chan *chan; 942 struct hci_chan *chan, *n;
985 943
986 BT_DBG("conn %p", conn); 944 BT_DBG("conn %p", conn);
987 945
988 list_for_each_entry_rcu(chan, &conn->chan_list, list) 946 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
989 hci_chan_del(chan); 947 hci_chan_del(chan);
990} 948}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5aeb62491198..411ace8e647b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -40,13 +40,11 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/notifier.h>
44#include <linux/rfkill.h> 43#include <linux/rfkill.h>
45#include <linux/timer.h> 44#include <linux/timer.h>
46#include <linux/crypto.h> 45#include <linux/crypto.h>
47#include <net/sock.h> 46#include <net/sock.h>
48 47
49#include <asm/system.h>
50#include <linux/uaccess.h> 48#include <linux/uaccess.h>
51#include <asm/unaligned.h> 49#include <asm/unaligned.h>
52 50
@@ -55,8 +53,6 @@
55 53
56#define AUTO_OFF_TIMEOUT 2000 54#define AUTO_OFF_TIMEOUT 2000
57 55
58bool enable_hs;
59
60static void hci_rx_work(struct work_struct *work); 56static void hci_rx_work(struct work_struct *work);
61static void hci_cmd_work(struct work_struct *work); 57static void hci_cmd_work(struct work_struct *work);
62static void hci_tx_work(struct work_struct *work); 58static void hci_tx_work(struct work_struct *work);
@@ -69,24 +65,11 @@ DEFINE_RWLOCK(hci_dev_list_lock);
69LIST_HEAD(hci_cb_list); 65LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock); 66DEFINE_RWLOCK(hci_cb_list_lock);
71 67
72/* HCI notifiers list */
73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75/* ---- HCI notifications ---- */ 68/* ---- HCI notifications ---- */
76 69
77int hci_register_notifier(struct notifier_block *nb)
78{
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85}
86
87static void hci_notify(struct hci_dev *hdev, int event) 70static void hci_notify(struct hci_dev *hdev, int event)
88{ 71{
89 atomic_notifier_call_chain(&hci_notifier, event, hdev); 72 hci_sock_dev_event(hdev, event);
90} 73}
91 74
92/* ---- HCI requests ---- */ 75/* ---- HCI requests ---- */
@@ -98,8 +81,29 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 /* If this is the init phase check if the completed command matches 81 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return. 82 * the last init command, and if not just return.
100 */ 83 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) 84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 u16 opcode = __le16_to_cpu(sent->opcode);
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
102 return; 105 return;
106 }
103 107
104 if (hdev->req_status == HCI_REQ_PEND) { 108 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result; 109 hdev->req_result = result;
@@ -248,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
248 252
249 /* Read Local Version */ 253 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255
256 /* Read Local AMP Info */
257 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
251} 258}
252 259
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 260static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -355,72 +362,208 @@ struct hci_dev *hci_dev_get(int index)
355} 362}
356 363
357/* ---- Inquiry support ---- */ 364/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev) 365
366bool hci_discovery_active(struct hci_dev *hdev)
367{
368 struct discovery_state *discov = &hdev->discovery;
369
370 switch (discov->state) {
371 case DISCOVERY_FINDING:
372 case DISCOVERY_RESOLVING:
373 return true;
374
375 default:
376 return false;
377 }
378}
379
380void hci_discovery_set_state(struct hci_dev *hdev, int state)
359{ 381{
360 struct inquiry_cache *cache = &hdev->inq_cache; 382 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
361 struct inquiry_entry *next = cache->list, *e; 383
384 if (hdev->discovery.state == state)
385 return;
386
387 switch (state) {
388 case DISCOVERY_STOPPED:
389 if (hdev->discovery.state != DISCOVERY_STARTING)
390 mgmt_discovering(hdev, 0);
391 break;
392 case DISCOVERY_STARTING:
393 break;
394 case DISCOVERY_FINDING:
395 mgmt_discovering(hdev, 1);
396 break;
397 case DISCOVERY_RESOLVING:
398 break;
399 case DISCOVERY_STOPPING:
400 break;
401 }
402
403 hdev->discovery.state = state;
404}
362 405
363 BT_DBG("cache %p", cache); 406static void inquiry_cache_flush(struct hci_dev *hdev)
407{
408 struct discovery_state *cache = &hdev->discovery;
409 struct inquiry_entry *p, *n;
364 410
365 cache->list = NULL; 411 list_for_each_entry_safe(p, n, &cache->all, all) {
366 while ((e = next)) { 412 list_del(&p->all);
367 next = e->next; 413 kfree(p);
368 kfree(e);
369 } 414 }
415
416 INIT_LIST_HEAD(&cache->unknown);
417 INIT_LIST_HEAD(&cache->resolve);
370} 418}
371 419
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{ 421{
374 struct inquiry_cache *cache = &hdev->inq_cache; 422 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
427 list_for_each_entry(e, &cache->all, all) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
429 return e;
430 }
431
432 return NULL;
433}
434
435struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
436 bdaddr_t *bdaddr)
437{
438 struct discovery_state *cache = &hdev->discovery;
375 struct inquiry_entry *e; 439 struct inquiry_entry *e;
376 440
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 441 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378 442
379 for (e = cache->list; e; e = e->next) 443 list_for_each_entry(e, &cache->unknown, list) {
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449}
450
451struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
452 bdaddr_t *bdaddr,
453 int state)
454{
455 struct discovery_state *cache = &hdev->discovery;
456 struct inquiry_entry *e;
457
458 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459
460 list_for_each_entry(e, &cache->resolve, list) {
461 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 return e;
380 if (!bacmp(&e->data.bdaddr, bdaddr)) 463 if (!bacmp(&e->data.bdaddr, bdaddr))
464 return e;
465 }
466
467 return NULL;
468}
469
470void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
471 struct inquiry_entry *ie)
472{
473 struct discovery_state *cache = &hdev->discovery;
474 struct list_head *pos = &cache->resolve;
475 struct inquiry_entry *p;
476
477 list_del(&ie->list);
478
479 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi))
381 break; 482 break;
382 return e; 483 pos = &p->list;
484 }
485
486 list_add(&ie->list, pos);
383} 487}
384 488
385void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) 489bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
490 bool name_known, bool *ssp)
386{ 491{
387 struct inquiry_cache *cache = &hdev->inq_cache; 492 struct discovery_state *cache = &hdev->discovery;
388 struct inquiry_entry *ie; 493 struct inquiry_entry *ie;
389 494
390 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 495 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
391 496
497 if (ssp)
498 *ssp = data->ssp_mode;
499
392 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 500 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
393 if (!ie) { 501 if (ie) {
394 /* Entry not in the cache. Add new one. */ 502 if (ie->data.ssp_mode && ssp)
395 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC); 503 *ssp = true;
396 if (!ie) 504
397 return; 505 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie);
509 }
510
511 goto update;
512 }
513
514 /* Entry not in the cache. Add new one. */
515 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
516 if (!ie)
517 return false;
518
519 list_add(&ie->all, &cache->all);
520
521 if (name_known) {
522 ie->name_state = NAME_KNOWN;
523 } else {
524 ie->name_state = NAME_NOT_KNOWN;
525 list_add(&ie->list, &cache->unknown);
526 }
398 527
399 ie->next = cache->list; 528update:
400 cache->list = ie; 529 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list);
401 } 533 }
402 534
403 memcpy(&ie->data, data, sizeof(*data)); 535 memcpy(&ie->data, data, sizeof(*data));
404 ie->timestamp = jiffies; 536 ie->timestamp = jiffies;
405 cache->timestamp = jiffies; 537 cache->timestamp = jiffies;
538
539 if (ie->name_state == NAME_NOT_KNOWN)
540 return false;
541
542 return true;
406} 543}
407 544
408static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 545static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
409{ 546{
410 struct inquiry_cache *cache = &hdev->inq_cache; 547 struct discovery_state *cache = &hdev->discovery;
411 struct inquiry_info *info = (struct inquiry_info *) buf; 548 struct inquiry_info *info = (struct inquiry_info *) buf;
412 struct inquiry_entry *e; 549 struct inquiry_entry *e;
413 int copied = 0; 550 int copied = 0;
414 551
415 for (e = cache->list; e && copied < num; e = e->next, copied++) { 552 list_for_each_entry(e, &cache->all, all) {
416 struct inquiry_data *data = &e->data; 553 struct inquiry_data *data = &e->data;
554
555 if (copied >= num)
556 break;
557
417 bacpy(&info->bdaddr, &data->bdaddr); 558 bacpy(&info->bdaddr, &data->bdaddr);
418 info->pscan_rep_mode = data->pscan_rep_mode; 559 info->pscan_rep_mode = data->pscan_rep_mode;
419 info->pscan_period_mode = data->pscan_period_mode; 560 info->pscan_period_mode = data->pscan_period_mode;
420 info->pscan_mode = data->pscan_mode; 561 info->pscan_mode = data->pscan_mode;
421 memcpy(info->dev_class, data->dev_class, 3); 562 memcpy(info->dev_class, data->dev_class, 3);
422 info->clock_offset = data->clock_offset; 563 info->clock_offset = data->clock_offset;
564
423 info++; 565 info++;
566 copied++;
424 } 567 }
425 568
426 BT_DBG("cache %p, copied %d", cache, copied); 569 BT_DBG("cache %p, copied %d", cache, copied);
@@ -525,6 +668,11 @@ int hci_dev_open(__u16 dev)
525 668
526 hci_req_lock(hdev); 669 hci_req_lock(hdev);
527 670
671 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
672 ret = -ENODEV;
673 goto done;
674 }
675
528 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 676 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
529 ret = -ERFKILL; 677 ret = -ERFKILL;
530 goto done; 678 goto done;
@@ -567,7 +715,7 @@ int hci_dev_open(__u16 dev)
567 hci_dev_hold(hdev); 715 hci_dev_hold(hdev);
568 set_bit(HCI_UP, &hdev->flags); 716 set_bit(HCI_UP, &hdev->flags);
569 hci_notify(hdev, HCI_DEV_UP); 717 hci_notify(hdev, HCI_DEV_UP);
570 if (!test_bit(HCI_SETUP, &hdev->flags)) { 718 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
571 hci_dev_lock(hdev); 719 hci_dev_lock(hdev);
572 mgmt_powered(hdev, 1); 720 mgmt_powered(hdev, 1);
573 hci_dev_unlock(hdev); 721 hci_dev_unlock(hdev);
@@ -603,6 +751,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
603{ 751{
604 BT_DBG("%s %p", hdev->name, hdev); 752 BT_DBG("%s %p", hdev->name, hdev);
605 753
754 cancel_work_sync(&hdev->le_scan);
755
606 hci_req_cancel(hdev, ENODEV); 756 hci_req_cancel(hdev, ENODEV);
607 hci_req_lock(hdev); 757 hci_req_lock(hdev);
608 758
@@ -619,14 +769,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
619 if (hdev->discov_timeout > 0) { 769 if (hdev->discov_timeout > 0) {
620 cancel_delayed_work(&hdev->discov_off); 770 cancel_delayed_work(&hdev->discov_off);
621 hdev->discov_timeout = 0; 771 hdev->discov_timeout = 0;
772 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
622 } 773 }
623 774
624 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 775 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
625 cancel_delayed_work(&hdev->power_off);
626
627 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
628 cancel_delayed_work(&hdev->service_cache); 776 cancel_delayed_work(&hdev->service_cache);
629 777
778 cancel_delayed_work_sync(&hdev->le_scan_disable);
779
630 hci_dev_lock(hdev); 780 hci_dev_lock(hdev);
631 inquiry_cache_flush(hdev); 781 inquiry_cache_flush(hdev);
632 hci_conn_hash_flush(hdev); 782 hci_conn_hash_flush(hdev);
@@ -667,13 +817,18 @@ static int hci_dev_do_close(struct hci_dev *hdev)
667 * and no tasks are scheduled. */ 817 * and no tasks are scheduled. */
668 hdev->close(hdev); 818 hdev->close(hdev);
669 819
670 hci_dev_lock(hdev); 820 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
671 mgmt_powered(hdev, 0); 821 hci_dev_lock(hdev);
672 hci_dev_unlock(hdev); 822 mgmt_powered(hdev, 0);
823 hci_dev_unlock(hdev);
824 }
673 825
674 /* Clear flags */ 826 /* Clear flags */
675 hdev->flags = 0; 827 hdev->flags = 0;
676 828
829 memset(hdev->eir, 0, sizeof(hdev->eir));
830 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
831
677 hci_req_unlock(hdev); 832 hci_req_unlock(hdev);
678 833
679 hci_dev_put(hdev); 834 hci_dev_put(hdev);
@@ -688,7 +843,12 @@ int hci_dev_close(__u16 dev)
688 hdev = hci_dev_get(dev); 843 hdev = hci_dev_get(dev);
689 if (!hdev) 844 if (!hdev)
690 return -ENODEV; 845 return -ENODEV;
846
847 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
848 cancel_delayed_work(&hdev->power_off);
849
691 err = hci_dev_do_close(hdev); 850 err = hci_dev_do_close(hdev);
851
692 hci_dev_put(hdev); 852 hci_dev_put(hdev);
693 return err; 853 return err;
694} 854}
@@ -847,11 +1007,11 @@ int hci_get_dev_list(void __user *arg)
847 1007
848 read_lock(&hci_dev_list_lock); 1008 read_lock(&hci_dev_list_lock);
849 list_for_each_entry(hdev, &hci_dev_list, list) { 1009 list_for_each_entry(hdev, &hci_dev_list, list) {
850 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 1010 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
851 cancel_delayed_work(&hdev->power_off); 1011 cancel_delayed_work(&hdev->power_off);
852 1012
853 if (!test_bit(HCI_MGMT, &hdev->flags)) 1013 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags); 1014 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
855 1015
856 (dr + n)->dev_id = hdev->id; 1016 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags; 1017 (dr + n)->dev_opt = hdev->flags;
@@ -883,11 +1043,11 @@ int hci_get_dev_info(void __user *arg)
883 if (!hdev) 1043 if (!hdev)
884 return -ENODEV; 1044 return -ENODEV;
885 1045
886 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 1046 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
887 cancel_delayed_work_sync(&hdev->power_off); 1047 cancel_delayed_work_sync(&hdev->power_off);
888 1048
889 if (!test_bit(HCI_MGMT, &hdev->flags)) 1049 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
890 set_bit(HCI_PAIRABLE, &hdev->flags); 1050 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
891 1051
892 strcpy(di.name, hdev->name); 1052 strcpy(di.name, hdev->name);
893 di.bdaddr = hdev->bdaddr; 1053 di.bdaddr = hdev->bdaddr;
@@ -932,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
932 .set_block = hci_rfkill_set_block, 1092 .set_block = hci_rfkill_set_block,
933}; 1093};
934 1094
935/* Alloc HCI device */
936struct hci_dev *hci_alloc_dev(void)
937{
938 struct hci_dev *hdev;
939
940 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
941 if (!hdev)
942 return NULL;
943
944 hci_init_sysfs(hdev);
945 skb_queue_head_init(&hdev->driver_init);
946
947 return hdev;
948}
949EXPORT_SYMBOL(hci_alloc_dev);
950
951/* Free HCI device */
952void hci_free_dev(struct hci_dev *hdev)
953{
954 skb_queue_purge(&hdev->driver_init);
955
956 /* will free via device release */
957 put_device(&hdev->dev);
958}
959EXPORT_SYMBOL(hci_free_dev);
960
961static void hci_power_on(struct work_struct *work) 1095static void hci_power_on(struct work_struct *work)
962{ 1096{
963 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 1097 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -967,11 +1101,11 @@ static void hci_power_on(struct work_struct *work)
967 if (hci_dev_open(hdev->id) < 0) 1101 if (hci_dev_open(hdev->id) < 0)
968 return; 1102 return;
969 1103
970 if (test_bit(HCI_AUTO_OFF, &hdev->flags)) 1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
971 schedule_delayed_work(&hdev->power_off, 1105 schedule_delayed_work(&hdev->power_off,
972 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
973 1107
974 if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) 1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
975 mgmt_index_added(hdev); 1109 mgmt_index_added(hdev);
976} 1110}
977 1111
@@ -982,9 +1116,7 @@ static void hci_power_off(struct work_struct *work)
982 1116
983 BT_DBG("%s", hdev->name); 1117 BT_DBG("%s", hdev->name);
984 1118
985 clear_bit(HCI_AUTO_OFF, &hdev->flags); 1119 hci_dev_do_close(hdev);
986
987 hci_dev_close(hdev->id);
988} 1120}
989 1121
990static void hci_discov_off(struct work_struct *work) 1122static void hci_discov_off(struct work_struct *work)
@@ -1037,6 +1169,18 @@ int hci_link_keys_clear(struct hci_dev *hdev)
1037 return 0; 1169 return 0;
1038} 1170}
1039 1171
1172int hci_smp_ltks_clear(struct hci_dev *hdev)
1173{
1174 struct smp_ltk *k, *tmp;
1175
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 list_del(&k->list);
1178 kfree(k);
1179 }
1180
1181 return 0;
1182}
1183
1040struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1184struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1041{ 1185{
1042 struct link_key *k; 1186 struct link_key *k;
@@ -1048,83 +1192,78 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1048 return NULL; 1192 return NULL;
1049} 1193}
1050 1194
1051static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1052 u8 key_type, u8 old_key_type) 1196 u8 key_type, u8 old_key_type)
1053{ 1197{
1054 /* Legacy key */ 1198 /* Legacy key */
1055 if (key_type < 0x03) 1199 if (key_type < 0x03)
1056 return 1; 1200 return true;
1057 1201
1058 /* Debug keys are insecure so don't store them persistently */ 1202 /* Debug keys are insecure so don't store them persistently */
1059 if (key_type == HCI_LK_DEBUG_COMBINATION) 1203 if (key_type == HCI_LK_DEBUG_COMBINATION)
1060 return 0; 1204 return false;
1061 1205
1062 /* Changed combination key and there's no previous one */ 1206 /* Changed combination key and there's no previous one */
1063 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1207 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1064 return 0; 1208 return false;
1065 1209
1066 /* Security mode 3 case */ 1210 /* Security mode 3 case */
1067 if (!conn) 1211 if (!conn)
1068 return 1; 1212 return true;
1069 1213
1070 /* Neither local nor remote side had no-bonding as requirement */ 1214 /* Neither local nor remote side had no-bonding as requirement */
1071 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1215 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1072 return 1; 1216 return true;
1073 1217
1074 /* Local side had dedicated bonding as requirement */ 1218 /* Local side had dedicated bonding as requirement */
1075 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1219 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1076 return 1; 1220 return true;
1077 1221
1078 /* Remote side had dedicated bonding as requirement */ 1222 /* Remote side had dedicated bonding as requirement */
1079 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1223 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1080 return 1; 1224 return true;
1081 1225
1082 /* If none of the above criteria match, then don't store the key 1226 /* If none of the above criteria match, then don't store the key
1083 * persistently */ 1227 * persistently */
1084 return 0; 1228 return false;
1085} 1229}
1086 1230
1087struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) 1231struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1088{ 1232{
1089 struct link_key *k; 1233 struct smp_ltk *k;
1090
1091 list_for_each_entry(k, &hdev->link_keys, list) {
1092 struct key_master_id *id;
1093 1234
1094 if (k->type != HCI_LK_SMP_LTK) 1235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
1095 continue; 1238 continue;
1096 1239
1097 if (k->dlen != sizeof(*id)) 1240 return k;
1098 continue;
1099
1100 id = (void *) &k->data;
1101 if (id->ediv == ediv &&
1102 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1103 return k;
1104 } 1241 }
1105 1242
1106 return NULL; 1243 return NULL;
1107} 1244}
1108EXPORT_SYMBOL(hci_find_ltk); 1245EXPORT_SYMBOL(hci_find_ltk);
1109 1246
1110struct link_key *hci_find_link_key_type(struct hci_dev *hdev, 1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1111 bdaddr_t *bdaddr, u8 type) 1248 u8 addr_type)
1112{ 1249{
1113 struct link_key *k; 1250 struct smp_ltk *k;
1114 1251
1115 list_for_each_entry(k, &hdev->link_keys, list) 1252 list_for_each_entry(k, &hdev->long_term_keys, list)
1116 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0) 1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
1117 return k; 1255 return k;
1118 1256
1119 return NULL; 1257 return NULL;
1120} 1258}
1121EXPORT_SYMBOL(hci_find_link_key_type); 1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1122 1260
1123int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1124 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1125{ 1263{
1126 struct link_key *key, *old_key; 1264 struct link_key *key, *old_key;
1127 u8 old_key_type, persistent; 1265 u8 old_key_type;
1266 bool persistent;
1128 1267
1129 old_key = hci_find_link_key(hdev, bdaddr); 1268 old_key = hci_find_link_key(hdev, bdaddr);
1130 if (old_key) { 1269 if (old_key) {
@@ -1167,48 +1306,45 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1167 1306
1168 mgmt_new_link_key(hdev, key, persistent); 1307 mgmt_new_link_key(hdev, key, persistent);
1169 1308
1170 if (!persistent) { 1309 if (conn)
1171 list_del(&key->list); 1310 conn->flush_key = !persistent;
1172 kfree(key);
1173 }
1174 1311
1175 return 0; 1312 return 0;
1176} 1313}
1177 1314
1178int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, 1315int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1179 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]) 1316 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1317 ediv, u8 rand[8])
1180{ 1318{
1181 struct link_key *key, *old_key; 1319 struct smp_ltk *key, *old_key;
1182 struct key_master_id *id;
1183 u8 old_key_type;
1184 1320
1185 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr)); 1321 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1322 return 0;
1186 1323
1187 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK); 1324 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1188 if (old_key) { 1325 if (old_key)
1189 key = old_key; 1326 key = old_key;
1190 old_key_type = old_key->type; 1327 else {
1191 } else { 1328 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1192 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1193 if (!key) 1329 if (!key)
1194 return -ENOMEM; 1330 return -ENOMEM;
1195 list_add(&key->list, &hdev->link_keys); 1331 list_add(&key->list, &hdev->long_term_keys);
1196 old_key_type = 0xff;
1197 } 1332 }
1198 1333
1199 key->dlen = sizeof(*id);
1200
1201 bacpy(&key->bdaddr, bdaddr); 1334 bacpy(&key->bdaddr, bdaddr);
1202 memcpy(key->val, ltk, sizeof(key->val)); 1335 key->bdaddr_type = addr_type;
1203 key->type = HCI_LK_SMP_LTK; 1336 memcpy(key->val, tk, sizeof(key->val));
1204 key->pin_len = key_size; 1337 key->authenticated = authenticated;
1338 key->ediv = ediv;
1339 key->enc_size = enc_size;
1340 key->type = type;
1341 memcpy(key->rand, rand, sizeof(key->rand));
1205 1342
1206 id = (void *) &key->data; 1343 if (!new_key)
1207 id->ediv = ediv; 1344 return 0;
1208 memcpy(id->rand, rand, sizeof(id->rand));
1209 1345
1210 if (new_key) 1346 if (type & HCI_SMP_LTK)
1211 mgmt_new_link_key(hdev, key, old_key_type); 1347 mgmt_new_ltk(hdev, key, 1);
1212 1348
1213 return 0; 1349 return 0;
1214} 1350}
@@ -1229,6 +1365,23 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1229 return 0; 1365 return 0;
1230} 1366}
1231 1367
1368int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369{
1370 struct smp_ltk *k, *tmp;
1371
1372 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1373 if (bacmp(bdaddr, &k->bdaddr))
1374 continue;
1375
1376 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1377
1378 list_del(&k->list);
1379 kfree(k);
1380 }
1381
1382 return 0;
1383}
1384
1232/* HCI command timer function */ 1385/* HCI command timer function */
1233static void hci_cmd_timer(unsigned long arg) 1386static void hci_cmd_timer(unsigned long arg)
1234{ 1387{
@@ -1240,7 +1393,7 @@ static void hci_cmd_timer(unsigned long arg)
1240} 1393}
1241 1394
1242struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1395struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1243 bdaddr_t *bdaddr) 1396 bdaddr_t *bdaddr)
1244{ 1397{
1245 struct oob_data *data; 1398 struct oob_data *data;
1246 1399
@@ -1280,7 +1433,7 @@ int hci_remote_oob_data_clear(struct hci_dev *hdev)
1280} 1433}
1281 1434
1282int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, 1435int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1283 u8 *randomizer) 1436 u8 *randomizer)
1284{ 1437{
1285 struct oob_data *data; 1438 struct oob_data *data;
1286 1439
@@ -1303,8 +1456,7 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1303 return 0; 1456 return 0;
1304} 1457}
1305 1458
1306struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, 1459struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1307 bdaddr_t *bdaddr)
1308{ 1460{
1309 struct bdaddr_list *b; 1461 struct bdaddr_list *b;
1310 1462
@@ -1331,7 +1483,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
1331 return 0; 1483 return 0;
1332} 1484}
1333 1485
1334int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) 1486int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1335{ 1487{
1336 struct bdaddr_list *entry; 1488 struct bdaddr_list *entry;
1337 1489
@@ -1349,10 +1501,10 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1349 1501
1350 list_add(&entry->list, &hdev->blacklist); 1502 list_add(&entry->list, &hdev->blacklist);
1351 1503
1352 return mgmt_device_blocked(hdev, bdaddr); 1504 return mgmt_device_blocked(hdev, bdaddr, type);
1353} 1505}
1354 1506
1355int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) 1507int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1356{ 1508{
1357 struct bdaddr_list *entry; 1509 struct bdaddr_list *entry;
1358 1510
@@ -1366,168 +1518,225 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1366 list_del(&entry->list); 1518 list_del(&entry->list);
1367 kfree(entry); 1519 kfree(entry);
1368 1520
1369 return mgmt_device_unblocked(hdev, bdaddr); 1521 return mgmt_device_unblocked(hdev, bdaddr, type);
1370} 1522}
1371 1523
1372static void hci_clear_adv_cache(struct work_struct *work) 1524static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1373{ 1525{
1374 struct hci_dev *hdev = container_of(work, struct hci_dev, 1526 struct le_scan_params *param = (struct le_scan_params *) opt;
1375 adv_work.work); 1527 struct hci_cp_le_set_scan_param cp;
1376 1528
1377 hci_dev_lock(hdev); 1529 memset(&cp, 0, sizeof(cp));
1530 cp.type = param->type;
1531 cp.interval = cpu_to_le16(param->interval);
1532 cp.window = cpu_to_le16(param->window);
1378 1533
1379 hci_adv_entries_clear(hdev); 1534 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1535}
1380 1536
1381 hci_dev_unlock(hdev); 1537static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1538{
1539 struct hci_cp_le_set_scan_enable cp;
1540
1541 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1;
1543
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1382} 1545}
1383 1546
1384int hci_adv_entries_clear(struct hci_dev *hdev) 1547static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1548 u16 window, int timeout)
1385{ 1549{
1386 struct adv_entry *entry, *tmp; 1550 long timeo = msecs_to_jiffies(3000);
1551 struct le_scan_params param;
1552 int err;
1387 1553
1388 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) { 1554 BT_DBG("%s", hdev->name);
1389 list_del(&entry->list); 1555
1390 kfree(entry); 1556 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1391 } 1557 return -EINPROGRESS;
1392 1558
1393 BT_DBG("%s adv cache cleared", hdev->name); 1559 param.type = type;
1560 param.interval = interval;
1561 param.window = window;
1562
1563 hci_req_lock(hdev);
1564
1565 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1566 timeo);
1567 if (!err)
1568 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1569
1570 hci_req_unlock(hdev);
1571
1572 if (err < 0)
1573 return err;
1574
1575 schedule_delayed_work(&hdev->le_scan_disable,
1576 msecs_to_jiffies(timeout));
1394 1577
1395 return 0; 1578 return 0;
1396} 1579}
1397 1580
1398struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr) 1581int hci_cancel_le_scan(struct hci_dev *hdev)
1399{ 1582{
1400 struct adv_entry *entry; 1583 BT_DBG("%s", hdev->name);
1401 1584
1402 list_for_each_entry(entry, &hdev->adv_entries, list) 1585 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1403 if (bacmp(bdaddr, &entry->bdaddr) == 0) 1586 return -EALREADY;
1404 return entry;
1405 1587
1406 return NULL; 1588 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1407} 1589 struct hci_cp_le_set_scan_enable cp;
1408 1590
1409static inline int is_connectable_adv(u8 evt_type) 1591 /* Send HCI command to disable LE Scan */
1410{ 1592 memset(&cp, 0, sizeof(cp));
1411 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND) 1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1412 return 1; 1594 }
1413 1595
1414 return 0; 1596 return 0;
1415} 1597}
1416 1598
1417int hci_add_adv_entry(struct hci_dev *hdev, 1599static void le_scan_disable_work(struct work_struct *work)
1418 struct hci_ev_le_advertising_info *ev)
1419{ 1600{
1420 struct adv_entry *entry; 1601 struct hci_dev *hdev = container_of(work, struct hci_dev,
1421 1602 le_scan_disable.work);
1422 if (!is_connectable_adv(ev->evt_type)) 1603 struct hci_cp_le_set_scan_enable cp;
1423 return -EINVAL;
1424 1604
1425 /* Only new entries should be added to adv_entries. So, if 1605 BT_DBG("%s", hdev->name);
1426 * bdaddr was found, don't add it. */
1427 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1428 return 0;
1429 1606
1430 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 1607 memset(&cp, 0, sizeof(cp));
1431 if (!entry)
1432 return -ENOMEM;
1433 1608
1434 bacpy(&entry->bdaddr, &ev->bdaddr); 1609 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1435 entry->bdaddr_type = ev->bdaddr_type; 1610}
1436 1611
1437 list_add(&entry->list, &hdev->adv_entries); 1612static void le_scan_work(struct work_struct *work)
1613{
1614 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1615 struct le_scan_params *param = &hdev->le_scan_params;
1438 1616
1439 BT_DBG("%s adv entry added: address %s type %u", hdev->name, 1617 BT_DBG("%s", hdev->name);
1440 batostr(&entry->bdaddr), entry->bdaddr_type);
1441 1618
1442 return 0; 1619 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1620 param->timeout);
1443} 1621}
1444 1622
1445/* Register HCI device */ 1623int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1446int hci_register_dev(struct hci_dev *hdev) 1624 int timeout)
1447{ 1625{
1448 struct list_head *head = &hci_dev_list, *p; 1626 struct le_scan_params *param = &hdev->le_scan_params;
1449 int i, id, error;
1450 1627
1451 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, 1628 BT_DBG("%s", hdev->name);
1452 hdev->bus, hdev->owner);
1453 1629
1454 if (!hdev->open || !hdev->close || !hdev->destruct) 1630 if (work_busy(&hdev->le_scan))
1455 return -EINVAL; 1631 return -EINPROGRESS;
1456 1632
1457 /* Do not allow HCI_AMP devices to register at index 0, 1633 param->type = type;
1458 * so the index can be used as the AMP controller ID. 1634 param->interval = interval;
1459 */ 1635 param->window = window;
1460 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1636 param->timeout = timeout;
1461 1637
1462 write_lock(&hci_dev_list_lock); 1638 queue_work(system_long_wq, &hdev->le_scan);
1463 1639
1464 /* Find first available device id */ 1640 return 0;
1465 list_for_each(p, &hci_dev_list) { 1641}
1466 if (list_entry(p, struct hci_dev, list)->id != id)
1467 break;
1468 head = p; id++;
1469 }
1470 1642
1471 sprintf(hdev->name, "hci%d", id); 1643/* Alloc HCI device */
1472 hdev->id = id; 1644struct hci_dev *hci_alloc_dev(void)
1473 list_add_tail(&hdev->list, head); 1645{
1646 struct hci_dev *hdev;
1474 1647
1475 atomic_set(&hdev->refcnt, 1); 1648 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1476 mutex_init(&hdev->lock); 1649 if (!hdev)
1650 return NULL;
1477 1651
1478 hdev->flags = 0;
1479 hdev->dev_flags = 0;
1480 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1652 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1481 hdev->esco_type = (ESCO_HV1); 1653 hdev->esco_type = (ESCO_HV1);
1482 hdev->link_mode = (HCI_LM_ACCEPT); 1654 hdev->link_mode = (HCI_LM_ACCEPT);
1483 hdev->io_capability = 0x03; /* No Input No Output */ 1655 hdev->io_capability = 0x03; /* No Input No Output */
1484 1656
1485 hdev->idle_timeout = 0;
1486 hdev->sniff_max_interval = 800; 1657 hdev->sniff_max_interval = 800;
1487 hdev->sniff_min_interval = 80; 1658 hdev->sniff_min_interval = 80;
1488 1659
1660 mutex_init(&hdev->lock);
1661 mutex_init(&hdev->req_lock);
1662
1663 INIT_LIST_HEAD(&hdev->mgmt_pending);
1664 INIT_LIST_HEAD(&hdev->blacklist);
1665 INIT_LIST_HEAD(&hdev->uuids);
1666 INIT_LIST_HEAD(&hdev->link_keys);
1667 INIT_LIST_HEAD(&hdev->long_term_keys);
1668 INIT_LIST_HEAD(&hdev->remote_oob_data);
1669
1489 INIT_WORK(&hdev->rx_work, hci_rx_work); 1670 INIT_WORK(&hdev->rx_work, hci_rx_work);
1490 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 1671 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1491 INIT_WORK(&hdev->tx_work, hci_tx_work); 1672 INIT_WORK(&hdev->tx_work, hci_tx_work);
1673 INIT_WORK(&hdev->power_on, hci_power_on);
1674 INIT_WORK(&hdev->le_scan, le_scan_work);
1492 1675
1676 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1677 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1678 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1493 1679
1680 skb_queue_head_init(&hdev->driver_init);
1494 skb_queue_head_init(&hdev->rx_q); 1681 skb_queue_head_init(&hdev->rx_q);
1495 skb_queue_head_init(&hdev->cmd_q); 1682 skb_queue_head_init(&hdev->cmd_q);
1496 skb_queue_head_init(&hdev->raw_q); 1683 skb_queue_head_init(&hdev->raw_q);
1497 1684
1498 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1499
1500 for (i = 0; i < NUM_REASSEMBLY; i++)
1501 hdev->reassembly[i] = NULL;
1502
1503 init_waitqueue_head(&hdev->req_wait_q); 1685 init_waitqueue_head(&hdev->req_wait_q);
1504 mutex_init(&hdev->req_lock);
1505 1686
1506 inquiry_cache_init(hdev); 1687 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1507 1688
1689 hci_init_sysfs(hdev);
1690 discovery_init(hdev);
1508 hci_conn_hash_init(hdev); 1691 hci_conn_hash_init(hdev);
1509 1692
1510 INIT_LIST_HEAD(&hdev->mgmt_pending); 1693 return hdev;
1694}
1695EXPORT_SYMBOL(hci_alloc_dev);
1511 1696
1512 INIT_LIST_HEAD(&hdev->blacklist); 1697/* Free HCI device */
1698void hci_free_dev(struct hci_dev *hdev)
1699{
1700 skb_queue_purge(&hdev->driver_init);
1513 1701
1514 INIT_LIST_HEAD(&hdev->uuids); 1702 /* will free via device release */
1703 put_device(&hdev->dev);
1704}
1705EXPORT_SYMBOL(hci_free_dev);
1515 1706
1516 INIT_LIST_HEAD(&hdev->link_keys); 1707/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev)
1709{
1710 struct list_head *head, *p;
1711 int id, error;
1517 1712
1518 INIT_LIST_HEAD(&hdev->remote_oob_data); 1713 if (!hdev->open || !hdev->close)
1714 return -EINVAL;
1519 1715
1520 INIT_LIST_HEAD(&hdev->adv_entries); 1716 write_lock(&hci_dev_list_lock);
1521 1717
1522 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); 1718 /* Do not allow HCI_AMP devices to register at index 0,
1523 INIT_WORK(&hdev->power_on, hci_power_on); 1719 * so the index can be used as the AMP controller ID.
1524 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 1720 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1722 head = &hci_dev_list;
1525 1723
1526 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); 1724 /* Find first available device id */
1725 list_for_each(p, &hci_dev_list) {
1726 int nid = list_entry(p, struct hci_dev, list)->id;
1727 if (nid > id)
1728 break;
1729 if (nid == id)
1730 id++;
1731 head = p;
1732 }
1527 1733
1528 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1734 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id;
1529 1736
1530 atomic_set(&hdev->promisc, 0); 1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738
1739 list_add(&hdev->list, head);
1531 1740
1532 write_unlock(&hci_dev_list_lock); 1741 write_unlock(&hci_dev_list_lock);
1533 1742
@@ -1551,11 +1760,12 @@ int hci_register_dev(struct hci_dev *hdev)
1551 } 1760 }
1552 } 1761 }
1553 1762
1554 set_bit(HCI_AUTO_OFF, &hdev->flags); 1763 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1555 set_bit(HCI_SETUP, &hdev->flags); 1764 set_bit(HCI_SETUP, &hdev->dev_flags);
1556 schedule_work(&hdev->power_on); 1765 schedule_work(&hdev->power_on);
1557 1766
1558 hci_notify(hdev, HCI_DEV_REG); 1767 hci_notify(hdev, HCI_DEV_REG);
1768 hci_dev_hold(hdev);
1559 1769
1560 return id; 1770 return id;
1561 1771
@@ -1577,6 +1787,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1577 1787
1578 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1579 1789
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791
1580 write_lock(&hci_dev_list_lock); 1792 write_lock(&hci_dev_list_lock);
1581 list_del(&hdev->list); 1793 list_del(&hdev->list);
1582 write_unlock(&hci_dev_list_lock); 1794 write_unlock(&hci_dev_list_lock);
@@ -1587,7 +1799,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1587 kfree_skb(hdev->reassembly[i]); 1799 kfree_skb(hdev->reassembly[i]);
1588 1800
1589 if (!test_bit(HCI_INIT, &hdev->flags) && 1801 if (!test_bit(HCI_INIT, &hdev->flags) &&
1590 !test_bit(HCI_SETUP, &hdev->flags)) { 1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1591 hci_dev_lock(hdev); 1803 hci_dev_lock(hdev);
1592 mgmt_index_removed(hdev); 1804 mgmt_index_removed(hdev);
1593 hci_dev_unlock(hdev); 1805 hci_dev_unlock(hdev);
@@ -1606,19 +1818,17 @@ void hci_unregister_dev(struct hci_dev *hdev)
1606 1818
1607 hci_del_sysfs(hdev); 1819 hci_del_sysfs(hdev);
1608 1820
1609 cancel_delayed_work_sync(&hdev->adv_work);
1610
1611 destroy_workqueue(hdev->workqueue); 1821 destroy_workqueue(hdev->workqueue);
1612 1822
1613 hci_dev_lock(hdev); 1823 hci_dev_lock(hdev);
1614 hci_blacklist_clear(hdev); 1824 hci_blacklist_clear(hdev);
1615 hci_uuids_clear(hdev); 1825 hci_uuids_clear(hdev);
1616 hci_link_keys_clear(hdev); 1826 hci_link_keys_clear(hdev);
1827 hci_smp_ltks_clear(hdev);
1617 hci_remote_oob_data_clear(hdev); 1828 hci_remote_oob_data_clear(hdev);
1618 hci_adv_entries_clear(hdev);
1619 hci_dev_unlock(hdev); 1829 hci_dev_unlock(hdev);
1620 1830
1621 __hci_dev_put(hdev); 1831 hci_dev_put(hdev);
1622} 1832}
1623EXPORT_SYMBOL(hci_unregister_dev); 1833EXPORT_SYMBOL(hci_unregister_dev);
1624 1834
@@ -1706,7 +1916,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1706 1916
1707 while (count) { 1917 while (count) {
1708 scb = (void *) skb->cb; 1918 scb = (void *) skb->cb;
1709 len = min(scb->expect, (__u16)count); 1919 len = min_t(uint, scb->expect, count);
1710 1920
1711 memcpy(skb_put(skb, len), data, len); 1921 memcpy(skb_put(skb, len), data, len);
1712 1922
@@ -1862,11 +2072,15 @@ static int hci_send_frame(struct sk_buff *skb)
1862 2072
1863 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 2073 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1864 2074
1865 if (atomic_read(&hdev->promisc)) { 2075 /* Time stamp */
1866 /* Time stamp */ 2076 __net_timestamp(skb);
1867 __net_timestamp(skb); 2077
2078 /* Send copy to monitor */
2079 hci_send_to_monitor(hdev, skb);
1868 2080
1869 hci_send_to_sock(hdev, skb, NULL); 2081 if (atomic_read(&hdev->promisc)) {
2082 /* Send copy to the sockets */
2083 hci_send_to_sock(hdev, skb);
1870 } 2084 }
1871 2085
1872 /* Get rid of skb owner, prior to sending to the driver. */ 2086 /* Get rid of skb owner, prior to sending to the driver. */
@@ -1948,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1948 struct hci_dev *hdev = conn->hdev; 2162 struct hci_dev *hdev = conn->hdev;
1949 struct sk_buff *list; 2163 struct sk_buff *list;
1950 2164
2165 skb->len = skb_headlen(skb);
2166 skb->data_len = 0;
2167
2168 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2169 hci_add_acl_hdr(skb, conn->handle, flags);
2170
1951 list = skb_shinfo(skb)->frag_list; 2171 list = skb_shinfo(skb)->frag_list;
1952 if (!list) { 2172 if (!list) {
1953 /* Non fragmented */ 2173 /* Non fragmented */
@@ -1991,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1991 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); 2211 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1992 2212
1993 skb->dev = (void *) hdev; 2213 skb->dev = (void *) hdev;
1994 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1995 hci_add_acl_hdr(skb, conn->handle, flags);
1996 2214
1997 hci_queue_acl(conn, &chan->data_q, skb, flags); 2215 hci_queue_acl(conn, &chan->data_q, skb, flags);
1998 2216
@@ -2030,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2030{ 2248{
2031 struct hci_conn_hash *h = &hdev->conn_hash; 2249 struct hci_conn_hash *h = &hdev->conn_hash;
2032 struct hci_conn *conn = NULL, *c; 2250 struct hci_conn *conn = NULL, *c;
2033 int num = 0, min = ~0; 2251 unsigned int num = 0, min = ~0;
2034 2252
2035 /* We don't have to lock device here. Connections are always 2253 /* We don't have to lock device here. Connections are always
2036 * added and removed with TX task disabled. */ 2254 * added and removed with TX task disabled. */
@@ -2111,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2111{ 2329{
2112 struct hci_conn_hash *h = &hdev->conn_hash; 2330 struct hci_conn_hash *h = &hdev->conn_hash;
2113 struct hci_chan *chan = NULL; 2331 struct hci_chan *chan = NULL;
2114 int num = 0, min = ~0, cur_prio = 0; 2332 unsigned int num = 0, min = ~0, cur_prio = 0;
2115 struct hci_conn *conn; 2333 struct hci_conn *conn;
2116 int cnt, q, conn_num = 0; 2334 int cnt, q, conn_num = 0;
2117 2335
@@ -2235,26 +2453,31 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2235 2453
2236} 2454}
2237 2455
2238static inline void hci_sched_acl(struct hci_dev *hdev) 2456static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2239{ 2457{
2240 struct hci_chan *chan; 2458 /* Calculate count of blocks used by this packet */
2241 struct sk_buff *skb; 2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2242 int quote; 2460}
2243 unsigned int cnt;
2244
2245 BT_DBG("%s", hdev->name);
2246
2247 if (!hci_conn_num(hdev, ACL_LINK))
2248 return;
2249 2461
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{
2250 if (!test_bit(HCI_RAW, &hdev->flags)) { 2464 if (!test_bit(HCI_RAW, &hdev->flags)) {
2251 /* ACL tx timeout must be longer than maximum 2465 /* ACL tx timeout must be longer than maximum
2252 * link supervision timeout (40.9 seconds) */ 2466 * link supervision timeout (40.9 seconds) */
2253 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2254 hci_link_tx_to(hdev, ACL_LINK); 2469 hci_link_tx_to(hdev, ACL_LINK);
2255 } 2470 }
2471}
2472
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{
2475 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan;
2477 struct sk_buff *skb;
2478 int quote;
2256 2479
2257 cnt = hdev->acl_cnt; 2480 __check_timeout(hdev, cnt);
2258 2481
2259 while (hdev->acl_cnt && 2482 while (hdev->acl_cnt &&
2260 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
@@ -2270,7 +2493,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2270 skb = skb_dequeue(&chan->data_q); 2493 skb = skb_dequeue(&chan->data_q);
2271 2494
2272 hci_conn_enter_active_mode(chan->conn, 2495 hci_conn_enter_active_mode(chan->conn,
2273 bt_cb(skb)->force_active); 2496 bt_cb(skb)->force_active);
2274 2497
2275 hci_send_frame(skb); 2498 hci_send_frame(skb);
2276 hdev->acl_last_tx = jiffies; 2499 hdev->acl_last_tx = jiffies;
@@ -2285,6 +2508,70 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2285 hci_prio_recalculate(hdev, ACL_LINK); 2508 hci_prio_recalculate(hdev, ACL_LINK);
2286} 2509}
2287 2510
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2512{
2513 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan;
2515 struct sk_buff *skb;
2516 int quote;
2517
2518 __check_timeout(hdev, cnt);
2519
2520 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks;
2525
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority);
2528
2529 /* Stop if priority has changed */
2530 if (skb->priority < priority)
2531 break;
2532
2533 skb = skb_dequeue(&chan->data_q);
2534
2535 blocks = __get_blocks(hdev, skb);
2536 if (blocks > hdev->block_cnt)
2537 return;
2538
2539 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active);
2541
2542 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies;
2544
2545 hdev->block_cnt -= blocks;
2546 quote -= blocks;
2547
2548 chan->sent += blocks;
2549 chan->conn->sent += blocks;
2550 }
2551 }
2552
2553 if (cnt != hdev->block_cnt)
2554 hci_prio_recalculate(hdev, ACL_LINK);
2555}
2556
2557static inline void hci_sched_acl(struct hci_dev *hdev)
2558{
2559 BT_DBG("%s", hdev->name);
2560
2561 if (!hci_conn_num(hdev, ACL_LINK))
2562 return;
2563
2564 switch (hdev->flow_ctl_mode) {
2565 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2566 hci_sched_acl_pkt(hdev);
2567 break;
2568
2569 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2570 hci_sched_acl_blk(hdev);
2571 break;
2572 }
2573}
2574
2288/* Schedule SCO */ 2575/* Schedule SCO */
2289static inline void hci_sched_sco(struct hci_dev *hdev) 2576static inline void hci_sched_sco(struct hci_dev *hdev)
2290{ 2577{
@@ -2432,6 +2719,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2432 if (conn) { 2719 if (conn) {
2433 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2720 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2434 2721
2722 hci_dev_lock(hdev);
2723 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2724 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2725 mgmt_device_connected(hdev, &conn->dst, conn->type,
2726 conn->dst_type, 0, NULL, 0,
2727 conn->dev_class);
2728 hci_dev_unlock(hdev);
2729
2435 /* Send to upper protocol */ 2730 /* Send to upper protocol */
2436 l2cap_recv_acldata(conn, skb, flags); 2731 l2cap_recv_acldata(conn, skb, flags);
2437 return; 2732 return;
@@ -2482,9 +2777,12 @@ static void hci_rx_work(struct work_struct *work)
2482 BT_DBG("%s", hdev->name); 2777 BT_DBG("%s", hdev->name);
2483 2778
2484 while ((skb = skb_dequeue(&hdev->rx_q))) { 2779 while ((skb = skb_dequeue(&hdev->rx_q))) {
2780 /* Send copy to monitor */
2781 hci_send_to_monitor(hdev, skb);
2782
2485 if (atomic_read(&hdev->promisc)) { 2783 if (atomic_read(&hdev->promisc)) {
2486 /* Send copy to the sockets */ 2784 /* Send copy to the sockets */
2487 hci_send_to_sock(hdev, skb, NULL); 2785 hci_send_to_sock(hdev, skb);
2488 } 2786 }
2489 2787
2490 if (test_bit(HCI_RAW, &hdev->flags)) { 2788 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2568,6 +2866,8 @@ int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2568 if (test_bit(HCI_INQUIRY, &hdev->flags)) 2866 if (test_bit(HCI_INQUIRY, &hdev->flags))
2569 return -EINPROGRESS; 2867 return -EINPROGRESS;
2570 2868
2869 inquiry_cache_flush(hdev);
2870
2571 memset(&cp, 0, sizeof(cp)); 2871 memset(&cp, 0, sizeof(cp));
2572 memcpy(&cp.lap, lap, sizeof(cp.lap)); 2872 memcpy(&cp.lap, lap, sizeof(cp.lap));
2573 cp.length = length; 2873 cp.length = length;
@@ -2580,10 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
2580 BT_DBG("%s", hdev->name); 2880 BT_DBG("%s", hdev->name);
2581 2881
2582 if (!test_bit(HCI_INQUIRY, &hdev->flags)) 2882 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2583 return -EPERM; 2883 return -EALREADY;
2584 2884
2585 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 2885 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2586} 2886}
2587 2887
2588module_param(enable_hs, bool, 0644); 2888u8 bdaddr_to_le(u8 bdaddr_type)
2589MODULE_PARM_DESC(enable_hs, "Enable High Speed"); 2889{
2890 switch (bdaddr_type) {
2891 case BDADDR_LE_PUBLIC:
2892 return ADDR_LE_DEV_PUBLIC;
2893
2894 default:
2895 /* Fallback to LE Random address type */
2896 return ADDR_LE_DEV_RANDOM;
2897 }
2898}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 001307f81057..4eefb7f65cf6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -35,18 +35,14 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include <asm/system.h>
42#include <linux/uaccess.h> 40#include <linux/uaccess.h>
43#include <asm/unaligned.h> 41#include <asm/unaligned.h>
44 42
45#include <net/bluetooth/bluetooth.h> 43#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 44#include <net/bluetooth/hci_core.h>
47 45
48static bool enable_le;
49
50/* Handle HCI Event packets */ 46/* Handle HCI Event packets */
51 47
52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 48static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -65,7 +61,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
65 clear_bit(HCI_INQUIRY, &hdev->flags); 61 clear_bit(HCI_INQUIRY, &hdev->flags);
66 62
67 hci_dev_lock(hdev); 63 hci_dev_lock(hdev);
68 mgmt_discovering(hdev, 0); 64 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69 hci_dev_unlock(hdev); 65 hci_dev_unlock(hdev);
70 66
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 67 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -73,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
73 hci_conn_check_pending(hdev); 69 hci_conn_check_pending(hdev);
74} 70}
75 71
72static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74 __u8 status = *((__u8 *) skb->data);
75
76 BT_DBG("%s status 0x%x", hdev->name, status);
77
78 if (status)
79 return;
80
81 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82}
83
76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 84static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77{ 85{
78 __u8 status = *((__u8 *) skb->data); 86 __u8 status = *((__u8 *) skb->data);
@@ -82,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 if (status) 90 if (status)
83 return; 91 return;
84 92
93 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
94
85 hci_conn_check_pending(hdev); 95 hci_conn_check_pending(hdev);
86} 96}
87 97
@@ -195,7 +205,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 205
196 hci_req_complete(hdev, HCI_OP_RESET, status); 206 hci_req_complete(hdev, HCI_OP_RESET, status);
197 207
198 hdev->dev_flags = 0; 208 /* Reset all non-persistent flags */
209 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
210 BIT(HCI_PERIODIC_INQ));
211
212 hdev->discovery.state = DISCOVERY_STOPPED;
199} 213}
200 214
201static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 215static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -211,13 +225,14 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
211 225
212 hci_dev_lock(hdev); 226 hci_dev_lock(hdev);
213 227
214 if (test_bit(HCI_MGMT, &hdev->flags)) 228 if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 mgmt_set_local_name_complete(hdev, sent, status); 229 mgmt_set_local_name_complete(hdev, sent, status);
216 230 else if (!status)
217 if (status == 0)
218 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 231 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219 232
220 hci_dev_unlock(hdev); 233 hci_dev_unlock(hdev);
234
235 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221} 236}
222 237
223static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 238static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -229,7 +244,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 if (rp->status) 244 if (rp->status)
230 return; 245 return;
231 246
232 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 247 if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233} 249}
234 250
235static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 251static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -252,6 +268,9 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
252 clear_bit(HCI_AUTH, &hdev->flags); 268 clear_bit(HCI_AUTH, &hdev->flags);
253 } 269 }
254 270
271 if (test_bit(HCI_MGMT, &hdev->dev_flags))
272 mgmt_auth_enable_complete(hdev, status);
273
255 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status); 274 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256} 275}
257 276
@@ -349,14 +368,19 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
349 368
350 BT_DBG("%s status 0x%x", hdev->name, status); 369 BT_DBG("%s status 0x%x", hdev->name, status);
351 370
352 if (status)
353 return;
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 if (!sent) 372 if (!sent)
357 return; 373 return;
358 374
359 memcpy(hdev->dev_class, sent, 3); 375 hci_dev_lock(hdev);
376
377 if (status == 0)
378 memcpy(hdev->dev_class, sent, 3);
379
380 if (test_bit(HCI_MGMT, &hdev->dev_flags))
381 mgmt_set_class_of_dev_complete(hdev, sent, status);
382
383 hci_dev_unlock(hdev);
360} 384}
361 385
362static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 386static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -419,18 +443,6 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
419 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); 443 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
420} 444}
421 445
422static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423{
424 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->ssp_mode = rp->mode;
432}
433
434static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 446static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
435{ 447{
436 __u8 status = *((__u8 *) skb->data); 448 __u8 status = *((__u8 *) skb->data);
@@ -438,14 +450,18 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 450
439 BT_DBG("%s status 0x%x", hdev->name, status); 451 BT_DBG("%s status 0x%x", hdev->name, status);
440 452
441 if (status)
442 return;
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 453 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent) 454 if (!sent)
446 return; 455 return;
447 456
448 hdev->ssp_mode = *((__u8 *) sent); 457 if (test_bit(HCI_MGMT, &hdev->dev_flags))
458 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
459 else if (!status) {
460 if (*((u8 *) sent))
461 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
462 else
463 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
464 }
449} 465}
450 466
451static u8 hci_get_inquiry_mode(struct hci_dev *hdev) 467static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
@@ -504,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
504 events[5] |= 0x10; /* Synchronous Connection Changed */ 520 events[5] |= 0x10; /* Synchronous Connection Changed */
505 521
506 if (hdev->features[3] & LMP_RSSI_INQ) 522 if (hdev->features[3] & LMP_RSSI_INQ)
507 events[4] |= 0x04; /* Inquiry Result with RSSI */ 523 events[4] |= 0x02; /* Inquiry Result with RSSI */
508 524
509 if (hdev->features[5] & LMP_SNIFF_SUBR) 525 if (hdev->features[5] & LMP_SNIFF_SUBR)
510 events[5] |= 0x20; /* Sniff Subrating */ 526 events[5] |= 0x20; /* Sniff Subrating */
@@ -540,20 +556,6 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
540 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 556 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
541} 557}
542 558
543static void hci_set_le_support(struct hci_dev *hdev)
544{
545 struct hci_cp_write_le_host_supported cp;
546
547 memset(&cp, 0, sizeof(cp));
548
549 if (enable_le) {
550 cp.le = 1;
551 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
552 }
553
554 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
555}
556
557static void hci_setup(struct hci_dev *hdev) 559static void hci_setup(struct hci_dev *hdev)
558{ 560{
559 if (hdev->dev_type != HCI_BREDR) 561 if (hdev->dev_type != HCI_BREDR)
@@ -565,8 +567,18 @@ static void hci_setup(struct hci_dev *hdev)
565 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
566 568
567 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 569 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
568 u8 mode = 0x01; 570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
569 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); 571 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
573 sizeof(mode), &mode);
574 } else {
575 struct hci_cp_write_eir cp;
576
577 memset(hdev->eir, 0, sizeof(hdev->eir));
578 memset(&cp, 0, sizeof(cp));
579
580 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
581 }
570 } 582 }
571 583
572 if (hdev->features[3] & LMP_RSSI_INQ) 584 if (hdev->features[3] & LMP_RSSI_INQ)
@@ -579,12 +591,15 @@ static void hci_setup(struct hci_dev *hdev)
579 struct hci_cp_read_local_ext_features cp; 591 struct hci_cp_read_local_ext_features cp;
580 592
581 cp.page = 0x01; 593 cp.page = 0x01;
582 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, 594 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
583 sizeof(cp), &cp); 595 &cp);
584 } 596 }
585 597
586 if (hdev->features[4] & LMP_LE) 598 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
587 hci_set_le_support(hdev); 599 u8 enable = 1;
600 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
601 &enable);
602 }
588} 603}
589 604
590static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 605static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -594,7 +609,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
594 BT_DBG("%s status 0x%x", hdev->name, rp->status); 609 BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 610
596 if (rp->status) 611 if (rp->status)
597 return; 612 goto done;
598 613
599 hdev->hci_ver = rp->hci_ver; 614 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 615 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -608,10 +623,14 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
608 623
609 if (test_bit(HCI_INIT, &hdev->flags)) 624 if (test_bit(HCI_INIT, &hdev->flags))
610 hci_setup(hdev); 625 hci_setup(hdev);
626
627done:
628 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
611} 629}
612 630
613static void hci_setup_link_policy(struct hci_dev *hdev) 631static void hci_setup_link_policy(struct hci_dev *hdev)
614{ 632{
633 struct hci_cp_write_def_link_policy cp;
615 u16 link_policy = 0; 634 u16 link_policy = 0;
616 635
617 if (hdev->features[0] & LMP_RSWITCH) 636 if (hdev->features[0] & LMP_RSWITCH)
@@ -623,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
623 if (hdev->features[1] & LMP_PARK) 642 if (hdev->features[1] & LMP_PARK)
624 link_policy |= HCI_LP_PARK; 643 link_policy |= HCI_LP_PARK;
625 644
626 link_policy = cpu_to_le16(link_policy); 645 cp.policy = cpu_to_le16(link_policy);
627 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
628 sizeof(link_policy), &link_policy);
629} 647}
630 648
631static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -701,6 +719,22 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
701 hdev->features[6], hdev->features[7]); 719 hdev->features[6], hdev->features[7]);
702} 720}
703 721
722static void hci_set_le_support(struct hci_dev *hdev)
723{
724 struct hci_cp_write_le_host_supported cp;
725
726 memset(&cp, 0, sizeof(cp));
727
728 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
729 cp.le = 1;
730 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
731 }
732
733 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
734 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
735 &cp);
736}
737
704static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 738static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
705 struct sk_buff *skb) 739 struct sk_buff *skb)
706{ 740{
@@ -709,7 +743,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
709 BT_DBG("%s status 0x%x", hdev->name, rp->status); 743 BT_DBG("%s status 0x%x", hdev->name, rp->status);
710 744
711 if (rp->status) 745 if (rp->status)
712 return; 746 goto done;
713 747
714 switch (rp->page) { 748 switch (rp->page) {
715 case 0: 749 case 0:
@@ -720,6 +754,10 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
720 break; 754 break;
721 } 755 }
722 756
757 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
758 hci_set_le_support(hdev);
759
760done:
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); 761 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
724} 762}
725 763
@@ -864,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
864static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb) 903 struct sk_buff *skb)
866{ 904{
867 __u8 status = *((__u8 *) skb->data); 905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
868 906
869 BT_DBG("%s status 0x%x", hdev->name, status); 907 BT_DBG("%s status 0x%x", hdev->name, rp->status);
908
909 if (!rp->status)
910 hdev->inq_tx_power = rp->tx_power;
870 911
871 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status); 912 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
872} 913}
873 914
874static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) 915static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -890,7 +931,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
890 931
891 hci_dev_lock(hdev); 932 hci_dev_lock(hdev);
892 933
893 if (test_bit(HCI_MGMT, &hdev->flags)) 934 if (test_bit(HCI_MGMT, &hdev->dev_flags))
894 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 935 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
895 936
896 if (rp->status != 0) 937 if (rp->status != 0)
@@ -916,7 +957,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 957
917 hci_dev_lock(hdev); 958 hci_dev_lock(hdev);
918 959
919 if (test_bit(HCI_MGMT, &hdev->flags)) 960 if (test_bit(HCI_MGMT, &hdev->dev_flags))
920 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
921 rp->status); 962 rp->status);
922 963
@@ -951,9 +992,9 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
951 992
952 hci_dev_lock(hdev); 993 hci_dev_lock(hdev);
953 994
954 if (test_bit(HCI_MGMT, &hdev->flags)) 995 if (test_bit(HCI_MGMT, &hdev->dev_flags))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, 996 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 rp->status); 997 rp->status);
957 998
958 hci_dev_unlock(hdev); 999 hci_dev_unlock(hdev);
959} 1000}
@@ -967,9 +1008,9 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
967 1008
968 hci_dev_lock(hdev); 1009 hci_dev_lock(hdev);
969 1010
970 if (test_bit(HCI_MGMT, &hdev->flags)) 1011 if (test_bit(HCI_MGMT, &hdev->dev_flags))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1012 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 rp->status); 1013 ACL_LINK, 0, rp->status);
973 1014
974 hci_dev_unlock(hdev); 1015 hci_dev_unlock(hdev);
975} 1016}
@@ -982,9 +1023,9 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 1023
983 hci_dev_lock(hdev); 1024 hci_dev_lock(hdev);
984 1025
985 if (test_bit(HCI_MGMT, &hdev->flags)) 1026 if (test_bit(HCI_MGMT, &hdev->dev_flags))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, 1027 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 rp->status); 1028 0, rp->status);
988 1029
989 hci_dev_unlock(hdev); 1030 hci_dev_unlock(hdev);
990} 1031}
@@ -998,9 +1039,9 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
998 1039
999 hci_dev_lock(hdev); 1040 hci_dev_lock(hdev);
1000 1041
1001 if (test_bit(HCI_MGMT, &hdev->flags)) 1042 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1043 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 rp->status); 1044 ACL_LINK, 0, rp->status);
1004 1045
1005 hci_dev_unlock(hdev); 1046 hci_dev_unlock(hdev);
1006} 1047}
@@ -1023,6 +1064,15 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1023 __u8 status = *((__u8 *) skb->data); 1064 __u8 status = *((__u8 *) skb->data);
1024 1065
1025 BT_DBG("%s status 0x%x", hdev->name, status); 1066 BT_DBG("%s status 0x%x", hdev->name, status);
1067
1068 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1069
1070 if (status) {
1071 hci_dev_lock(hdev);
1072 mgmt_start_discovery_failed(hdev, status);
1073 hci_dev_unlock(hdev);
1074 return;
1075 }
1026} 1076}
1027 1077
1028static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1078static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -1033,28 +1083,47 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1033 1083
1034 BT_DBG("%s status 0x%x", hdev->name, status); 1084 BT_DBG("%s status 0x%x", hdev->name, status);
1035 1085
1036 if (status)
1037 return;
1038
1039 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1086 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1040 if (!cp) 1087 if (!cp)
1041 return; 1088 return;
1042 1089
1043 switch (cp->enable) { 1090 switch (cp->enable) {
1044 case LE_SCANNING_ENABLED: 1091 case LE_SCANNING_ENABLED:
1045 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1092 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1093
1094 if (status) {
1095 hci_dev_lock(hdev);
1096 mgmt_start_discovery_failed(hdev, status);
1097 hci_dev_unlock(hdev);
1098 return;
1099 }
1046 1100
1047 cancel_delayed_work_sync(&hdev->adv_work); 1101 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1048 1102
1049 hci_dev_lock(hdev); 1103 hci_dev_lock(hdev);
1050 hci_adv_entries_clear(hdev); 1104 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1051 hci_dev_unlock(hdev); 1105 hci_dev_unlock(hdev);
1052 break; 1106 break;
1053 1107
1054 case LE_SCANNING_DISABLED: 1108 case LE_SCANNING_DISABLED:
1109 if (status) {
1110 hci_dev_lock(hdev);
1111 mgmt_stop_discovery_failed(hdev, status);
1112 hci_dev_unlock(hdev);
1113 return;
1114 }
1115
1055 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1116 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1056 1117
1057 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT); 1118 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1119 hdev->discovery.state == DISCOVERY_FINDING) {
1120 mgmt_interleaved_discovery(hdev);
1121 } else {
1122 hci_dev_lock(hdev);
1123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1124 hci_dev_unlock(hdev);
1125 }
1126
1058 break; 1127 break;
1059 1128
1060 default: 1129 default:
@@ -1090,16 +1159,27 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1090static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1159static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1091 struct sk_buff *skb) 1160 struct sk_buff *skb)
1092{ 1161{
1093 struct hci_cp_read_local_ext_features cp; 1162 struct hci_cp_write_le_host_supported *sent;
1094 __u8 status = *((__u8 *) skb->data); 1163 __u8 status = *((__u8 *) skb->data);
1095 1164
1096 BT_DBG("%s status 0x%x", hdev->name, status); 1165 BT_DBG("%s status 0x%x", hdev->name, status);
1097 1166
1098 if (status) 1167 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1168 if (!sent)
1099 return; 1169 return;
1100 1170
1101 cp.page = 0x01; 1171 if (!status) {
1102 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp); 1172 if (sent->le)
1173 hdev->host_features[0] |= LMP_HOST_LE;
1174 else
1175 hdev->host_features[0] &= ~LMP_HOST_LE;
1176 }
1177
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1179 !test_bit(HCI_INIT, &hdev->flags))
1180 mgmt_le_enable_complete(hdev, sent->le, status);
1181
1182 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1103} 1183}
1104 1184
1105static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1185static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -1110,7 +1190,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1110 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1190 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1111 hci_conn_check_pending(hdev); 1191 hci_conn_check_pending(hdev);
1112 hci_dev_lock(hdev); 1192 hci_dev_lock(hdev);
1113 if (test_bit(HCI_MGMT, &hdev->flags)) 1193 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1114 mgmt_start_discovery_failed(hdev, status); 1194 mgmt_start_discovery_failed(hdev, status);
1115 hci_dev_unlock(hdev); 1195 hci_dev_unlock(hdev);
1116 return; 1196 return;
@@ -1119,7 +1199,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1119 set_bit(HCI_INQUIRY, &hdev->flags); 1199 set_bit(HCI_INQUIRY, &hdev->flags);
1120 1200
1121 hci_dev_lock(hdev); 1201 hci_dev_lock(hdev);
1122 mgmt_discovering(hdev, 1); 1202 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1123 hci_dev_unlock(hdev); 1203 hci_dev_unlock(hdev);
1124} 1204}
1125 1205
@@ -1153,7 +1233,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1153 if (!conn) { 1233 if (!conn) {
1154 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 1234 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1155 if (conn) { 1235 if (conn) {
1156 conn->out = 1; 1236 conn->out = true;
1157 conn->link_mode |= HCI_LM_MASTER; 1237 conn->link_mode |= HCI_LM_MASTER;
1158 } else 1238 } else
1159 BT_ERR("No memory for new connection"); 1239 BT_ERR("No memory for new connection");
@@ -1263,7 +1343,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1263 1343
1264 /* Only request authentication for SSP connections or non-SSP 1344 /* Only request authentication for SSP connections or non-SSP
1265 * devices with sec_level HIGH or if MITM protection is requested */ 1345 * devices with sec_level HIGH or if MITM protection is requested */
1266 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 1346 if (!hci_conn_ssp_enabled(conn) &&
1267 conn->pending_sec_level != BT_SECURITY_HIGH && 1347 conn->pending_sec_level != BT_SECURITY_HIGH &&
1268 !(conn->auth_type & 0x01)) 1348 !(conn->auth_type & 0x01))
1269 return 0; 1349 return 0;
@@ -1271,6 +1351,73 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1271 return 1; 1351 return 1;
1272} 1352}
1273 1353
1354static inline int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e)
1356{
1357 struct hci_cp_remote_name_req cp;
1358
1359 memset(&cp, 0, sizeof(cp));
1360
1361 bacpy(&cp.bdaddr, &e->data.bdaddr);
1362 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1363 cp.pscan_mode = e->data.pscan_mode;
1364 cp.clock_offset = e->data.clock_offset;
1365
1366 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1367}
1368
1369static bool hci_resolve_next_name(struct hci_dev *hdev)
1370{
1371 struct discovery_state *discov = &hdev->discovery;
1372 struct inquiry_entry *e;
1373
1374 if (list_empty(&discov->resolve))
1375 return false;
1376
1377 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1378 if (hci_resolve_name(hdev, e) == 0) {
1379 e->name_state = NAME_PENDING;
1380 return true;
1381 }
1382
1383 return false;
1384}
1385
1386static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1387 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1388{
1389 struct discovery_state *discov = &hdev->discovery;
1390 struct inquiry_entry *e;
1391
1392 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1393 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1394 name_len, conn->dev_class);
1395
1396 if (discov->state == DISCOVERY_STOPPED)
1397 return;
1398
1399 if (discov->state == DISCOVERY_STOPPING)
1400 goto discov_complete;
1401
1402 if (discov->state != DISCOVERY_RESOLVING)
1403 return;
1404
1405 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1406 if (e) {
1407 e->name_state = NAME_KNOWN;
1408 list_del(&e->list);
1409 if (name)
1410 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1411 e->data.rssi, name, name_len);
1412 }
1413
1414 if (hci_resolve_next_name(hdev))
1415 return;
1416
1417discov_complete:
1418 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1419}
1420
1274static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1421static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1275{ 1422{
1276 struct hci_cp_remote_name_req *cp; 1423 struct hci_cp_remote_name_req *cp;
@@ -1290,13 +1437,17 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1290 hci_dev_lock(hdev); 1437 hci_dev_lock(hdev);
1291 1438
1292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1439 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1440
1441 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1442 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1443
1293 if (!conn) 1444 if (!conn)
1294 goto unlock; 1445 goto unlock;
1295 1446
1296 if (!hci_outgoing_auth_needed(hdev, conn)) 1447 if (!hci_outgoing_auth_needed(hdev, conn))
1297 goto unlock; 1448 goto unlock;
1298 1449
1299 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 1450 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1300 struct hci_cp_auth_requested cp; 1451 struct hci_cp_auth_requested cp;
1301 cp.handle = __cpu_to_le16(conn->handle); 1452 cp.handle = __cpu_to_le16(conn->handle);
1302 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1453 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1413,9 +1564,9 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1413 1564
1414 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1565 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1415 if (conn) { 1566 if (conn) {
1416 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1567 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1417 1568
1418 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1569 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1419 hci_sco_setup(conn, status); 1570 hci_sco_setup(conn, status);
1420 } 1571 }
1421 1572
@@ -1440,15 +1591,37 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1440 1591
1441 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1592 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1442 if (conn) { 1593 if (conn) {
1443 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1594 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1444 1595
1445 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1596 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1446 hci_sco_setup(conn, status); 1597 hci_sco_setup(conn, status);
1447 } 1598 }
1448 1599
1449 hci_dev_unlock(hdev); 1600 hci_dev_unlock(hdev);
1450} 1601}
1451 1602
1603static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1604{
1605 struct hci_cp_disconnect *cp;
1606 struct hci_conn *conn;
1607
1608 if (!status)
1609 return;
1610
1611 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1612 if (!cp)
1613 return;
1614
1615 hci_dev_lock(hdev);
1616
1617 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1618 if (conn)
1619 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1620 conn->dst_type, status);
1621
1622 hci_dev_unlock(hdev);
1623}
1624
1452static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) 1625static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1453{ 1626{
1454 struct hci_cp_le_create_conn *cp; 1627 struct hci_cp_le_create_conn *cp;
@@ -1470,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1470 if (status) { 1643 if (status) {
1471 if (conn && conn->state == BT_CONNECT) { 1644 if (conn && conn->state == BT_CONNECT) {
1472 conn->state = BT_CLOSED; 1645 conn->state = BT_CLOSED;
1646 mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1647 conn->dst_type, status);
1473 hci_proto_connect_cfm(conn, status); 1648 hci_proto_connect_cfm(conn, status);
1474 hci_conn_del(conn); 1649 hci_conn_del(conn);
1475 } 1650 }
@@ -1478,7 +1653,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1478 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr); 1653 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1479 if (conn) { 1654 if (conn) {
1480 conn->dst_type = cp->peer_addr_type; 1655 conn->dst_type = cp->peer_addr_type;
1481 conn->out = 1; 1656 conn->out = true;
1482 } else { 1657 } else {
1483 BT_ERR("No memory for new connection"); 1658 BT_ERR("No memory for new connection");
1484 } 1659 }
@@ -1496,6 +1671,8 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1496static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1497{ 1672{
1498 __u8 status = *((__u8 *) skb->data); 1673 __u8 status = *((__u8 *) skb->data);
1674 struct discovery_state *discov = &hdev->discovery;
1675 struct inquiry_entry *e;
1499 1676
1500 BT_DBG("%s status %d", hdev->name, status); 1677 BT_DBG("%s status %d", hdev->name, status);
1501 1678
@@ -1506,8 +1683,28 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
1506 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1683 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1507 return; 1684 return;
1508 1685
1686 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1687 return;
1688
1509 hci_dev_lock(hdev); 1689 hci_dev_lock(hdev);
1510 mgmt_discovering(hdev, 0); 1690
1691 if (discov->state != DISCOVERY_FINDING)
1692 goto unlock;
1693
1694 if (list_empty(&discov->resolve)) {
1695 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1696 goto unlock;
1697 }
1698
1699 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1700 if (e && hci_resolve_name(hdev, e) == 0) {
1701 e->name_state = NAME_PENDING;
1702 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1703 } else {
1704 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1705 }
1706
1707unlock:
1511 hci_dev_unlock(hdev); 1708 hci_dev_unlock(hdev);
1512} 1709}
1513 1710
@@ -1522,9 +1719,14 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1522 if (!num_rsp) 1719 if (!num_rsp)
1523 return; 1720 return;
1524 1721
1722 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1723 return;
1724
1525 hci_dev_lock(hdev); 1725 hci_dev_lock(hdev);
1526 1726
1527 for (; num_rsp; num_rsp--, info++) { 1727 for (; num_rsp; num_rsp--, info++) {
1728 bool name_known, ssp;
1729
1528 bacpy(&data.bdaddr, &info->bdaddr); 1730 bacpy(&data.bdaddr, &info->bdaddr);
1529 data.pscan_rep_mode = info->pscan_rep_mode; 1731 data.pscan_rep_mode = info->pscan_rep_mode;
1530 data.pscan_period_mode = info->pscan_period_mode; 1732 data.pscan_period_mode = info->pscan_period_mode;
@@ -1533,9 +1735,11 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1533 data.clock_offset = info->clock_offset; 1735 data.clock_offset = info->clock_offset;
1534 data.rssi = 0x00; 1736 data.rssi = 0x00;
1535 data.ssp_mode = 0x00; 1737 data.ssp_mode = 0x00;
1536 hci_inquiry_cache_update(hdev, &data); 1738
1739 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1537 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 1740 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1538 info->dev_class, 0, NULL); 1741 info->dev_class, 0, !name_known, ssp, NULL,
1742 0);
1539 } 1743 }
1540 1744
1541 hci_dev_unlock(hdev); 1745 hci_dev_unlock(hdev);
@@ -1569,8 +1773,6 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1569 conn->state = BT_CONFIG; 1773 conn->state = BT_CONFIG;
1570 hci_conn_hold(conn); 1774 hci_conn_hold(conn);
1571 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1775 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1572 mgmt_connected(hdev, &ev->bdaddr, conn->type,
1573 conn->dst_type);
1574 } else 1776 } else
1575 conn->state = BT_CONNECTED; 1777 conn->state = BT_CONNECTED;
1576 1778
@@ -1588,7 +1790,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1588 struct hci_cp_read_remote_features cp; 1790 struct hci_cp_read_remote_features cp;
1589 cp.handle = ev->handle; 1791 cp.handle = ev->handle;
1590 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 1792 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1591 sizeof(cp), &cp); 1793 sizeof(cp), &cp);
1592 } 1794 }
1593 1795
1594 /* Set packet type for incoming connection */ 1796 /* Set packet type for incoming connection */
@@ -1596,14 +1798,14 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1596 struct hci_cp_change_conn_ptype cp; 1798 struct hci_cp_change_conn_ptype cp;
1597 cp.handle = ev->handle; 1799 cp.handle = ev->handle;
1598 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1800 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1599 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1801 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1600 sizeof(cp), &cp); 1802 &cp);
1601 } 1803 }
1602 } else { 1804 } else {
1603 conn->state = BT_CLOSED; 1805 conn->state = BT_CLOSED;
1604 if (conn->type == ACL_LINK) 1806 if (conn->type == ACL_LINK)
1605 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, 1807 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1606 conn->dst_type, ev->status); 1808 conn->dst_type, ev->status);
1607 } 1809 }
1608 1810
1609 if (conn->type == ACL_LINK) 1811 if (conn->type == ACL_LINK)
@@ -1668,8 +1870,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1668 else 1870 else
1669 cp.role = 0x01; /* Remain slave */ 1871 cp.role = 0x01; /* Remain slave */
1670 1872
1671 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, 1873 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1672 sizeof(cp), &cp); 1874 &cp);
1673 } else { 1875 } else {
1674 struct hci_cp_accept_sync_conn_req cp; 1876 struct hci_cp_accept_sync_conn_req cp;
1675 1877
@@ -1683,7 +1885,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1683 cp.retrans_effort = 0xff; 1885 cp.retrans_effort = 0xff;
1684 1886
1685 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1887 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1686 sizeof(cp), &cp); 1888 sizeof(cp), &cp);
1687 } 1889 }
1688 } else { 1890 } else {
1689 /* Connection rejected */ 1891 /* Connection rejected */
@@ -1711,15 +1913,19 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1711 if (ev->status == 0) 1913 if (ev->status == 0)
1712 conn->state = BT_CLOSED; 1914 conn->state = BT_CLOSED;
1713 1915
1714 if (conn->type == ACL_LINK || conn->type == LE_LINK) { 1916 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1917 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1715 if (ev->status != 0) 1918 if (ev->status != 0)
1716 mgmt_disconnect_failed(hdev, &conn->dst, ev->status); 1919 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1920 conn->dst_type, ev->status);
1717 else 1921 else
1718 mgmt_disconnected(hdev, &conn->dst, conn->type, 1922 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1719 conn->dst_type); 1923 conn->dst_type);
1720 } 1924 }
1721 1925
1722 if (ev->status == 0) { 1926 if (ev->status == 0) {
1927 if (conn->type == ACL_LINK && conn->flush_key)
1928 hci_remove_link_key(hdev, &conn->dst);
1723 hci_proto_disconn_cfm(conn, ev->reason); 1929 hci_proto_disconn_cfm(conn, ev->reason);
1724 hci_conn_del(conn); 1930 hci_conn_del(conn);
1725 } 1931 }
@@ -1742,22 +1948,23 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1742 goto unlock; 1948 goto unlock;
1743 1949
1744 if (!ev->status) { 1950 if (!ev->status) {
1745 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) && 1951 if (!hci_conn_ssp_enabled(conn) &&
1746 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) { 1952 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1747 BT_INFO("re-auth of legacy device is not possible."); 1953 BT_INFO("re-auth of legacy device is not possible.");
1748 } else { 1954 } else {
1749 conn->link_mode |= HCI_LM_AUTH; 1955 conn->link_mode |= HCI_LM_AUTH;
1750 conn->sec_level = conn->pending_sec_level; 1956 conn->sec_level = conn->pending_sec_level;
1751 } 1957 }
1752 } else { 1958 } else {
1753 mgmt_auth_failed(hdev, &conn->dst, ev->status); 1959 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1960 ev->status);
1754 } 1961 }
1755 1962
1756 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1963 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1757 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend); 1964 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1758 1965
1759 if (conn->state == BT_CONFIG) { 1966 if (conn->state == BT_CONFIG) {
1760 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) { 1967 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1761 struct hci_cp_set_conn_encrypt cp; 1968 struct hci_cp_set_conn_encrypt cp;
1762 cp.handle = ev->handle; 1969 cp.handle = ev->handle;
1763 cp.encrypt = 0x01; 1970 cp.encrypt = 0x01;
@@ -1776,7 +1983,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1776 hci_conn_put(conn); 1983 hci_conn_put(conn);
1777 } 1984 }
1778 1985
1779 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 1986 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1780 if (!ev->status) { 1987 if (!ev->status) {
1781 struct hci_cp_set_conn_encrypt cp; 1988 struct hci_cp_set_conn_encrypt cp;
1782 cp.handle = ev->handle; 1989 cp.handle = ev->handle;
@@ -1784,7 +1991,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1784 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1991 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1785 &cp); 1992 &cp);
1786 } else { 1993 } else {
1787 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 1994 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1788 hci_encrypt_cfm(conn, ev->status, 0x00); 1995 hci_encrypt_cfm(conn, ev->status, 0x00);
1789 } 1996 }
1790 } 1997 }
@@ -1804,17 +2011,25 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
1804 2011
1805 hci_dev_lock(hdev); 2012 hci_dev_lock(hdev);
1806 2013
1807 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1808 mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1809
1810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2014 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2015
2016 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2017 goto check_auth;
2018
2019 if (ev->status == 0)
2020 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2021 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2022 else
2023 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2024
2025check_auth:
1811 if (!conn) 2026 if (!conn)
1812 goto unlock; 2027 goto unlock;
1813 2028
1814 if (!hci_outgoing_auth_needed(hdev, conn)) 2029 if (!hci_outgoing_auth_needed(hdev, conn))
1815 goto unlock; 2030 goto unlock;
1816 2031
1817 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 2032 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1818 struct hci_cp_auth_requested cp; 2033 struct hci_cp_auth_requested cp;
1819 cp.handle = __cpu_to_le16(conn->handle); 2034 cp.handle = __cpu_to_le16(conn->handle);
1820 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2035 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1845,7 +2060,13 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1845 conn->link_mode &= ~HCI_LM_ENCRYPT; 2060 conn->link_mode &= ~HCI_LM_ENCRYPT;
1846 } 2061 }
1847 2062
1848 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 2063 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2064
2065 if (ev->status && conn->state == BT_CONNECTED) {
2066 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2067 hci_conn_put(conn);
2068 goto unlock;
2069 }
1849 2070
1850 if (conn->state == BT_CONFIG) { 2071 if (conn->state == BT_CONFIG) {
1851 if (!ev->status) 2072 if (!ev->status)
@@ -1857,6 +2078,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1857 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2078 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1858 } 2079 }
1859 2080
2081unlock:
1860 hci_dev_unlock(hdev); 2082 hci_dev_unlock(hdev);
1861} 2083}
1862 2084
@@ -1874,7 +2096,7 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
1874 if (!ev->status) 2096 if (!ev->status)
1875 conn->link_mode |= HCI_LM_SECURE; 2097 conn->link_mode |= HCI_LM_SECURE;
1876 2098
1877 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 2099 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1878 2100
1879 hci_key_change_cfm(conn, ev->status); 2101 hci_key_change_cfm(conn, ev->status);
1880 } 2102 }
@@ -1910,13 +2132,16 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1910 goto unlock; 2132 goto unlock;
1911 } 2133 }
1912 2134
1913 if (!ev->status) { 2135 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
1914 struct hci_cp_remote_name_req cp; 2136 struct hci_cp_remote_name_req cp;
1915 memset(&cp, 0, sizeof(cp)); 2137 memset(&cp, 0, sizeof(cp));
1916 bacpy(&cp.bdaddr, &conn->dst); 2138 bacpy(&cp.bdaddr, &conn->dst);
1917 cp.pscan_rep_mode = 0x02; 2139 cp.pscan_rep_mode = 0x02;
1918 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2140 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1919 } 2141 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2142 mgmt_device_connected(hdev, &conn->dst, conn->type,
2143 conn->dst_type, 0, NULL, 0,
2144 conn->dev_class);
1920 2145
1921 if (!hci_outgoing_auth_needed(hdev, conn)) { 2146 if (!hci_outgoing_auth_needed(hdev, conn)) {
1922 conn->state = BT_CONNECTED; 2147 conn->state = BT_CONNECTED;
@@ -1952,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1952 hci_cc_inquiry_cancel(hdev, skb); 2177 hci_cc_inquiry_cancel(hdev, skb);
1953 break; 2178 break;
1954 2179
2180 case HCI_OP_PERIODIC_INQ:
2181 hci_cc_periodic_inq(hdev, skb);
2182 break;
2183
1955 case HCI_OP_EXIT_PERIODIC_INQ: 2184 case HCI_OP_EXIT_PERIODIC_INQ:
1956 hci_cc_exit_periodic_inq(hdev, skb); 2185 hci_cc_exit_periodic_inq(hdev, skb);
1957 break; 2186 break;
@@ -2024,10 +2253,6 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2024 hci_cc_host_buffer_size(hdev, skb); 2253 hci_cc_host_buffer_size(hdev, skb);
2025 break; 2254 break;
2026 2255
2027 case HCI_OP_READ_SSP_MODE:
2028 hci_cc_read_ssp_mode(hdev, skb);
2029 break;
2030
2031 case HCI_OP_WRITE_SSP_MODE: 2256 case HCI_OP_WRITE_SSP_MODE:
2032 hci_cc_write_ssp_mode(hdev, skb); 2257 hci_cc_write_ssp_mode(hdev, skb);
2033 break; 2258 break;
@@ -2122,6 +2347,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2122 2347
2123 case HCI_OP_USER_PASSKEY_NEG_REPLY: 2348 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2124 hci_cc_user_passkey_neg_reply(hdev, skb); 2349 hci_cc_user_passkey_neg_reply(hdev, skb);
2350 break;
2125 2351
2126 case HCI_OP_LE_SET_SCAN_PARAM: 2352 case HCI_OP_LE_SET_SCAN_PARAM:
2127 hci_cc_le_set_scan_param(hdev, skb); 2353 hci_cc_le_set_scan_param(hdev, skb);
@@ -2213,8 +2439,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2213 break; 2439 break;
2214 2440
2215 case HCI_OP_DISCONNECT: 2441 case HCI_OP_DISCONNECT:
2216 if (ev->status != 0) 2442 hci_cs_disconnect(hdev, ev->status);
2217 mgmt_disconnect_failed(hdev, NULL, ev->status);
2218 break; 2443 break;
2219 2444
2220 case HCI_OP_LE_CREATE_CONN: 2445 case HCI_OP_LE_CREATE_CONN:
@@ -2258,7 +2483,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2258 conn->link_mode |= HCI_LM_MASTER; 2483 conn->link_mode |= HCI_LM_MASTER;
2259 } 2484 }
2260 2485
2261 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); 2486 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2262 2487
2263 hci_role_switch_cfm(conn, ev->status, ev->role); 2488 hci_role_switch_cfm(conn, ev->status, ev->role);
2264 } 2489 }
@@ -2332,6 +2557,56 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2332 queue_work(hdev->workqueue, &hdev->tx_work); 2557 queue_work(hdev->workqueue, &hdev->tx_work);
2333} 2558}
2334 2559
2560static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2561 struct sk_buff *skb)
2562{
2563 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2564 int i;
2565
2566 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2567 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2568 return;
2569 }
2570
2571 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2572 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2573 BT_DBG("%s bad parameters", hdev->name);
2574 return;
2575 }
2576
2577 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2578 ev->num_hndl);
2579
2580 for (i = 0; i < ev->num_hndl; i++) {
2581 struct hci_comp_blocks_info *info = &ev->handles[i];
2582 struct hci_conn *conn;
2583 __u16 handle, block_count;
2584
2585 handle = __le16_to_cpu(info->handle);
2586 block_count = __le16_to_cpu(info->blocks);
2587
2588 conn = hci_conn_hash_lookup_handle(hdev, handle);
2589 if (!conn)
2590 continue;
2591
2592 conn->sent -= block_count;
2593
2594 switch (conn->type) {
2595 case ACL_LINK:
2596 hdev->block_cnt += block_count;
2597 if (hdev->block_cnt > hdev->num_blocks)
2598 hdev->block_cnt = hdev->num_blocks;
2599 break;
2600
2601 default:
2602 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2603 break;
2604 }
2605 }
2606
2607 queue_work(hdev->workqueue, &hdev->tx_work);
2608}
2609
2335static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2336{ 2611{
2337 struct hci_ev_mode_change *ev = (void *) skb->data; 2612 struct hci_ev_mode_change *ev = (void *) skb->data;
@@ -2346,14 +2621,14 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2346 conn->mode = ev->mode; 2621 conn->mode = ev->mode;
2347 conn->interval = __le16_to_cpu(ev->interval); 2622 conn->interval = __le16_to_cpu(ev->interval);
2348 2623
2349 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2350 if (conn->mode == HCI_CM_ACTIVE) 2625 if (conn->mode == HCI_CM_ACTIVE)
2351 conn->power_save = 1; 2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2352 else 2627 else
2353 conn->power_save = 0; 2628 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2354 } 2629 }
2355 2630
2356 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 2631 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2357 hci_sco_setup(conn, ev->status); 2632 hci_sco_setup(conn, ev->status);
2358 } 2633 }
2359 2634
@@ -2379,10 +2654,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2379 hci_conn_put(conn); 2654 hci_conn_put(conn);
2380 } 2655 }
2381 2656
2382 if (!test_bit(HCI_PAIRABLE, &hdev->flags)) 2657 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2383 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2658 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2384 sizeof(ev->bdaddr), &ev->bdaddr); 2659 sizeof(ev->bdaddr), &ev->bdaddr);
2385 else if (test_bit(HCI_MGMT, &hdev->flags)) { 2660 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2386 u8 secure; 2661 u8 secure;
2387 2662
2388 if (conn->pending_sec_level == BT_SECURITY_HIGH) 2663 if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -2406,7 +2681,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2406 2681
2407 BT_DBG("%s", hdev->name); 2682 BT_DBG("%s", hdev->name);
2408 2683
2409 if (!test_bit(HCI_LINK_KEYS, &hdev->flags)) 2684 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2410 return; 2685 return;
2411 2686
2412 hci_dev_lock(hdev); 2687 hci_dev_lock(hdev);
@@ -2421,7 +2696,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2421 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2422 batostr(&ev->bdaddr)); 2697 batostr(&ev->bdaddr));
2423 2698
2424 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && 2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2425 key->type == HCI_LK_DEBUG_COMBINATION) { 2700 key->type == HCI_LK_DEBUG_COMBINATION) {
2426 BT_DBG("%s ignoring debug key", hdev->name); 2701 BT_DBG("%s ignoring debug key", hdev->name);
2427 goto not_found; 2702 goto not_found;
@@ -2483,7 +2758,7 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2483 hci_conn_put(conn); 2758 hci_conn_put(conn);
2484 } 2759 }
2485 2760
2486 if (test_bit(HCI_LINK_KEYS, &hdev->flags)) 2761 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2487 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2762 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2488 ev->key_type, pin_len); 2763 ev->key_type, pin_len);
2489 2764
@@ -2551,12 +2826,16 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2551{ 2826{
2552 struct inquiry_data data; 2827 struct inquiry_data data;
2553 int num_rsp = *((__u8 *) skb->data); 2828 int num_rsp = *((__u8 *) skb->data);
2829 bool name_known, ssp;
2554 2830
2555 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2831 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2556 2832
2557 if (!num_rsp) 2833 if (!num_rsp)
2558 return; 2834 return;
2559 2835
2836 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2837 return;
2838
2560 hci_dev_lock(hdev); 2839 hci_dev_lock(hdev);
2561 2840
2562 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2841 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2572,10 +2851,12 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2572 data.clock_offset = info->clock_offset; 2851 data.clock_offset = info->clock_offset;
2573 data.rssi = info->rssi; 2852 data.rssi = info->rssi;
2574 data.ssp_mode = 0x00; 2853 data.ssp_mode = 0x00;
2575 hci_inquiry_cache_update(hdev, &data); 2854
2855 name_known = hci_inquiry_cache_update(hdev, &data,
2856 false, &ssp);
2576 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2857 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2577 info->dev_class, info->rssi, 2858 info->dev_class, info->rssi,
2578 NULL); 2859 !name_known, ssp, NULL, 0);
2579 } 2860 }
2580 } else { 2861 } else {
2581 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2862 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -2589,10 +2870,11 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2589 data.clock_offset = info->clock_offset; 2870 data.clock_offset = info->clock_offset;
2590 data.rssi = info->rssi; 2871 data.rssi = info->rssi;
2591 data.ssp_mode = 0x00; 2872 data.ssp_mode = 0x00;
2592 hci_inquiry_cache_update(hdev, &data); 2873 name_known = hci_inquiry_cache_update(hdev, &data,
2874 false, &ssp);
2593 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2875 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2594 info->dev_class, info->rssi, 2876 info->dev_class, info->rssi,
2595 NULL); 2877 !name_known, ssp, NULL, 0);
2596 } 2878 }
2597 } 2879 }
2598 2880
@@ -2617,21 +2899,25 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
2617 2899
2618 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 2900 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2619 if (ie) 2901 if (ie)
2620 ie->data.ssp_mode = (ev->features[0] & 0x01); 2902 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2621 2903
2622 conn->ssp_mode = (ev->features[0] & 0x01); 2904 if (ev->features[0] & LMP_HOST_SSP)
2905 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2623 } 2906 }
2624 2907
2625 if (conn->state != BT_CONFIG) 2908 if (conn->state != BT_CONFIG)
2626 goto unlock; 2909 goto unlock;
2627 2910
2628 if (!ev->status) { 2911 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2629 struct hci_cp_remote_name_req cp; 2912 struct hci_cp_remote_name_req cp;
2630 memset(&cp, 0, sizeof(cp)); 2913 memset(&cp, 0, sizeof(cp));
2631 bacpy(&cp.bdaddr, &conn->dst); 2914 bacpy(&cp.bdaddr, &conn->dst);
2632 cp.pscan_rep_mode = 0x02; 2915 cp.pscan_rep_mode = 0x02;
2633 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2916 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2634 } 2917 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2918 mgmt_device_connected(hdev, &conn->dst, conn->type,
2919 conn->dst_type, 0, NULL, 0,
2920 conn->dev_class);
2635 2921
2636 if (!hci_outgoing_auth_needed(hdev, conn)) { 2922 if (!hci_outgoing_auth_needed(hdev, conn)) {
2637 conn->state = BT_CONNECTED; 2923 conn->state = BT_CONNECTED;
@@ -2715,15 +3001,21 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2715 struct inquiry_data data; 3001 struct inquiry_data data;
2716 struct extended_inquiry_info *info = (void *) (skb->data + 1); 3002 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2717 int num_rsp = *((__u8 *) skb->data); 3003 int num_rsp = *((__u8 *) skb->data);
3004 size_t eir_len;
2718 3005
2719 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3006 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2720 3007
2721 if (!num_rsp) 3008 if (!num_rsp)
2722 return; 3009 return;
2723 3010
3011 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3012 return;
3013
2724 hci_dev_lock(hdev); 3014 hci_dev_lock(hdev);
2725 3015
2726 for (; num_rsp; num_rsp--, info++) { 3016 for (; num_rsp; num_rsp--, info++) {
3017 bool name_known, ssp;
3018
2727 bacpy(&data.bdaddr, &info->bdaddr); 3019 bacpy(&data.bdaddr, &info->bdaddr);
2728 data.pscan_rep_mode = info->pscan_rep_mode; 3020 data.pscan_rep_mode = info->pscan_rep_mode;
2729 data.pscan_period_mode = info->pscan_period_mode; 3021 data.pscan_period_mode = info->pscan_period_mode;
@@ -2732,9 +3024,20 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2732 data.clock_offset = info->clock_offset; 3024 data.clock_offset = info->clock_offset;
2733 data.rssi = info->rssi; 3025 data.rssi = info->rssi;
2734 data.ssp_mode = 0x01; 3026 data.ssp_mode = 0x01;
2735 hci_inquiry_cache_update(hdev, &data); 3027
3028 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3029 name_known = eir_has_data_type(info->data,
3030 sizeof(info->data),
3031 EIR_NAME_COMPLETE);
3032 else
3033 name_known = true;
3034
3035 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3036 &ssp);
3037 eir_len = eir_get_length(info->data, sizeof(info->data));
2736 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3038 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2737 info->dev_class, info->rssi, info->data); 3039 info->dev_class, info->rssi, !name_known,
3040 ssp, info->data, eir_len);
2738 } 3041 }
2739 3042
2740 hci_dev_unlock(hdev); 3043 hci_dev_unlock(hdev);
@@ -2774,19 +3077,22 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2774 3077
2775 hci_conn_hold(conn); 3078 hci_conn_hold(conn);
2776 3079
2777 if (!test_bit(HCI_MGMT, &hdev->flags)) 3080 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2778 goto unlock; 3081 goto unlock;
2779 3082
2780 if (test_bit(HCI_PAIRABLE, &hdev->flags) || 3083 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
2781 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3084 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2782 struct hci_cp_io_capability_reply cp; 3085 struct hci_cp_io_capability_reply cp;
2783 3086
2784 bacpy(&cp.bdaddr, &ev->bdaddr); 3087 bacpy(&cp.bdaddr, &ev->bdaddr);
2785 cp.capability = conn->io_capability; 3088 /* Change the IO capability from KeyboardDisplay
3089 * to DisplayYesNo as it is not supported by BT spec. */
3090 cp.capability = (conn->io_capability == 0x04) ?
3091 0x01 : conn->io_capability;
2786 conn->auth_type = hci_get_auth_req(conn); 3092 conn->auth_type = hci_get_auth_req(conn);
2787 cp.authentication = conn->auth_type; 3093 cp.authentication = conn->auth_type;
2788 3094
2789 if ((conn->out == 0x01 || conn->remote_oob == 0x01) && 3095 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
2790 hci_find_remote_oob_data(hdev, &conn->dst)) 3096 hci_find_remote_oob_data(hdev, &conn->dst))
2791 cp.oob_data = 0x01; 3097 cp.oob_data = 0x01;
2792 else 3098 else
@@ -2822,8 +3128,9 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *s
2822 goto unlock; 3128 goto unlock;
2823 3129
2824 conn->remote_cap = ev->capability; 3130 conn->remote_cap = ev->capability;
2825 conn->remote_oob = ev->oob_data;
2826 conn->remote_auth = ev->authentication; 3131 conn->remote_auth = ev->authentication;
3132 if (ev->oob_data)
3133 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
2827 3134
2828unlock: 3135unlock:
2829 hci_dev_unlock(hdev); 3136 hci_dev_unlock(hdev);
@@ -2840,7 +3147,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2840 3147
2841 hci_dev_lock(hdev); 3148 hci_dev_lock(hdev);
2842 3149
2843 if (!test_bit(HCI_MGMT, &hdev->flags)) 3150 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2844 goto unlock; 3151 goto unlock;
2845 3152
2846 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -2869,7 +3176,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2869 /* If we're not the initiators request authorization to 3176 /* If we're not the initiators request authorization to
2870 * proceed from user space (mgmt_user_confirm with 3177 * proceed from user space (mgmt_user_confirm with
2871 * confirm_hint set to 1). */ 3178 * confirm_hint set to 1). */
2872 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 3179 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2873 BT_DBG("Confirming auto-accept as acceptor"); 3180 BT_DBG("Confirming auto-accept as acceptor");
2874 confirm_hint = 1; 3181 confirm_hint = 1;
2875 goto confirm; 3182 goto confirm;
@@ -2890,8 +3197,8 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2890 } 3197 }
2891 3198
2892confirm: 3199confirm:
2893 mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey, 3200 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
2894 confirm_hint); 3201 confirm_hint);
2895 3202
2896unlock: 3203unlock:
2897 hci_dev_unlock(hdev); 3204 hci_dev_unlock(hdev);
@@ -2906,8 +3213,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2906 3213
2907 hci_dev_lock(hdev); 3214 hci_dev_lock(hdev);
2908 3215
2909 if (test_bit(HCI_MGMT, &hdev->flags)) 3216 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2910 mgmt_user_passkey_request(hdev, &ev->bdaddr); 3217 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
2911 3218
2912 hci_dev_unlock(hdev); 3219 hci_dev_unlock(hdev);
2913} 3220}
@@ -2930,8 +3237,9 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
2930 * initiated the authentication. A traditional auth_complete 3237 * initiated the authentication. A traditional auth_complete
2931 * event gets always produced as initiator and is also mapped to 3238 * event gets always produced as initiator and is also mapped to
2932 * the mgmt_auth_failed event */ 3239 * the mgmt_auth_failed event */
2933 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) 3240 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
2934 mgmt_auth_failed(hdev, &conn->dst, ev->status); 3241 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3242 ev->status);
2935 3243
2936 hci_conn_put(conn); 3244 hci_conn_put(conn);
2937 3245
@@ -2950,13 +3258,13 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
2950 3258
2951 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3259 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2952 if (ie) 3260 if (ie)
2953 ie->data.ssp_mode = (ev->features[0] & 0x01); 3261 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2954 3262
2955 hci_dev_unlock(hdev); 3263 hci_dev_unlock(hdev);
2956} 3264}
2957 3265
2958static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3266static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2959 struct sk_buff *skb) 3267 struct sk_buff *skb)
2960{ 3268{
2961 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3269 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2962 struct oob_data *data; 3270 struct oob_data *data;
@@ -2965,7 +3273,7 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2965 3273
2966 hci_dev_lock(hdev); 3274 hci_dev_lock(hdev);
2967 3275
2968 if (!test_bit(HCI_MGMT, &hdev->flags)) 3276 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2969 goto unlock; 3277 goto unlock;
2970 3278
2971 data = hci_find_remote_oob_data(hdev, &ev->bdaddr); 3279 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
@@ -3020,7 +3328,9 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3020 goto unlock; 3328 goto unlock;
3021 } 3329 }
3022 3330
3023 mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type); 3331 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3332 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3333 conn->dst_type, 0, NULL, 0, NULL);
3024 3334
3025 conn->sec_level = BT_SECURITY_LOW; 3335 conn->sec_level = BT_SECURITY_LOW;
3026 conn->handle = __le16_to_cpu(ev->handle); 3336 conn->handle = __le16_to_cpu(ev->handle);
@@ -3040,13 +3350,16 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3040{ 3350{
3041 u8 num_reports = skb->data[0]; 3351 u8 num_reports = skb->data[0];
3042 void *ptr = &skb->data[1]; 3352 void *ptr = &skb->data[1];
3353 s8 rssi;
3043 3354
3044 hci_dev_lock(hdev); 3355 hci_dev_lock(hdev);
3045 3356
3046 while (num_reports--) { 3357 while (num_reports--) {
3047 struct hci_ev_le_advertising_info *ev = ptr; 3358 struct hci_ev_le_advertising_info *ev = ptr;
3048 3359
3049 hci_add_adv_entry(hdev, ev); 3360 rssi = ev->data[ev->length];
3361 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3362 NULL, rssi, 0, 1, ev->data, ev->length);
3050 3363
3051 ptr += sizeof(*ev) + ev->length + 1; 3364 ptr += sizeof(*ev) + ev->length + 1;
3052 } 3365 }
@@ -3061,9 +3374,9 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3061 struct hci_cp_le_ltk_reply cp; 3374 struct hci_cp_le_ltk_reply cp;
3062 struct hci_cp_le_ltk_neg_reply neg; 3375 struct hci_cp_le_ltk_neg_reply neg;
3063 struct hci_conn *conn; 3376 struct hci_conn *conn;
3064 struct link_key *ltk; 3377 struct smp_ltk *ltk;
3065 3378
3066 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle)); 3379 BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3067 3380
3068 hci_dev_lock(hdev); 3381 hci_dev_lock(hdev);
3069 3382
@@ -3077,10 +3390,17 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3077 3390
3078 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); 3391 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3079 cp.handle = cpu_to_le16(conn->handle); 3392 cp.handle = cpu_to_le16(conn->handle);
3080 conn->pin_length = ltk->pin_len; 3393
3394 if (ltk->authenticated)
3395 conn->sec_level = BT_SECURITY_HIGH;
3081 3396
3082 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3397 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3083 3398
3399 if (ltk->type & HCI_SMP_STK) {
3400 list_del(&ltk->list);
3401 kfree(ltk);
3402 }
3403
3084 hci_dev_unlock(hdev); 3404 hci_dev_unlock(hdev);
3085 3405
3086 return; 3406 return;
@@ -3271,6 +3591,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3271 hci_remote_oob_data_request_evt(hdev, skb); 3591 hci_remote_oob_data_request_evt(hdev, skb);
3272 break; 3592 break;
3273 3593
3594 case HCI_EV_NUM_COMP_BLOCKS:
3595 hci_num_comp_blocks_evt(hdev, skb);
3596 break;
3597
3274 default: 3598 default:
3275 BT_DBG("%s event 0x%x", hdev->name, event); 3599 BT_DBG("%s event 0x%x", hdev->name, event);
3276 break; 3600 break;
@@ -3279,34 +3603,3 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3279 kfree_skb(skb); 3603 kfree_skb(skb);
3280 hdev->stat.evt_rx++; 3604 hdev->stat.evt_rx++;
3281} 3605}
3282
3283/* Generate internal stack event */
3284void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3285{
3286 struct hci_event_hdr *hdr;
3287 struct hci_ev_stack_internal *ev;
3288 struct sk_buff *skb;
3289
3290 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3291 if (!skb)
3292 return;
3293
3294 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3295 hdr->evt = HCI_EV_STACK_INTERNAL;
3296 hdr->plen = sizeof(*ev) + dlen;
3297
3298 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3299 ev->type = type;
3300 memcpy(ev->data, data, dlen);
3301
3302 bt_cb(skb)->incoming = 1;
3303 __net_timestamp(skb);
3304
3305 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3306 skb->dev = (void *) hdev;
3307 hci_send_to_sock(hdev, skb, NULL);
3308 kfree_skb(skb);
3309}
3310
3311module_param(enable_le, bool, 0644);
3312MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 0dcc96266779..5914623f426a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -42,14 +42,14 @@
42#include <linux/ioctl.h> 42#include <linux/ioctl.h>
43#include <net/sock.h> 43#include <net/sock.h>
44 44
45#include <asm/system.h>
46#include <linux/uaccess.h> 45#include <linux/uaccess.h>
47#include <asm/unaligned.h> 46#include <asm/unaligned.h>
48 47
49#include <net/bluetooth/bluetooth.h> 48#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 49#include <net/bluetooth/hci_core.h>
50#include <net/bluetooth/hci_mon.h>
51 51
52static bool enable_mgmt; 52static atomic_t monitor_promisc = ATOMIC_INIT(0);
53 53
54/* ----- HCI socket interface ----- */ 54/* ----- HCI socket interface ----- */
55 55
@@ -85,22 +85,20 @@ static struct bt_sock_list hci_sk_list = {
85}; 85};
86 86
87/* Send frame to RAW socket */ 87/* Send frame to RAW socket */
88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb, 88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
89 struct sock *skip_sk)
90{ 89{
91 struct sock *sk; 90 struct sock *sk;
92 struct hlist_node *node; 91 struct hlist_node *node;
92 struct sk_buff *skb_copy = NULL;
93 93
94 BT_DBG("hdev %p len %d", hdev, skb->len); 94 BT_DBG("hdev %p len %d", hdev, skb->len);
95 95
96 read_lock(&hci_sk_list.lock); 96 read_lock(&hci_sk_list.lock);
97
97 sk_for_each(sk, node, &hci_sk_list.head) { 98 sk_for_each(sk, node, &hci_sk_list.head) {
98 struct hci_filter *flt; 99 struct hci_filter *flt;
99 struct sk_buff *nskb; 100 struct sk_buff *nskb;
100 101
101 if (sk == skip_sk)
102 continue;
103
104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 102 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
105 continue; 103 continue;
106 104
@@ -108,12 +106,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
108 if (skb->sk == sk) 106 if (skb->sk == sk)
109 continue; 107 continue;
110 108
111 if (bt_cb(skb)->channel != hci_pi(sk)->channel) 109 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
112 continue; 110 continue;
113 111
114 if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
115 goto clone;
116
117 /* Apply filter */ 112 /* Apply filter */
118 flt = &hci_pi(sk)->filter; 113 flt = &hci_pi(sk)->filter;
119 114
@@ -137,21 +132,303 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
137 continue; 132 continue;
138 } 133 }
139 134
140clone: 135 if (!skb_copy) {
136 /* Create a private copy with headroom */
137 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
138 if (!skb_copy)
139 continue;
140
141 /* Put type byte before the data */
142 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
143 }
144
145 nskb = skb_clone(skb_copy, GFP_ATOMIC);
146 if (!nskb)
147 continue;
148
149 if (sock_queue_rcv_skb(sk, nskb))
150 kfree_skb(nskb);
151 }
152
153 read_unlock(&hci_sk_list.lock);
154
155 kfree_skb(skb_copy);
156}
157
158/* Send frame to control socket */
159void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
160{
161 struct sock *sk;
162 struct hlist_node *node;
163
164 BT_DBG("len %d", skb->len);
165
166 read_lock(&hci_sk_list.lock);
167
168 sk_for_each(sk, node, &hci_sk_list.head) {
169 struct sk_buff *nskb;
170
171 /* Skip the original socket */
172 if (sk == skip_sk)
173 continue;
174
175 if (sk->sk_state != BT_BOUND)
176 continue;
177
178 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
179 continue;
180
141 nskb = skb_clone(skb, GFP_ATOMIC); 181 nskb = skb_clone(skb, GFP_ATOMIC);
142 if (!nskb) 182 if (!nskb)
143 continue; 183 continue;
144 184
145 /* Put type byte before the data */ 185 if (sock_queue_rcv_skb(sk, nskb))
146 if (bt_cb(skb)->channel == HCI_CHANNEL_RAW) 186 kfree_skb(nskb);
147 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1); 187 }
188
189 read_unlock(&hci_sk_list.lock);
190}
191
192/* Send frame to monitor socket */
193void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
194{
195 struct sock *sk;
196 struct hlist_node *node;
197 struct sk_buff *skb_copy = NULL;
198 __le16 opcode;
199
200 if (!atomic_read(&monitor_promisc))
201 return;
202
203 BT_DBG("hdev %p len %d", hdev, skb->len);
204
205 switch (bt_cb(skb)->pkt_type) {
206 case HCI_COMMAND_PKT:
207 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
208 break;
209 case HCI_EVENT_PKT:
210 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
211 break;
212 case HCI_ACLDATA_PKT:
213 if (bt_cb(skb)->incoming)
214 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
215 else
216 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
217 break;
218 case HCI_SCODATA_PKT:
219 if (bt_cb(skb)->incoming)
220 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
221 else
222 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
223 break;
224 default:
225 return;
226 }
227
228 read_lock(&hci_sk_list.lock);
229
230 sk_for_each(sk, node, &hci_sk_list.head) {
231 struct sk_buff *nskb;
232
233 if (sk->sk_state != BT_BOUND)
234 continue;
235
236 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
237 continue;
238
239 if (!skb_copy) {
240 struct hci_mon_hdr *hdr;
241
242 /* Create a private copy with headroom */
243 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
244 if (!skb_copy)
245 continue;
246
247 /* Put header before the data */
248 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
249 hdr->opcode = opcode;
250 hdr->index = cpu_to_le16(hdev->id);
251 hdr->len = cpu_to_le16(skb->len);
252 }
253
254 nskb = skb_clone(skb_copy, GFP_ATOMIC);
255 if (!nskb)
256 continue;
257
258 if (sock_queue_rcv_skb(sk, nskb))
259 kfree_skb(nskb);
260 }
261
262 read_unlock(&hci_sk_list.lock);
263
264 kfree_skb(skb_copy);
265}
266
267static void send_monitor_event(struct sk_buff *skb)
268{
269 struct sock *sk;
270 struct hlist_node *node;
271
272 BT_DBG("len %d", skb->len);
273
274 read_lock(&hci_sk_list.lock);
275
276 sk_for_each(sk, node, &hci_sk_list.head) {
277 struct sk_buff *nskb;
278
279 if (sk->sk_state != BT_BOUND)
280 continue;
281
282 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
283 continue;
284
285 nskb = skb_clone(skb, GFP_ATOMIC);
286 if (!nskb)
287 continue;
148 288
149 if (sock_queue_rcv_skb(sk, nskb)) 289 if (sock_queue_rcv_skb(sk, nskb))
150 kfree_skb(nskb); 290 kfree_skb(nskb);
151 } 291 }
292
152 read_unlock(&hci_sk_list.lock); 293 read_unlock(&hci_sk_list.lock);
153} 294}
154 295
296static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
297{
298 struct hci_mon_hdr *hdr;
299 struct hci_mon_new_index *ni;
300 struct sk_buff *skb;
301 __le16 opcode;
302
303 switch (event) {
304 case HCI_DEV_REG:
305 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
306 if (!skb)
307 return NULL;
308
309 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
310 ni->type = hdev->dev_type;
311 ni->bus = hdev->bus;
312 bacpy(&ni->bdaddr, &hdev->bdaddr);
313 memcpy(ni->name, hdev->name, 8);
314
315 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
316 break;
317
318 case HCI_DEV_UNREG:
319 skb = bt_skb_alloc(0, GFP_ATOMIC);
320 if (!skb)
321 return NULL;
322
323 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
324 break;
325
326 default:
327 return NULL;
328 }
329
330 __net_timestamp(skb);
331
332 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
333 hdr->opcode = opcode;
334 hdr->index = cpu_to_le16(hdev->id);
335 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
336
337 return skb;
338}
339
340static void send_monitor_replay(struct sock *sk)
341{
342 struct hci_dev *hdev;
343
344 read_lock(&hci_dev_list_lock);
345
346 list_for_each_entry(hdev, &hci_dev_list, list) {
347 struct sk_buff *skb;
348
349 skb = create_monitor_event(hdev, HCI_DEV_REG);
350 if (!skb)
351 continue;
352
353 if (sock_queue_rcv_skb(sk, skb))
354 kfree_skb(skb);
355 }
356
357 read_unlock(&hci_dev_list_lock);
358}
359
360/* Generate internal stack event */
361static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
362{
363 struct hci_event_hdr *hdr;
364 struct hci_ev_stack_internal *ev;
365 struct sk_buff *skb;
366
367 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
368 if (!skb)
369 return;
370
371 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
372 hdr->evt = HCI_EV_STACK_INTERNAL;
373 hdr->plen = sizeof(*ev) + dlen;
374
375 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
376 ev->type = type;
377 memcpy(ev->data, data, dlen);
378
379 bt_cb(skb)->incoming = 1;
380 __net_timestamp(skb);
381
382 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
383 skb->dev = (void *) hdev;
384 hci_send_to_sock(hdev, skb);
385 kfree_skb(skb);
386}
387
388void hci_sock_dev_event(struct hci_dev *hdev, int event)
389{
390 struct hci_ev_si_device ev;
391
392 BT_DBG("hdev %s event %d", hdev->name, event);
393
394 /* Send event to monitor */
395 if (atomic_read(&monitor_promisc)) {
396 struct sk_buff *skb;
397
398 skb = create_monitor_event(hdev, event);
399 if (skb) {
400 send_monitor_event(skb);
401 kfree_skb(skb);
402 }
403 }
404
405 /* Send event to sockets */
406 ev.event = event;
407 ev.dev_id = hdev->id;
408 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
409
410 if (event == HCI_DEV_UNREG) {
411 struct sock *sk;
412 struct hlist_node *node;
413
414 /* Detach sockets from device */
415 read_lock(&hci_sk_list.lock);
416 sk_for_each(sk, node, &hci_sk_list.head) {
417 bh_lock_sock_nested(sk);
418 if (hci_pi(sk)->hdev == hdev) {
419 hci_pi(sk)->hdev = NULL;
420 sk->sk_err = EPIPE;
421 sk->sk_state = BT_OPEN;
422 sk->sk_state_change(sk);
423
424 hci_dev_put(hdev);
425 }
426 bh_unlock_sock(sk);
427 }
428 read_unlock(&hci_sk_list.lock);
429 }
430}
431
155static int hci_sock_release(struct socket *sock) 432static int hci_sock_release(struct socket *sock)
156{ 433{
157 struct sock *sk = sock->sk; 434 struct sock *sk = sock->sk;
@@ -164,6 +441,9 @@ static int hci_sock_release(struct socket *sock)
164 441
165 hdev = hci_pi(sk)->hdev; 442 hdev = hci_pi(sk)->hdev;
166 443
444 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
445 atomic_dec(&monitor_promisc);
446
167 bt_sock_unlink(&hci_sk_list, sk); 447 bt_sock_unlink(&hci_sk_list, sk);
168 448
169 if (hdev) { 449 if (hdev) {
@@ -190,7 +470,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
190 470
191 hci_dev_lock(hdev); 471 hci_dev_lock(hdev);
192 472
193 err = hci_blacklist_add(hdev, &bdaddr); 473 err = hci_blacklist_add(hdev, &bdaddr, 0);
194 474
195 hci_dev_unlock(hdev); 475 hci_dev_unlock(hdev);
196 476
@@ -207,7 +487,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
207 487
208 hci_dev_lock(hdev); 488 hci_dev_lock(hdev);
209 489
210 err = hci_blacklist_del(hdev, &bdaddr); 490 err = hci_blacklist_del(hdev, &bdaddr, 0);
211 491
212 hci_dev_unlock(hdev); 492 hci_dev_unlock(hdev);
213 493
@@ -340,34 +620,69 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
340 if (haddr.hci_family != AF_BLUETOOTH) 620 if (haddr.hci_family != AF_BLUETOOTH)
341 return -EINVAL; 621 return -EINVAL;
342 622
343 if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
344 return -EINVAL;
345
346 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
347 if (!enable_mgmt)
348 return -EINVAL;
349 set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
350 }
351
352 lock_sock(sk); 623 lock_sock(sk);
353 624
354 if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) { 625 if (sk->sk_state == BT_BOUND) {
355 err = -EALREADY; 626 err = -EALREADY;
356 goto done; 627 goto done;
357 } 628 }
358 629
359 if (haddr.hci_dev != HCI_DEV_NONE) { 630 switch (haddr.hci_channel) {
360 hdev = hci_dev_get(haddr.hci_dev); 631 case HCI_CHANNEL_RAW:
361 if (!hdev) { 632 if (hci_pi(sk)->hdev) {
362 err = -ENODEV; 633 err = -EALREADY;
363 goto done; 634 goto done;
364 } 635 }
365 636
366 atomic_inc(&hdev->promisc); 637 if (haddr.hci_dev != HCI_DEV_NONE) {
638 hdev = hci_dev_get(haddr.hci_dev);
639 if (!hdev) {
640 err = -ENODEV;
641 goto done;
642 }
643
644 atomic_inc(&hdev->promisc);
645 }
646
647 hci_pi(sk)->hdev = hdev;
648 break;
649
650 case HCI_CHANNEL_CONTROL:
651 if (haddr.hci_dev != HCI_DEV_NONE) {
652 err = -EINVAL;
653 goto done;
654 }
655
656 if (!capable(CAP_NET_ADMIN)) {
657 err = -EPERM;
658 goto done;
659 }
660
661 break;
662
663 case HCI_CHANNEL_MONITOR:
664 if (haddr.hci_dev != HCI_DEV_NONE) {
665 err = -EINVAL;
666 goto done;
667 }
668
669 if (!capable(CAP_NET_RAW)) {
670 err = -EPERM;
671 goto done;
672 }
673
674 send_monitor_replay(sk);
675
676 atomic_inc(&monitor_promisc);
677 break;
678
679 default:
680 err = -EINVAL;
681 goto done;
367 } 682 }
368 683
684
369 hci_pi(sk)->channel = haddr.hci_channel; 685 hci_pi(sk)->channel = haddr.hci_channel;
370 hci_pi(sk)->hdev = hdev;
371 sk->sk_state = BT_BOUND; 686 sk->sk_state = BT_BOUND;
372 687
373done: 688done:
@@ -418,7 +733,8 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
418 data = &tv; 733 data = &tv;
419 len = sizeof(tv); 734 len = sizeof(tv);
420#ifdef CONFIG_COMPAT 735#ifdef CONFIG_COMPAT
421 if (msg->msg_flags & MSG_CMSG_COMPAT) { 736 if (!COMPAT_USE_64BIT_TIME &&
737 (msg->msg_flags & MSG_CMSG_COMPAT)) {
422 ctv.tv_sec = tv.tv_sec; 738 ctv.tv_sec = tv.tv_sec;
423 ctv.tv_usec = tv.tv_usec; 739 ctv.tv_usec = tv.tv_usec;
424 data = &ctv; 740 data = &ctv;
@@ -461,7 +777,15 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
461 skb_reset_transport_header(skb); 777 skb_reset_transport_header(skb);
462 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 778 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
463 779
464 hci_sock_cmsg(sk, msg, skb); 780 switch (hci_pi(sk)->channel) {
781 case HCI_CHANNEL_RAW:
782 hci_sock_cmsg(sk, msg, skb);
783 break;
784 case HCI_CHANNEL_CONTROL:
785 case HCI_CHANNEL_MONITOR:
786 sock_recv_timestamp(msg, sk, skb);
787 break;
788 }
465 789
466 skb_free_datagram(sk, skb); 790 skb_free_datagram(sk, skb);
467 791
@@ -495,6 +819,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
495 case HCI_CHANNEL_CONTROL: 819 case HCI_CHANNEL_CONTROL:
496 err = mgmt_control(sk, msg, len); 820 err = mgmt_control(sk, msg, len);
497 goto done; 821 goto done;
822 case HCI_CHANNEL_MONITOR:
823 err = -EOPNOTSUPP;
824 goto done;
498 default: 825 default:
499 err = -EINVAL; 826 err = -EINVAL;
500 goto done; 827 goto done;
@@ -574,6 +901,11 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
574 901
575 lock_sock(sk); 902 lock_sock(sk);
576 903
904 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
905 err = -EINVAL;
906 goto done;
907 }
908
577 switch (optname) { 909 switch (optname) {
578 case HCI_DATA_DIR: 910 case HCI_DATA_DIR:
579 if (get_user(opt, (int __user *)optval)) { 911 if (get_user(opt, (int __user *)optval)) {
@@ -636,6 +968,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
636 break; 968 break;
637 } 969 }
638 970
971done:
639 release_sock(sk); 972 release_sock(sk);
640 return err; 973 return err;
641} 974}
@@ -644,11 +977,20 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
644{ 977{
645 struct hci_ufilter uf; 978 struct hci_ufilter uf;
646 struct sock *sk = sock->sk; 979 struct sock *sk = sock->sk;
647 int len, opt; 980 int len, opt, err = 0;
981
982 BT_DBG("sk %p, opt %d", sk, optname);
648 983
649 if (get_user(len, optlen)) 984 if (get_user(len, optlen))
650 return -EFAULT; 985 return -EFAULT;
651 986
987 lock_sock(sk);
988
989 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
990 err = -EINVAL;
991 goto done;
992 }
993
652 switch (optname) { 994 switch (optname) {
653 case HCI_DATA_DIR: 995 case HCI_DATA_DIR:
654 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) 996 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -657,7 +999,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
657 opt = 0; 999 opt = 0;
658 1000
659 if (put_user(opt, optval)) 1001 if (put_user(opt, optval))
660 return -EFAULT; 1002 err = -EFAULT;
661 break; 1003 break;
662 1004
663 case HCI_TIME_STAMP: 1005 case HCI_TIME_STAMP:
@@ -667,7 +1009,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
667 opt = 0; 1009 opt = 0;
668 1010
669 if (put_user(opt, optval)) 1011 if (put_user(opt, optval))
670 return -EFAULT; 1012 err = -EFAULT;
671 break; 1013 break;
672 1014
673 case HCI_FILTER: 1015 case HCI_FILTER:
@@ -682,15 +1024,17 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
682 1024
683 len = min_t(unsigned int, len, sizeof(uf)); 1025 len = min_t(unsigned int, len, sizeof(uf));
684 if (copy_to_user(optval, &uf, len)) 1026 if (copy_to_user(optval, &uf, len))
685 return -EFAULT; 1027 err = -EFAULT;
686 break; 1028 break;
687 1029
688 default: 1030 default:
689 return -ENOPROTOOPT; 1031 err = -ENOPROTOOPT;
690 break; 1032 break;
691 } 1033 }
692 1034
693 return 0; 1035done:
1036 release_sock(sk);
1037 return err;
694} 1038}
695 1039
696static const struct proto_ops hci_sock_ops = { 1040static const struct proto_ops hci_sock_ops = {
@@ -748,52 +1092,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
748 return 0; 1092 return 0;
749} 1093}
750 1094
751static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
752{
753 struct hci_dev *hdev = (struct hci_dev *) ptr;
754 struct hci_ev_si_device ev;
755
756 BT_DBG("hdev %s event %ld", hdev->name, event);
757
758 /* Send event to sockets */
759 ev.event = event;
760 ev.dev_id = hdev->id;
761 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
762
763 if (event == HCI_DEV_UNREG) {
764 struct sock *sk;
765 struct hlist_node *node;
766
767 /* Detach sockets from device */
768 read_lock(&hci_sk_list.lock);
769 sk_for_each(sk, node, &hci_sk_list.head) {
770 bh_lock_sock_nested(sk);
771 if (hci_pi(sk)->hdev == hdev) {
772 hci_pi(sk)->hdev = NULL;
773 sk->sk_err = EPIPE;
774 sk->sk_state = BT_OPEN;
775 sk->sk_state_change(sk);
776
777 hci_dev_put(hdev);
778 }
779 bh_unlock_sock(sk);
780 }
781 read_unlock(&hci_sk_list.lock);
782 }
783
784 return NOTIFY_DONE;
785}
786
787static const struct net_proto_family hci_sock_family_ops = { 1095static const struct net_proto_family hci_sock_family_ops = {
788 .family = PF_BLUETOOTH, 1096 .family = PF_BLUETOOTH,
789 .owner = THIS_MODULE, 1097 .owner = THIS_MODULE,
790 .create = hci_sock_create, 1098 .create = hci_sock_create,
791}; 1099};
792 1100
793static struct notifier_block hci_sock_nblock = {
794 .notifier_call = hci_sock_dev_event
795};
796
797int __init hci_sock_init(void) 1101int __init hci_sock_init(void)
798{ 1102{
799 int err; 1103 int err;
@@ -806,8 +1110,6 @@ int __init hci_sock_init(void)
806 if (err < 0) 1110 if (err < 0)
807 goto error; 1111 goto error;
808 1112
809 hci_register_notifier(&hci_sock_nblock);
810
811 BT_INFO("HCI socket layer initialized"); 1113 BT_INFO("HCI socket layer initialized");
812 1114
813 return 0; 1115 return 0;
@@ -823,10 +1125,5 @@ void hci_sock_cleanup(void)
823 if (bt_sock_unregister(BTPROTO_HCI) < 0) 1125 if (bt_sock_unregister(BTPROTO_HCI) < 0)
824 BT_ERR("HCI socket unregistration failed"); 1126 BT_ERR("HCI socket unregistration failed");
825 1127
826 hci_unregister_notifier(&hci_sock_nblock);
827
828 proto_unregister(&hci_sk_proto); 1128 proto_unregister(&hci_sk_proto);
829} 1129}
830
831module_param(enable_mgmt, bool, 0644);
832MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 521095614235..937f3187eafa 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -33,19 +33,19 @@ static inline char *link_typetostr(int type)
33 33
34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
35{ 35{
36 struct hci_conn *conn = dev_get_drvdata(dev); 36 struct hci_conn *conn = to_hci_conn(dev);
37 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 37 return sprintf(buf, "%s\n", link_typetostr(conn->type));
38} 38}
39 39
40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
41{ 41{
42 struct hci_conn *conn = dev_get_drvdata(dev); 42 struct hci_conn *conn = to_hci_conn(dev);
43 return sprintf(buf, "%s\n", batostr(&conn->dst)); 43 return sprintf(buf, "%s\n", batostr(&conn->dst));
44} 44}
45 45
46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
47{ 47{
48 struct hci_conn *conn = dev_get_drvdata(dev); 48 struct hci_conn *conn = to_hci_conn(dev);
49 49
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 51 conn->features[0], conn->features[1],
@@ -79,8 +79,8 @@ static const struct attribute_group *bt_link_groups[] = {
79 79
80static void bt_link_release(struct device *dev) 80static void bt_link_release(struct device *dev)
81{ 81{
82 void *data = dev_get_drvdata(dev); 82 struct hci_conn *conn = to_hci_conn(dev);
83 kfree(data); 83 kfree(conn);
84} 84}
85 85
86static struct device_type bt_link = { 86static struct device_type bt_link = {
@@ -120,8 +120,6 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
120 120
121 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 121 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
122 122
123 dev_set_drvdata(&conn->dev, conn);
124
125 if (device_add(&conn->dev) < 0) { 123 if (device_add(&conn->dev) < 0) {
126 BT_ERR("Failed to register connection device"); 124 BT_ERR("Failed to register connection device");
127 return; 125 return;
@@ -189,19 +187,19 @@ static inline char *host_typetostr(int type)
189 187
190static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 188static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
191{ 189{
192 struct hci_dev *hdev = dev_get_drvdata(dev); 190 struct hci_dev *hdev = to_hci_dev(dev);
193 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
194} 192}
195 193
196static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 194static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
197{ 195{
198 struct hci_dev *hdev = dev_get_drvdata(dev); 196 struct hci_dev *hdev = to_hci_dev(dev);
199 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); 197 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
200} 198}
201 199
202static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 200static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
203{ 201{
204 struct hci_dev *hdev = dev_get_drvdata(dev); 202 struct hci_dev *hdev = to_hci_dev(dev);
205 char name[HCI_MAX_NAME_LENGTH + 1]; 203 char name[HCI_MAX_NAME_LENGTH + 1];
206 int i; 204 int i;
207 205
@@ -214,20 +212,20 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
214 212
215static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 213static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
216{ 214{
217 struct hci_dev *hdev = dev_get_drvdata(dev); 215 struct hci_dev *hdev = to_hci_dev(dev);
218 return sprintf(buf, "0x%.2x%.2x%.2x\n", 216 return sprintf(buf, "0x%.2x%.2x%.2x\n",
219 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 217 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
220} 218}
221 219
222static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 220static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
223{ 221{
224 struct hci_dev *hdev = dev_get_drvdata(dev); 222 struct hci_dev *hdev = to_hci_dev(dev);
225 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 223 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
226} 224}
227 225
228static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 226static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
229{ 227{
230 struct hci_dev *hdev = dev_get_drvdata(dev); 228 struct hci_dev *hdev = to_hci_dev(dev);
231 229
232 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 230 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
233 hdev->features[0], hdev->features[1], 231 hdev->features[0], hdev->features[1],
@@ -238,31 +236,31 @@ static ssize_t show_features(struct device *dev, struct device_attribute *attr,
238 236
239static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 237static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
240{ 238{
241 struct hci_dev *hdev = dev_get_drvdata(dev); 239 struct hci_dev *hdev = to_hci_dev(dev);
242 return sprintf(buf, "%d\n", hdev->manufacturer); 240 return sprintf(buf, "%d\n", hdev->manufacturer);
243} 241}
244 242
245static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 243static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
246{ 244{
247 struct hci_dev *hdev = dev_get_drvdata(dev); 245 struct hci_dev *hdev = to_hci_dev(dev);
248 return sprintf(buf, "%d\n", hdev->hci_ver); 246 return sprintf(buf, "%d\n", hdev->hci_ver);
249} 247}
250 248
251static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 249static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
252{ 250{
253 struct hci_dev *hdev = dev_get_drvdata(dev); 251 struct hci_dev *hdev = to_hci_dev(dev);
254 return sprintf(buf, "%d\n", hdev->hci_rev); 252 return sprintf(buf, "%d\n", hdev->hci_rev);
255} 253}
256 254
257static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 255static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
258{ 256{
259 struct hci_dev *hdev = dev_get_drvdata(dev); 257 struct hci_dev *hdev = to_hci_dev(dev);
260 return sprintf(buf, "%d\n", hdev->idle_timeout); 258 return sprintf(buf, "%d\n", hdev->idle_timeout);
261} 259}
262 260
263static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 261static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
264{ 262{
265 struct hci_dev *hdev = dev_get_drvdata(dev); 263 struct hci_dev *hdev = to_hci_dev(dev);
266 unsigned int val; 264 unsigned int val;
267 int rv; 265 int rv;
268 266
@@ -280,13 +278,13 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
280 278
281static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
282{ 280{
283 struct hci_dev *hdev = dev_get_drvdata(dev); 281 struct hci_dev *hdev = to_hci_dev(dev);
284 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 282 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
285} 283}
286 284
287static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
288{ 286{
289 struct hci_dev *hdev = dev_get_drvdata(dev); 287 struct hci_dev *hdev = to_hci_dev(dev);
290 u16 val; 288 u16 val;
291 int rv; 289 int rv;
292 290
@@ -304,13 +302,13 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
304 302
305static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
306{ 304{
307 struct hci_dev *hdev = dev_get_drvdata(dev); 305 struct hci_dev *hdev = to_hci_dev(dev);
308 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 306 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
309} 307}
310 308
311static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
312{ 310{
313 struct hci_dev *hdev = dev_get_drvdata(dev); 311 struct hci_dev *hdev = to_hci_dev(dev);
314 u16 val; 312 u16 val;
315 int rv; 313 int rv;
316 314
@@ -370,8 +368,9 @@ static const struct attribute_group *bt_host_groups[] = {
370 368
371static void bt_host_release(struct device *dev) 369static void bt_host_release(struct device *dev)
372{ 370{
373 void *data = dev_get_drvdata(dev); 371 struct hci_dev *hdev = to_hci_dev(dev);
374 kfree(data); 372 kfree(hdev);
373 module_put(THIS_MODULE);
375} 374}
376 375
377static struct device_type bt_host = { 376static struct device_type bt_host = {
@@ -383,12 +382,12 @@ static struct device_type bt_host = {
383static int inquiry_cache_show(struct seq_file *f, void *p) 382static int inquiry_cache_show(struct seq_file *f, void *p)
384{ 383{
385 struct hci_dev *hdev = f->private; 384 struct hci_dev *hdev = f->private;
386 struct inquiry_cache *cache = &hdev->inq_cache; 385 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *e; 386 struct inquiry_entry *e;
388 387
389 hci_dev_lock(hdev); 388 hci_dev_lock(hdev);
390 389
391 for (e = cache->list; e; e = e->next) { 390 list_for_each_entry(e, &cache->all, all) {
392 struct inquiry_data *data = &e->data; 391 struct inquiry_data *data = &e->data;
393 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", 392 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
394 batostr(&data->bdaddr), 393 batostr(&data->bdaddr),
@@ -445,8 +444,8 @@ static const struct file_operations blacklist_fops = {
445 444
446static void print_bt_uuid(struct seq_file *f, u8 *uuid) 445static void print_bt_uuid(struct seq_file *f, u8 *uuid)
447{ 446{
448 u32 data0, data4; 447 __be32 data0, data4;
449 u16 data1, data2, data3, data5; 448 __be16 data1, data2, data3, data5;
450 449
451 memcpy(&data0, &uuid[0], 4); 450 memcpy(&data0, &uuid[0], 4);
452 memcpy(&data1, &uuid[4], 2); 451 memcpy(&data1, &uuid[4], 2);
@@ -523,7 +522,7 @@ void hci_init_sysfs(struct hci_dev *hdev)
523 dev->type = &bt_host; 522 dev->type = &bt_host;
524 dev->class = bt_class; 523 dev->class = bt_class;
525 524
526 dev_set_drvdata(dev, hdev); 525 __module_get(THIS_MODULE);
527 device_initialize(dev); 526 device_initialize(dev);
528} 527}
529 528
@@ -534,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
534 533
535 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 534 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
536 535
537 dev->parent = hdev->parent;
538 dev_set_name(dev, "%s", hdev->name); 536 dev_set_name(dev, "%s", hdev->name);
539 537
540 err = device_add(dev); 538 err = device_add(dev);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index d478be11d562..2c20d765b394 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1195,41 +1195,16 @@ int hidp_get_conninfo(struct hidp_conninfo *ci)
1195 return err; 1195 return err;
1196} 1196}
1197 1197
1198static const struct hid_device_id hidp_table[] = {
1199 { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) },
1200 { }
1201};
1202
1203static struct hid_driver hidp_driver = {
1204 .name = "generic-bluetooth",
1205 .id_table = hidp_table,
1206};
1207
1208static int __init hidp_init(void) 1198static int __init hidp_init(void)
1209{ 1199{
1210 int ret;
1211
1212 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); 1200 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
1213 1201
1214 ret = hid_register_driver(&hidp_driver); 1202 return hidp_init_sockets();
1215 if (ret)
1216 goto err;
1217
1218 ret = hidp_init_sockets();
1219 if (ret)
1220 goto err_drv;
1221
1222 return 0;
1223err_drv:
1224 hid_unregister_driver(&hidp_driver);
1225err:
1226 return ret;
1227} 1203}
1228 1204
1229static void __exit hidp_exit(void) 1205static void __exit hidp_exit(void)
1230{ 1206{
1231 hidp_cleanup_sockets(); 1207 hidp_cleanup_sockets();
1232 hid_unregister_driver(&hidp_driver);
1233} 1208}
1234 1209
1235module_init(hidp_init); 1210module_init(hidp_init);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 178ac7f127ad..73a32d705c1f 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -160,10 +160,10 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
160{ 160{
161 if (cmd == HIDPGETCONNLIST) { 161 if (cmd == HIDPGETCONNLIST) {
162 struct hidp_connlist_req cl; 162 struct hidp_connlist_req cl;
163 uint32_t uci; 163 u32 uci;
164 int err; 164 int err;
165 165
166 if (get_user(cl.cnum, (uint32_t __user *) arg) || 166 if (get_user(cl.cnum, (u32 __user *) arg) ||
167 get_user(uci, (u32 __user *) (arg + 4))) 167 get_user(uci, (u32 __user *) (arg + 4)))
168 return -EFAULT; 168 return -EFAULT;
169 169
@@ -174,7 +174,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
174 174
175 err = hidp_get_connlist(&cl); 175 err = hidp_get_connlist(&cl);
176 176
177 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 177 if (!err && put_user(cl.cnum, (u32 __user *) arg))
178 err = -EFAULT; 178 err = -EFAULT;
179 179
180 return err; 180 return err;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 32d338c30e65..24f144b72a96 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc. 5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems 6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
7 8
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 10
@@ -49,7 +50,6 @@
49#include <linux/crc16.h> 50#include <linux/crc16.h>
50#include <net/sock.h> 51#include <net/sock.h>
51 52
52#include <asm/system.h>
53#include <asm/unaligned.h> 53#include <asm/unaligned.h>
54 54
55#include <net/bluetooth/bluetooth.h> 55#include <net/bluetooth/bluetooth.h>
@@ -71,83 +71,56 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data); 71 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn, 73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err); 74 struct l2cap_chan *chan, int err);
75
76static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 75
78/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
79 77
80static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81{ 79{
82 struct l2cap_chan *c, *r = NULL; 80 struct l2cap_chan *c;
83
84 rcu_read_lock();
85 81
86 list_for_each_entry_rcu(c, &conn->chan_l, list) { 82 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid) { 83 if (c->dcid == cid)
88 r = c; 84 return c;
89 break;
90 }
91 } 85 }
92 86 return NULL;
93 rcu_read_unlock();
94 return r;
95} 87}
96 88
97static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 89static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
98{ 90{
99 struct l2cap_chan *c, *r = NULL; 91 struct l2cap_chan *c;
100
101 rcu_read_lock();
102 92
103 list_for_each_entry_rcu(c, &conn->chan_l, list) { 93 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->scid == cid) { 94 if (c->scid == cid)
105 r = c; 95 return c;
106 break;
107 }
108 } 96 }
109 97 return NULL;
110 rcu_read_unlock();
111 return r;
112} 98}
113 99
114/* Find channel with given SCID. 100/* Find channel with given SCID.
115 * Returns locked socket */ 101 * Returns locked channel. */
116static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 102static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117{ 103{
118 struct l2cap_chan *c; 104 struct l2cap_chan *c;
119 105
106 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid); 107 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c) 108 if (c)
122 lock_sock(c->sk); 109 l2cap_chan_lock(c);
110 mutex_unlock(&conn->chan_lock);
111
123 return c; 112 return c;
124} 113}
125 114
126static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 115static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127{ 116{
128 struct l2cap_chan *c, *r = NULL;
129
130 rcu_read_lock();
131
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
134 r = c;
135 break;
136 }
137 }
138
139 rcu_read_unlock();
140 return r;
141}
142
143static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144{
145 struct l2cap_chan *c; 117 struct l2cap_chan *c;
146 118
147 c = __l2cap_get_chan_by_ident(conn, ident); 119 list_for_each_entry(c, &conn->chan_l, list) {
148 if (c) 120 if (c->ident == ident)
149 lock_sock(c->sk); 121 return c;
150 return c; 122 }
123 return NULL;
151} 124}
152 125
153static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) 126static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
@@ -217,51 +190,169 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
217 return 0; 190 return 0;
218} 191}
219 192
220static char *state_to_string(int state) 193static void __l2cap_state_change(struct l2cap_chan *chan, int state)
221{ 194{
222 switch(state) { 195 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
223 case BT_CONNECTED: 196 state_to_string(state));
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
241 }
242 197
243 return "invalid state"; 198 chan->state = state;
199 chan->ops->state_change(chan->data, state);
244} 200}
245 201
246static void l2cap_state_change(struct l2cap_chan *chan, int state) 202static void l2cap_state_change(struct l2cap_chan *chan, int state)
247{ 203{
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state), 204 struct sock *sk = chan->sk;
249 state_to_string(state));
250 205
251 chan->state = state; 206 lock_sock(sk);
252 chan->ops->state_change(chan->data, state); 207 __l2cap_state_change(chan, state);
208 release_sock(sk);
209}
210
211static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
212{
213 struct sock *sk = chan->sk;
214
215 sk->sk_err = err;
216}
217
218static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
219{
220 struct sock *sk = chan->sk;
221
222 lock_sock(sk);
223 __l2cap_chan_set_err(chan, err);
224 release_sock(sk);
225}
226
227/* ---- L2CAP sequence number lists ---- */
228
229/* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
235 * allocs or frees.
236 */
237
238static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
239{
240 size_t alloc_size, i;
241
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
245 */
246 alloc_size = roundup_pow_of_two(size);
247
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
249 if (!seq_list->list)
250 return -ENOMEM;
251
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
257
258 return 0;
259}
260
261static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
262{
263 kfree(seq_list->list);
264}
265
266static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
267 u16 seq)
268{
269 /* Constant-time check for list membership */
270 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
271}
272
273static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
274{
275 u16 mask = seq_list->mask;
276
277 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR;
280 } else if (seq_list->head == seq) {
281 /* Head can be removed in constant time */
282 seq_list->head = seq_list->list[seq & mask];
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
284
285 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
286 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
287 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
288 }
289 } else {
290 /* Walk the list to find the sequence number */
291 u16 prev = seq_list->head;
292 while (seq_list->list[prev & mask] != seq) {
293 prev = seq_list->list[prev & mask];
294 if (prev == L2CAP_SEQ_LIST_TAIL)
295 return L2CAP_SEQ_LIST_CLEAR;
296 }
297
298 /* Unlink the number from the list and clear it */
299 seq_list->list[prev & mask] = seq_list->list[seq & mask];
300 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->tail == seq)
302 seq_list->tail = prev;
303 }
304 return seq;
305}
306
307static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
308{
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list, seq_list->head);
311}
312
313static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
314{
315 u16 i;
316
317 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
318 return;
319
320 for (i = 0; i <= seq_list->mask; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
322
323 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
324 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
325}
326
327static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
328{
329 u16 mask = seq_list->mask;
330
331 /* All appends happen in constant time */
332
333 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
337 seq_list->head = seq;
338 else
339 seq_list->list[seq_list->tail & mask] = seq;
340
341 seq_list->tail = seq;
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
253} 343}
254 344
255static void l2cap_chan_timeout(struct work_struct *work) 345static void l2cap_chan_timeout(struct work_struct *work)
256{ 346{
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 347 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work); 348 chan_timer.work);
259 struct sock *sk = chan->sk; 349 struct l2cap_conn *conn = chan->conn;
260 int reason; 350 int reason;
261 351
262 BT_DBG("chan %p state %d", chan, chan->state); 352 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
263 353
264 lock_sock(sk); 354 mutex_lock(&conn->chan_lock);
355 l2cap_chan_lock(chan);
265 356
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 357 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED; 358 reason = ECONNREFUSED;
@@ -273,13 +364,15 @@ static void l2cap_chan_timeout(struct work_struct *work)
273 364
274 l2cap_chan_close(chan, reason); 365 l2cap_chan_close(chan, reason);
275 366
276 release_sock(sk); 367 l2cap_chan_unlock(chan);
277 368
278 chan->ops->close(chan->data); 369 chan->ops->close(chan->data);
370 mutex_unlock(&conn->chan_lock);
371
279 l2cap_chan_put(chan); 372 l2cap_chan_put(chan);
280} 373}
281 374
282struct l2cap_chan *l2cap_chan_create(struct sock *sk) 375struct l2cap_chan *l2cap_chan_create(void)
283{ 376{
284 struct l2cap_chan *chan; 377 struct l2cap_chan *chan;
285 378
@@ -287,7 +380,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
287 if (!chan) 380 if (!chan)
288 return NULL; 381 return NULL;
289 382
290 chan->sk = sk; 383 mutex_init(&chan->lock);
291 384
292 write_lock(&chan_list_lock); 385 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list); 386 list_add(&chan->global_l, &chan_list);
@@ -299,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
299 392
300 atomic_set(&chan->refcnt, 1); 393 atomic_set(&chan->refcnt, 1);
301 394
302 BT_DBG("sk %p chan %p", sk, chan); 395 BT_DBG("chan %p", chan);
303 396
304 return chan; 397 return chan;
305} 398}
@@ -313,16 +406,28 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
313 l2cap_chan_put(chan); 406 l2cap_chan_put(chan);
314} 407}
315 408
316static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 409void l2cap_chan_set_defaults(struct l2cap_chan *chan)
410{
411 chan->fcs = L2CAP_FCS_CRC16;
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW;
416
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
418}
419
420static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
317{ 421{
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 422 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid); 423 __le16_to_cpu(chan->psm), chan->dcid);
320 424
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 425 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
322 426
323 chan->conn = conn; 427 chan->conn = conn;
324 428
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 429 switch (chan->chan_type) {
430 case L2CAP_CHAN_CONN_ORIENTED:
326 if (conn->hcon->type == LE_LINK) { 431 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */ 432 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU; 433 chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -333,12 +438,16 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
333 chan->scid = l2cap_alloc_cid(conn); 438 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU; 439 chan->omtu = L2CAP_DEFAULT_MTU;
335 } 440 }
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 441 break;
442
443 case L2CAP_CHAN_CONN_LESS:
337 /* Connectionless socket */ 444 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS; 445 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS; 446 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU; 447 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else { 448 break;
449
450 default:
342 /* Raw socket can send/recv signalling messages only */ 451 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING; 452 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING; 453 chan->dcid = L2CAP_CID_SIGNALING;
@@ -354,11 +463,16 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
354 463
355 l2cap_chan_hold(chan); 464 l2cap_chan_hold(chan);
356 465
357 list_add_rcu(&chan->list, &conn->chan_l); 466 list_add(&chan->list, &conn->chan_l);
467}
468
469static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
470{
471 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock);
358} 474}
359 475
360/* Delete channel.
361 * Must be called on the locked socket. */
362static void l2cap_chan_del(struct l2cap_chan *chan, int err) 476static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363{ 477{
364 struct sock *sk = chan->sk; 478 struct sock *sk = chan->sk;
@@ -371,8 +485,7 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
371 485
372 if (conn) { 486 if (conn) {
373 /* Delete from channel list */ 487 /* Delete from channel list */
374 list_del_rcu(&chan->list); 488 list_del(&chan->list);
375 synchronize_rcu();
376 489
377 l2cap_chan_put(chan); 490 l2cap_chan_put(chan);
378 491
@@ -380,11 +493,13 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
380 hci_conn_put(conn->hcon); 493 hci_conn_put(conn->hcon);
381 } 494 }
382 495
383 l2cap_state_change(chan, BT_CLOSED); 496 lock_sock(sk);
497
498 __l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED); 499 sock_set_flag(sk, SOCK_ZAPPED);
385 500
386 if (err) 501 if (err)
387 sk->sk_err = err; 502 __l2cap_chan_set_err(chan, err);
388 503
389 if (parent) { 504 if (parent) {
390 bt_accept_unlink(sk); 505 bt_accept_unlink(sk);
@@ -392,6 +507,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
392 } else 507 } else
393 sk->sk_state_change(sk); 508 sk->sk_state_change(sk);
394 509
510 release_sock(sk);
511
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && 512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state))) 513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return; 514 return;
@@ -407,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
407 524
408 skb_queue_purge(&chan->srej_q); 525 skb_queue_purge(&chan->srej_q);
409 526
527 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list);
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
411 list_del(&l->list); 530 list_del(&l->list);
412 kfree(l); 531 kfree(l);
@@ -423,10 +542,12 @@ static void l2cap_chan_cleanup_listen(struct sock *parent)
423 /* Close not yet accepted channels */ 542 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) { 543 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
545
546 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan); 547 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET); 548 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk); 549 l2cap_chan_unlock(chan);
550
430 chan->ops->close(chan->data); 551 chan->ops->close(chan->data);
431 } 552 }
432} 553}
@@ -436,21 +557,23 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn; 557 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk; 558 struct sock *sk = chan->sk;
438 559
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket); 560 BT_DBG("chan %p state %s sk %p", chan,
561 state_to_string(chan->state), sk);
440 562
441 switch (chan->state) { 563 switch (chan->state) {
442 case BT_LISTEN: 564 case BT_LISTEN:
565 lock_sock(sk);
443 l2cap_chan_cleanup_listen(sk); 566 l2cap_chan_cleanup_listen(sk);
444 567
445 l2cap_state_change(chan, BT_CLOSED); 568 __l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED); 569 sock_set_flag(sk, SOCK_ZAPPED);
570 release_sock(sk);
447 break; 571 break;
448 572
449 case BT_CONNECTED: 573 case BT_CONNECTED:
450 case BT_CONFIG: 574 case BT_CONFIG:
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 575 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) { 576 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo); 577 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason); 578 l2cap_send_disconn_req(conn, chan, reason);
456 } else 579 } else
@@ -463,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
463 struct l2cap_conn_rsp rsp; 586 struct l2cap_conn_rsp rsp;
464 __u16 result; 587 __u16 result;
465 588
466 if (bt_sk(sk)->defer_setup) 589 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
467 result = L2CAP_CR_SEC_BLOCK; 590 result = L2CAP_CR_SEC_BLOCK;
468 else 591 else
469 result = L2CAP_CR_BAD_PSM; 592 result = L2CAP_CR_BAD_PSM;
@@ -486,7 +609,9 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
486 break; 609 break;
487 610
488 default: 611 default:
612 lock_sock(sk);
489 sock_set_flag(sk, SOCK_ZAPPED); 613 sock_set_flag(sk, SOCK_ZAPPED);
614 release_sock(sk);
490 break; 615 break;
491 } 616 }
492} 617}
@@ -594,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
594 hci_send_acl(chan->conn->hchan, skb, flags); 719 hci_send_acl(chan->conn->hchan, skb, flags);
595} 720}
596 721
722static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
723{
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
726
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
728 /* S-Frame */
729 control->sframe = 1;
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
732
733 control->sar = 0;
734 control->txseq = 0;
735 } else {
736 /* I-Frame */
737 control->sframe = 0;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
740
741 control->poll = 0;
742 control->super = 0;
743 }
744}
745
746static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
747{
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
750
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
752 /* S-Frame */
753 control->sframe = 1;
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
756
757 control->sar = 0;
758 control->txseq = 0;
759 } else {
760 /* I-Frame */
761 control->sframe = 0;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
764
765 control->poll = 0;
766 control->super = 0;
767 }
768}
769
770static inline void __unpack_control(struct l2cap_chan *chan,
771 struct sk_buff *skb)
772{
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control);
776 } else {
777 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control);
779 }
780}
781
782static u32 __pack_extended_control(struct l2cap_ctrl *control)
783{
784 u32 packed;
785
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
788
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
793 } else {
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
796 }
797
798 return packed;
799}
800
801static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
802{
803 u16 packed;
804
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
807
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
812 } else {
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
815 }
816
817 return packed;
818}
819
820static inline void __pack_control(struct l2cap_chan *chan,
821 struct l2cap_ctrl *control,
822 struct sk_buff *skb)
823{
824 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
825 put_unaligned_le32(__pack_extended_control(control),
826 skb->data + L2CAP_HDR_SIZE);
827 } else {
828 put_unaligned_le16(__pack_enhanced_control(control),
829 skb->data + L2CAP_HDR_SIZE);
830 }
831}
832
597static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
598{ 834{
599 struct sk_buff *skb; 835 struct sk_buff *skb;
@@ -661,26 +897,60 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); 897 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
662} 898}
663 899
900static void l2cap_send_conn_req(struct l2cap_chan *chan)
901{
902 struct l2cap_conn *conn = chan->conn;
903 struct l2cap_conn_req req;
904
905 req.scid = cpu_to_le16(chan->scid);
906 req.psm = chan->psm;
907
908 chan->ident = l2cap_get_ident(conn);
909
910 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
911
912 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
913}
914
915static void l2cap_chan_ready(struct l2cap_chan *chan)
916{
917 struct sock *sk = chan->sk;
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
928
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
931
932 if (parent)
933 parent->sk_data_ready(parent, 0);
934
935 release_sock(sk);
936}
937
664static void l2cap_do_start(struct l2cap_chan *chan) 938static void l2cap_do_start(struct l2cap_chan *chan)
665{ 939{
666 struct l2cap_conn *conn = chan->conn; 940 struct l2cap_conn *conn = chan->conn;
667 941
942 if (conn->hcon->type == LE_LINK) {
943 l2cap_chan_ready(chan);
944 return;
945 }
946
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { 947 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 948 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 return; 949 return;
671 950
672 if (l2cap_chan_check_security(chan) && 951 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) { 952 __l2cap_no_conn_pending(chan))
674 struct l2cap_conn_req req; 953 l2cap_send_conn_req(chan);
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
683 }
684 } else { 954 } else {
685 struct l2cap_info_req req; 955 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -688,8 +958,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn); 959 conn->info_ident = l2cap_get_ident(conn);
690 960
691 schedule_delayed_work(&conn->info_timer, 961 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
693 962
694 l2cap_send_cmd(conn, conn->info_ident, 963 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req); 964 L2CAP_INFO_REQ, sizeof(req), &req);
@@ -714,14 +983,12 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
714 983
715static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 984static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716{ 985{
717 struct sock *sk; 986 struct sock *sk = chan->sk;
718 struct l2cap_disconn_req req; 987 struct l2cap_disconn_req req;
719 988
720 if (!conn) 989 if (!conn)
721 return; 990 return;
722 991
723 sk = chan->sk;
724
725 if (chan->mode == L2CAP_MODE_ERTM) { 992 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan); 993 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan); 994 __clear_monitor_timer(chan);
@@ -733,56 +1000,47 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
733 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1000 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req); 1001 L2CAP_DISCONN_REQ, sizeof(req), &req);
735 1002
736 l2cap_state_change(chan, BT_DISCONN); 1003 lock_sock(sk);
737 sk->sk_err = err; 1004 __l2cap_state_change(chan, BT_DISCONN);
1005 __l2cap_chan_set_err(chan, err);
1006 release_sock(sk);
738} 1007}
739 1008
740/* ---- L2CAP connections ---- */ 1009/* ---- L2CAP connections ---- */
741static void l2cap_conn_start(struct l2cap_conn *conn) 1010static void l2cap_conn_start(struct l2cap_conn *conn)
742{ 1011{
743 struct l2cap_chan *chan; 1012 struct l2cap_chan *chan, *tmp;
744 1013
745 BT_DBG("conn %p", conn); 1014 BT_DBG("conn %p", conn);
746 1015
747 rcu_read_lock(); 1016 mutex_lock(&conn->chan_lock);
748 1017
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 1018 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
750 struct sock *sk = chan->sk; 1019 struct sock *sk = chan->sk;
751 1020
752 bh_lock_sock(sk); 1021 l2cap_chan_lock(chan);
753 1022
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 1023 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk); 1024 l2cap_chan_unlock(chan);
756 continue; 1025 continue;
757 } 1026 }
758 1027
759 if (chan->state == BT_CONNECT) { 1028 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
761
762 if (!l2cap_chan_check_security(chan) || 1029 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) { 1030 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk); 1031 l2cap_chan_unlock(chan);
765 continue; 1032 continue;
766 } 1033 }
767 1034
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask) 1035 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE, 1036 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) { 1037 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET); 1038 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk); 1039 l2cap_chan_unlock(chan);
775 continue; 1040 continue;
776 } 1041 }
777 1042
778 req.scid = cpu_to_le16(chan->scid); 1043 l2cap_send_conn_req(chan);
779 req.psm = chan->psm;
780
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
783
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
786 1044
787 } else if (chan->state == BT_CONNECT2) { 1045 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp; 1046 struct l2cap_conn_rsp rsp;
@@ -791,7 +1049,9 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
791 rsp.dcid = cpu_to_le16(chan->scid); 1049 rsp.dcid = cpu_to_le16(chan->scid);
792 1050
793 if (l2cap_chan_check_security(chan)) { 1051 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) { 1052 lock_sock(sk);
1053 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) {
795 struct sock *parent = bt_sk(sk)->parent; 1055 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -799,10 +1059,11 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
799 parent->sk_data_ready(parent, 0); 1059 parent->sk_data_ready(parent, 0);
800 1060
801 } else { 1061 } else {
802 l2cap_state_change(chan, BT_CONFIG); 1062 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 } 1065 }
1066 release_sock(sk);
806 } else { 1067 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -813,7 +1074,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
813 1074
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) || 1075 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) { 1076 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk); 1077 l2cap_chan_unlock(chan);
817 continue; 1078 continue;
818 } 1079 }
819 1080
@@ -823,16 +1084,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
823 chan->num_conf_req++; 1084 chan->num_conf_req++;
824 } 1085 }
825 1086
826 bh_unlock_sock(sk); 1087 l2cap_chan_unlock(chan);
827 } 1088 }
828 1089
829 rcu_read_unlock(); 1090 mutex_unlock(&conn->chan_lock);
830} 1091}
831 1092
832/* Find socket with cid and source bdaddr. 1093/* Find socket with cid and source/destination bdaddr.
833 * Returns closest match, locked. 1094 * Returns closest match, locked.
834 */ 1095 */
835static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src) 1096static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1097 bdaddr_t *src,
1098 bdaddr_t *dst)
836{ 1099{
837 struct l2cap_chan *c, *c1 = NULL; 1100 struct l2cap_chan *c, *c1 = NULL;
838 1101
@@ -845,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
845 continue; 1108 continue;
846 1109
847 if (c->scid == cid) { 1110 if (c->scid == cid) {
1111 int src_match, dst_match;
1112 int src_any, dst_any;
1113
848 /* Exact match. */ 1114 /* Exact match. */
849 if (!bacmp(&bt_sk(sk)->src, src)) { 1115 src_match = !bacmp(&bt_sk(sk)->src, src);
1116 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1117 if (src_match && dst_match) {
850 read_unlock(&chan_list_lock); 1118 read_unlock(&chan_list_lock);
851 return c; 1119 return c;
852 } 1120 }
853 1121
854 /* Closest match */ 1122 /* Closest match */
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 1123 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1124 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1125 if ((src_match && dst_any) || (src_any && dst_match) ||
1126 (src_any && dst_any))
856 c1 = c; 1127 c1 = c;
857 } 1128 }
858 } 1129 }
@@ -871,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
871 1142
872 /* Check if we have socket listening on cid */ 1143 /* Check if we have socket listening on cid */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, 1144 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
874 conn->src); 1145 conn->src, conn->dst);
875 if (!pchan) 1146 if (!pchan)
876 return; 1147 return;
877 1148
@@ -902,30 +1173,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
902 1173
903 __set_chan_timer(chan, sk->sk_sndtimeo); 1174 __set_chan_timer(chan, sk->sk_sndtimeo);
904 1175
905 l2cap_state_change(chan, BT_CONNECTED); 1176 __l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0); 1177 parent->sk_data_ready(parent, 0);
907 1178
908clean: 1179clean:
909 release_sock(parent); 1180 release_sock(parent);
910} 1181}
911 1182
912static void l2cap_chan_ready(struct sock *sk)
913{
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
916
917 BT_DBG("sk %p, parent %p", sk, parent);
918
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
921
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
924
925 if (parent)
926 parent->sk_data_ready(parent, 0);
927}
928
929static void l2cap_conn_ready(struct l2cap_conn *conn) 1183static void l2cap_conn_ready(struct l2cap_conn *conn)
930{ 1184{
931 struct l2cap_chan *chan; 1185 struct l2cap_chan *chan;
@@ -938,29 +1192,31 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
938 if (conn->hcon->out && conn->hcon->type == LE_LINK) 1192 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level); 1193 smp_conn_security(conn, conn->hcon->pending_sec_level);
940 1194
941 rcu_read_lock(); 1195 mutex_lock(&conn->chan_lock);
942 1196
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 1197 list_for_each_entry(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
945 1198
946 bh_lock_sock(sk); 1199 l2cap_chan_lock(chan);
947 1200
948 if (conn->hcon->type == LE_LINK) { 1201 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level)) 1202 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk); 1203 l2cap_chan_ready(chan);
951 1204
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 1205 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1206 struct sock *sk = chan->sk;
953 __clear_chan_timer(chan); 1207 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED); 1208 lock_sock(sk);
1209 __l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk); 1210 sk->sk_state_change(sk);
1211 release_sock(sk);
956 1212
957 } else if (chan->state == BT_CONNECT) 1213 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan); 1214 l2cap_do_start(chan);
959 1215
960 bh_unlock_sock(sk); 1216 l2cap_chan_unlock(chan);
961 } 1217 }
962 1218
963 rcu_read_unlock(); 1219 mutex_unlock(&conn->chan_lock);
964} 1220}
965 1221
966/* Notify sockets that we cannot guaranty reliability anymore */ 1222/* Notify sockets that we cannot guaranty reliability anymore */
@@ -970,16 +1226,14 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
970 1226
971 BT_DBG("conn %p", conn); 1227 BT_DBG("conn %p", conn);
972 1228
973 rcu_read_lock(); 1229 mutex_lock(&conn->chan_lock);
974
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
977 1230
1231 list_for_each_entry(chan, &conn->chan_l, list) {
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) 1232 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err; 1233 __l2cap_chan_set_err(chan, err);
980 } 1234 }
981 1235
982 rcu_read_unlock(); 1236 mutex_unlock(&conn->chan_lock);
983} 1237}
984 1238
985static void l2cap_info_timeout(struct work_struct *work) 1239static void l2cap_info_timeout(struct work_struct *work)
@@ -997,7 +1251,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
997{ 1251{
998 struct l2cap_conn *conn = hcon->l2cap_data; 1252 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l; 1253 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001 1254
1002 if (!conn) 1255 if (!conn)
1003 return; 1256 return;
@@ -1006,21 +1259,29 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1006 1259
1007 kfree_skb(conn->rx_skb); 1260 kfree_skb(conn->rx_skb);
1008 1261
1262 mutex_lock(&conn->chan_lock);
1263
1009 /* Kill channels */ 1264 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1265 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk; 1266 l2cap_chan_hold(chan);
1012 lock_sock(sk); 1267 l2cap_chan_lock(chan);
1268
1013 l2cap_chan_del(chan, err); 1269 l2cap_chan_del(chan, err);
1014 release_sock(sk); 1270
1271 l2cap_chan_unlock(chan);
1272
1015 chan->ops->close(chan->data); 1273 chan->ops->close(chan->data);
1274 l2cap_chan_put(chan);
1016 } 1275 }
1017 1276
1277 mutex_unlock(&conn->chan_lock);
1278
1018 hci_chan_del(conn->hchan); 1279 hci_chan_del(conn->hchan);
1019 1280
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 cancel_delayed_work_sync(&conn->info_timer); 1282 cancel_delayed_work_sync(&conn->info_timer);
1022 1283
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { 1284 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1024 cancel_delayed_work_sync(&conn->security_timer); 1285 cancel_delayed_work_sync(&conn->security_timer);
1025 smp_chan_destroy(conn); 1286 smp_chan_destroy(conn);
1026 } 1287 }
@@ -1072,6 +1333,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1072 conn->feat_mask = 0; 1333 conn->feat_mask = 0;
1073 1334
1074 spin_lock_init(&conn->lock); 1335 spin_lock_init(&conn->lock);
1336 mutex_init(&conn->chan_lock);
1075 1337
1076 INIT_LIST_HEAD(&conn->chan_l); 1338 INIT_LIST_HEAD(&conn->chan_l);
1077 1339
@@ -1087,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1087 1349
1088/* ---- Socket interface ---- */ 1350/* ---- Socket interface ---- */
1089 1351
1090/* Find socket with psm and source bdaddr. 1352/* Find socket with psm and source / destination bdaddr.
1091 * Returns closest match. 1353 * Returns closest match.
1092 */ 1354 */
1093static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src) 1355static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1356 bdaddr_t *src,
1357 bdaddr_t *dst)
1094{ 1358{
1095 struct l2cap_chan *c, *c1 = NULL; 1359 struct l2cap_chan *c, *c1 = NULL;
1096 1360
@@ -1103,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
1103 continue; 1367 continue;
1104 1368
1105 if (c->psm == psm) { 1369 if (c->psm == psm) {
1370 int src_match, dst_match;
1371 int src_any, dst_any;
1372
1106 /* Exact match. */ 1373 /* Exact match. */
1107 if (!bacmp(&bt_sk(sk)->src, src)) { 1374 src_match = !bacmp(&bt_sk(sk)->src, src);
1375 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1376 if (src_match && dst_match) {
1108 read_unlock(&chan_list_lock); 1377 read_unlock(&chan_list_lock);
1109 return c; 1378 return c;
1110 } 1379 }
1111 1380
1112 /* Closest match */ 1381 /* Closest match */
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 1382 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1383 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1384 if ((src_match && dst_any) || (src_any && dst_match) ||
1385 (src_any && dst_any))
1114 c1 = c; 1386 c1 = c;
1115 } 1387 }
1116 } 1388 }
@@ -1120,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
1120 return c1; 1392 return c1;
1121} 1393}
1122 1394
1123int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) 1395int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1396 bdaddr_t *dst, u8 dst_type)
1124{ 1397{
1125 struct sock *sk = chan->sk; 1398 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src; 1399 bdaddr_t *src = &bt_sk(sk)->src;
@@ -1130,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1130 __u8 auth_type; 1403 __u8 auth_type;
1131 int err; 1404 int err;
1132 1405
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), 1406 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1134 chan->psm); 1407 dst_type, __le16_to_cpu(chan->psm));
1135 1408
1136 hdev = hci_get_route(dst, src); 1409 hdev = hci_get_route(dst, src);
1137 if (!hdev) 1410 if (!hdev)
@@ -1139,7 +1412,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1139 1412
1140 hci_dev_lock(hdev); 1413 hci_dev_lock(hdev);
1141 1414
1142 lock_sock(sk); 1415 l2cap_chan_lock(chan);
1143 1416
1144 /* PSM must be odd and lsb of upper byte must be 0 */ 1417 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && 1418 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
@@ -1166,17 +1439,21 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1166 goto done; 1439 goto done;
1167 } 1440 }
1168 1441
1442 lock_sock(sk);
1443
1169 switch (sk->sk_state) { 1444 switch (sk->sk_state) {
1170 case BT_CONNECT: 1445 case BT_CONNECT:
1171 case BT_CONNECT2: 1446 case BT_CONNECT2:
1172 case BT_CONFIG: 1447 case BT_CONFIG:
1173 /* Already connecting */ 1448 /* Already connecting */
1174 err = 0; 1449 err = 0;
1450 release_sock(sk);
1175 goto done; 1451 goto done;
1176 1452
1177 case BT_CONNECTED: 1453 case BT_CONNECTED:
1178 /* Already connected */ 1454 /* Already connected */
1179 err = -EISCONN; 1455 err = -EISCONN;
1456 release_sock(sk);
1180 goto done; 1457 goto done;
1181 1458
1182 case BT_OPEN: 1459 case BT_OPEN:
@@ -1186,22 +1463,26 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1186 1463
1187 default: 1464 default:
1188 err = -EBADFD; 1465 err = -EBADFD;
1466 release_sock(sk);
1189 goto done; 1467 goto done;
1190 } 1468 }
1191 1469
1192 /* Set destination address and psm */ 1470 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst); 1471 bacpy(&bt_sk(sk)->dst, dst);
1472
1473 release_sock(sk);
1474
1194 chan->psm = psm; 1475 chan->psm = psm;
1195 chan->dcid = cid; 1476 chan->dcid = cid;
1196 1477
1197 auth_type = l2cap_get_auth_type(chan); 1478 auth_type = l2cap_get_auth_type(chan);
1198 1479
1199 if (chan->dcid == L2CAP_CID_LE_DATA) 1480 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst, 1481 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1201 chan->sec_level, auth_type); 1482 chan->sec_level, auth_type);
1202 else 1483 else
1203 hcon = hci_connect(hdev, ACL_LINK, dst, 1484 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1204 chan->sec_level, auth_type); 1485 chan->sec_level, auth_type);
1205 1486
1206 if (IS_ERR(hcon)) { 1487 if (IS_ERR(hcon)) {
1207 err = PTR_ERR(hcon); 1488 err = PTR_ERR(hcon);
@@ -1215,10 +1496,24 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1215 goto done; 1496 goto done;
1216 } 1497 }
1217 1498
1499 if (hcon->type == LE_LINK) {
1500 err = 0;
1501
1502 if (!list_empty(&conn->chan_l)) {
1503 err = -EBUSY;
1504 hci_conn_put(hcon);
1505 }
1506
1507 if (err)
1508 goto done;
1509 }
1510
1218 /* Update source addr of the socket */ 1511 /* Update source addr of the socket */
1219 bacpy(src, conn->src); 1512 bacpy(src, conn->src);
1220 1513
1514 l2cap_chan_unlock(chan);
1221 l2cap_chan_add(conn, chan); 1515 l2cap_chan_add(conn, chan);
1516 l2cap_chan_lock(chan);
1222 1517
1223 l2cap_state_change(chan, BT_CONNECT); 1518 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo); 1519 __set_chan_timer(chan, sk->sk_sndtimeo);
@@ -1235,6 +1530,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1235 err = 0; 1530 err = 0;
1236 1531
1237done: 1532done:
1533 l2cap_chan_unlock(chan);
1238 hci_dev_unlock(hdev); 1534 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev); 1535 hci_dev_put(hdev);
1240 return err; 1536 return err;
@@ -1276,14 +1572,15 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1276{ 1572{
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1573 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work); 1574 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1280 1575
1281 BT_DBG("chan %p", chan); 1576 BT_DBG("chan %p", chan);
1282 1577
1283 lock_sock(sk); 1578 l2cap_chan_lock(chan);
1579
1284 if (chan->retry_count >= chan->remote_max_tx) { 1580 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1581 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk); 1582 l2cap_chan_unlock(chan);
1583 l2cap_chan_put(chan);
1287 return; 1584 return;
1288 } 1585 }
1289 1586
@@ -1291,25 +1588,28 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1291 __set_monitor_timer(chan); 1588 __set_monitor_timer(chan);
1292 1589
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1590 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk); 1591 l2cap_chan_unlock(chan);
1592 l2cap_chan_put(chan);
1295} 1593}
1296 1594
1297static void l2cap_retrans_timeout(struct work_struct *work) 1595static void l2cap_retrans_timeout(struct work_struct *work)
1298{ 1596{
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1597 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work); 1598 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1302 1599
1303 BT_DBG("chan %p", chan); 1600 BT_DBG("chan %p", chan);
1304 1601
1305 lock_sock(sk); 1602 l2cap_chan_lock(chan);
1603
1306 chan->retry_count = 1; 1604 chan->retry_count = 1;
1307 __set_monitor_timer(chan); 1605 __set_monitor_timer(chan);
1308 1606
1309 set_bit(CONN_WAIT_F, &chan->conn_state); 1607 set_bit(CONN_WAIT_F, &chan->conn_state);
1310 1608
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1609 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk); 1610
1611 l2cap_chan_unlock(chan);
1612 l2cap_chan_put(chan);
1313} 1613}
1314 1614
1315static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1615static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1318,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1318 1618
1319 while ((skb = skb_peek(&chan->tx_q)) && 1619 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) { 1620 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq) 1621 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1322 break; 1622 break;
1323 1623
1324 skb = skb_dequeue(&chan->tx_q); 1624 skb = skb_dequeue(&chan->tx_q);
@@ -1340,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
1340 while ((skb = skb_dequeue(&chan->tx_q))) { 1640 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); 1641 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq); 1642 control |= __set_txseq(chan, chan->next_tx_seq);
1643 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); 1644 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1344 1645
1345 if (chan->fcs == L2CAP_FCS_CRC16) { 1646 if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1365,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1365 if (!skb) 1666 if (!skb)
1366 return; 1667 return;
1367 1668
1368 while (bt_cb(skb)->tx_seq != tx_seq) { 1669 while (bt_cb(skb)->control.txseq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb)) 1670 if (skb_queue_is_last(&chan->tx_q, skb))
1370 return; 1671 return;
1371 1672
1372 skb = skb_queue_next(&chan->tx_q, skb); 1673 skb = skb_queue_next(&chan->tx_q, skb);
1373 } 1674 }
1374 1675
1375 if (chan->remote_max_tx && 1676 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) { 1677 chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 return; 1679 return;
1379 } 1680 }
1380 1681
1381 tx_skb = skb_clone(skb, GFP_ATOMIC); 1682 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++; 1683 bt_cb(skb)->control.retries++;
1383 1684
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan); 1686 control &= __get_sar_mask(chan);
@@ -1412,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1412 if (chan->state != BT_CONNECTED) 1713 if (chan->state != BT_CONNECTED)
1413 return -ENOTCONN; 1714 return -ENOTCONN;
1414 1715
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1717 return 0;
1718
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1719 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1416 1720
1417 if (chan->remote_max_tx && 1721 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) { 1722 chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1723 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1420 break; 1724 break;
1421 } 1725 }
1422 1726
1423 tx_skb = skb_clone(skb, GFP_ATOMIC); 1727 tx_skb = skb_clone(skb, GFP_ATOMIC);
1424 1728
1425 bt_cb(skb)->retries++; 1729 bt_cb(skb)->control.retries++;
1426 1730
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1731 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan); 1732 control &= __get_sar_mask(chan);
@@ -1432,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1432 1736
1433 control |= __set_reqseq(chan, chan->buffer_seq); 1737 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq); 1738 control |= __set_txseq(chan, chan->next_tx_seq);
1739 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1435 1740
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1741 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1437 1742
@@ -1446,21 +1751,23 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1446 1751
1447 __set_retrans_timer(chan); 1752 __set_retrans_timer(chan);
1448 1753
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1754 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1450 1755
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1756 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1452 1757
1453 if (bt_cb(skb)->retries == 1) 1758 if (bt_cb(skb)->control.retries == 1) {
1454 chan->unacked_frames++; 1759 chan->unacked_frames++;
1455 1760
1761 if (!nsent++)
1762 __clear_ack_timer(chan);
1763 }
1764
1456 chan->frames_sent++; 1765 chan->frames_sent++;
1457 1766
1458 if (skb_queue_is_last(&chan->tx_q, skb)) 1767 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL; 1768 chan->tx_send_head = NULL;
1460 else 1769 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); 1770 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1462
1463 nsent++;
1464 } 1771 }
1465 1772
1466 return nsent; 1773 return nsent;
@@ -1478,7 +1785,7 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1478 return ret; 1785 return ret;
1479} 1786}
1480 1787
1481static void l2cap_send_ack(struct l2cap_chan *chan) 1788static void __l2cap_send_ack(struct l2cap_chan *chan)
1482{ 1789{
1483 u32 control = 0; 1790 u32 control = 0;
1484 1791
@@ -1498,6 +1805,12 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
1498 l2cap_send_sframe(chan, control); 1805 l2cap_send_sframe(chan, control);
1499} 1806}
1500 1807
1808static void l2cap_send_ack(struct l2cap_chan *chan)
1809{
1810 __clear_ack_timer(chan);
1811 __l2cap_send_ack(chan);
1812}
1813
1501static void l2cap_send_srejtail(struct l2cap_chan *chan) 1814static void l2cap_send_srejtail(struct l2cap_chan *chan)
1502{ 1815{
1503 struct srej_list *tail; 1816 struct srej_list *tail;
@@ -1512,11 +1825,13 @@ static void l2cap_send_srejtail(struct l2cap_chan *chan)
1512 l2cap_send_sframe(chan, control); 1825 l2cap_send_sframe(chan, control);
1513} 1826}
1514 1827
1515static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1828static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1829 struct msghdr *msg, int len,
1830 int count, struct sk_buff *skb)
1516{ 1831{
1517 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 1832 struct l2cap_conn *conn = chan->conn;
1518 struct sk_buff **frag; 1833 struct sk_buff **frag;
1519 int err, sent = 0; 1834 int sent = 0;
1520 1835
1521 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) 1836 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1522 return -EFAULT; 1837 return -EFAULT;
@@ -1527,11 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1527 /* Continuation fragments (no L2CAP header) */ 1842 /* Continuation fragments (no L2CAP header) */
1528 frag = &skb_shinfo(skb)->frag_list; 1843 frag = &skb_shinfo(skb)->frag_list;
1529 while (len) { 1844 while (len) {
1845 struct sk_buff *tmp;
1846
1530 count = min_t(unsigned int, conn->mtu, len); 1847 count = min_t(unsigned int, conn->mtu, len);
1531 1848
1532 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); 1849 tmp = chan->ops->alloc_skb(chan, count,
1533 if (!*frag) 1850 msg->msg_flags & MSG_DONTWAIT);
1534 return err; 1851 if (IS_ERR(tmp))
1852 return PTR_ERR(tmp);
1853
1854 *frag = tmp;
1855
1535 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1856 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1536 return -EFAULT; 1857 return -EFAULT;
1537 1858
@@ -1540,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1540 sent += count; 1861 sent += count;
1541 len -= count; 1862 len -= count;
1542 1863
1864 skb->len += (*frag)->len;
1865 skb->data_len += (*frag)->len;
1866
1543 frag = &(*frag)->next; 1867 frag = &(*frag)->next;
1544 } 1868 }
1545 1869
@@ -1550,29 +1874,29 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1550 struct msghdr *msg, size_t len, 1874 struct msghdr *msg, size_t len,
1551 u32 priority) 1875 u32 priority)
1552{ 1876{
1553 struct sock *sk = chan->sk;
1554 struct l2cap_conn *conn = chan->conn; 1877 struct l2cap_conn *conn = chan->conn;
1555 struct sk_buff *skb; 1878 struct sk_buff *skb;
1556 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 1879 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1557 struct l2cap_hdr *lh; 1880 struct l2cap_hdr *lh;
1558 1881
1559 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); 1882 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1560 1883
1561 count = min_t(unsigned int, (conn->mtu - hlen), len); 1884 count = min_t(unsigned int, (conn->mtu - hlen), len);
1562 skb = bt_skb_send_alloc(sk, count + hlen, 1885
1563 msg->msg_flags & MSG_DONTWAIT, &err); 1886 skb = chan->ops->alloc_skb(chan, count + hlen,
1564 if (!skb) 1887 msg->msg_flags & MSG_DONTWAIT);
1565 return ERR_PTR(err); 1888 if (IS_ERR(skb))
1889 return skb;
1566 1890
1567 skb->priority = priority; 1891 skb->priority = priority;
1568 1892
1569 /* Create L2CAP header */ 1893 /* Create L2CAP header */
1570 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1894 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1571 lh->cid = cpu_to_le16(chan->dcid); 1895 lh->cid = cpu_to_le16(chan->dcid);
1572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1896 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1573 put_unaligned_le16(chan->psm, skb_put(skb, 2)); 1897 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1574 1898
1575 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1899 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1576 if (unlikely(err < 0)) { 1900 if (unlikely(err < 0)) {
1577 kfree_skb(skb); 1901 kfree_skb(skb);
1578 return ERR_PTR(err); 1902 return ERR_PTR(err);
@@ -1584,28 +1908,28 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1584 struct msghdr *msg, size_t len, 1908 struct msghdr *msg, size_t len,
1585 u32 priority) 1909 u32 priority)
1586{ 1910{
1587 struct sock *sk = chan->sk;
1588 struct l2cap_conn *conn = chan->conn; 1911 struct l2cap_conn *conn = chan->conn;
1589 struct sk_buff *skb; 1912 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE; 1913 int err, count;
1591 struct l2cap_hdr *lh; 1914 struct l2cap_hdr *lh;
1592 1915
1593 BT_DBG("sk %p len %d", sk, (int)len); 1916 BT_DBG("chan %p len %d", chan, (int)len);
1594 1917
1595 count = min_t(unsigned int, (conn->mtu - hlen), len); 1918 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen, 1919
1597 msg->msg_flags & MSG_DONTWAIT, &err); 1920 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1598 if (!skb) 1921 msg->msg_flags & MSG_DONTWAIT);
1599 return ERR_PTR(err); 1922 if (IS_ERR(skb))
1923 return skb;
1600 1924
1601 skb->priority = priority; 1925 skb->priority = priority;
1602 1926
1603 /* Create L2CAP header */ 1927 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid); 1929 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1930 lh->len = cpu_to_le16(len);
1607 1931
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1932 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1609 if (unlikely(err < 0)) { 1933 if (unlikely(err < 0)) {
1610 kfree_skb(skb); 1934 kfree_skb(skb);
1611 return ERR_PTR(err); 1935 return ERR_PTR(err);
@@ -1615,15 +1939,14 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1615 1939
1616static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 1940static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1617 struct msghdr *msg, size_t len, 1941 struct msghdr *msg, size_t len,
1618 u32 control, u16 sdulen) 1942 u16 sdulen)
1619{ 1943{
1620 struct sock *sk = chan->sk;
1621 struct l2cap_conn *conn = chan->conn; 1944 struct l2cap_conn *conn = chan->conn;
1622 struct sk_buff *skb; 1945 struct sk_buff *skb;
1623 int err, count, hlen; 1946 int err, count, hlen;
1624 struct l2cap_hdr *lh; 1947 struct l2cap_hdr *lh;
1625 1948
1626 BT_DBG("sk %p len %d", sk, (int)len); 1949 BT_DBG("chan %p len %d", chan, (int)len);
1627 1950
1628 if (!conn) 1951 if (!conn)
1629 return ERR_PTR(-ENOTCONN); 1952 return ERR_PTR(-ENOTCONN);
@@ -1640,22 +1963,23 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1640 hlen += L2CAP_FCS_SIZE; 1963 hlen += L2CAP_FCS_SIZE;
1641 1964
1642 count = min_t(unsigned int, (conn->mtu - hlen), len); 1965 count = min_t(unsigned int, (conn->mtu - hlen), len);
1643 skb = bt_skb_send_alloc(sk, count + hlen, 1966
1644 msg->msg_flags & MSG_DONTWAIT, &err); 1967 skb = chan->ops->alloc_skb(chan, count + hlen,
1645 if (!skb) 1968 msg->msg_flags & MSG_DONTWAIT);
1646 return ERR_PTR(err); 1969 if (IS_ERR(skb))
1970 return skb;
1647 1971
1648 /* Create L2CAP header */ 1972 /* Create L2CAP header */
1649 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1973 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1650 lh->cid = cpu_to_le16(chan->dcid); 1974 lh->cid = cpu_to_le16(chan->dcid);
1651 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1975 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1652 1976
1653 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 1977 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1654 1978
1655 if (sdulen) 1979 if (sdulen)
1656 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 1980 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1657 1981
1658 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1982 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1659 if (unlikely(err < 0)) { 1983 if (unlikely(err < 0)) {
1660 kfree_skb(skb); 1984 kfree_skb(skb);
1661 return ERR_PTR(err); 1985 return ERR_PTR(err);
@@ -1664,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1664 if (chan->fcs == L2CAP_FCS_CRC16) 1988 if (chan->fcs == L2CAP_FCS_CRC16)
1665 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); 1989 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1666 1990
1667 bt_cb(skb)->retries = 0; 1991 bt_cb(skb)->control.retries = 0;
1668 return skb; 1992 return skb;
1669} 1993}
1670 1994
1671static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1995static int l2cap_segment_sdu(struct l2cap_chan *chan,
1996 struct sk_buff_head *seg_queue,
1997 struct msghdr *msg, size_t len)
1672{ 1998{
1673 struct sk_buff *skb; 1999 struct sk_buff *skb;
1674 struct sk_buff_head sar_queue; 2000 u16 sdu_len;
1675 u32 control; 2001 size_t pdu_len;
1676 size_t size = 0; 2002 int err = 0;
2003 u8 sar;
1677 2004
1678 skb_queue_head_init(&sar_queue); 2005 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
1679 control = __set_ctrl_sar(chan, L2CAP_SAR_START); 2006
1680 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); 2007 /* It is critical that ERTM PDUs fit in a single HCI fragment,
1681 if (IS_ERR(skb)) 2008 * so fragmented skbs are not used. The HCI layer's handling
1682 return PTR_ERR(skb); 2009 * of fragmented skbs is not compatible with ERTM's queueing.
2010 */
1683 2011
1684 __skb_queue_tail(&sar_queue, skb); 2012 /* PDU size is derived from the HCI MTU */
1685 len -= chan->remote_mps; 2013 pdu_len = chan->conn->mtu;
1686 size += chan->remote_mps;
1687 2014
1688 while (len > 0) { 2015 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
1689 size_t buflen;
1690 2016
1691 if (len > chan->remote_mps) { 2017 /* Adjust for largest possible L2CAP overhead. */
1692 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); 2018 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
1693 buflen = chan->remote_mps; 2019
1694 } else { 2020 /* Remote device may have requested smaller PDUs */
1695 control = __set_ctrl_sar(chan, L2CAP_SAR_END); 2021 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
1696 buflen = len; 2022
1697 } 2023 if (len <= pdu_len) {
2024 sar = L2CAP_SAR_UNSEGMENTED;
2025 sdu_len = 0;
2026 pdu_len = len;
2027 } else {
2028 sar = L2CAP_SAR_START;
2029 sdu_len = len;
2030 pdu_len -= L2CAP_SDULEN_SIZE;
2031 }
2032
2033 while (len > 0) {
2034 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
1698 2035
1699 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1700 if (IS_ERR(skb)) { 2036 if (IS_ERR(skb)) {
1701 skb_queue_purge(&sar_queue); 2037 __skb_queue_purge(seg_queue);
1702 return PTR_ERR(skb); 2038 return PTR_ERR(skb);
1703 } 2039 }
1704 2040
1705 __skb_queue_tail(&sar_queue, skb); 2041 bt_cb(skb)->control.sar = sar;
1706 len -= buflen; 2042 __skb_queue_tail(seg_queue, skb);
1707 size += buflen; 2043
2044 len -= pdu_len;
2045 if (sdu_len) {
2046 sdu_len = 0;
2047 pdu_len += L2CAP_SDULEN_SIZE;
2048 }
2049
2050 if (len <= pdu_len) {
2051 sar = L2CAP_SAR_END;
2052 pdu_len = len;
2053 } else {
2054 sar = L2CAP_SAR_CONTINUE;
2055 }
1708 } 2056 }
1709 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1710 if (chan->tx_send_head == NULL)
1711 chan->tx_send_head = sar_queue.next;
1712 2057
1713 return size; 2058 return err;
1714} 2059}
1715 2060
1716int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2061int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1717 u32 priority) 2062 u32 priority)
1718{ 2063{
1719 struct sk_buff *skb; 2064 struct sk_buff *skb;
1720 u32 control;
1721 int err; 2065 int err;
2066 struct sk_buff_head seg_queue;
1722 2067
1723 /* Connectionless channel */ 2068 /* Connectionless channel */
1724 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2069 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1747,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1747 2092
1748 case L2CAP_MODE_ERTM: 2093 case L2CAP_MODE_ERTM:
1749 case L2CAP_MODE_STREAMING: 2094 case L2CAP_MODE_STREAMING:
1750 /* Entire SDU fits into one PDU */ 2095 /* Check outgoing MTU */
1751 if (len <= chan->remote_mps) { 2096 if (len > chan->omtu) {
1752 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); 2097 err = -EMSGSIZE;
1753 skb = l2cap_create_iframe_pdu(chan, msg, len, control, 2098 break;
1754 0); 2099 }
1755 if (IS_ERR(skb))
1756 return PTR_ERR(skb);
1757 2100
1758 __skb_queue_tail(&chan->tx_q, skb); 2101 __skb_queue_head_init(&seg_queue);
1759 2102
1760 if (chan->tx_send_head == NULL) 2103 /* Do segmentation before calling in to the state machine,
1761 chan->tx_send_head = skb; 2104 * since it's possible to block while waiting for memory
2105 * allocation.
2106 */
2107 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
1762 2108
1763 } else { 2109 /* The channel could have been closed while segmenting,
1764 /* Segment SDU into multiples PDUs */ 2110 * check that it is still connected.
1765 err = l2cap_sar_segment_sdu(chan, msg, len); 2111 */
1766 if (err < 0) 2112 if (chan->state != BT_CONNECTED) {
1767 return err; 2113 __skb_queue_purge(&seg_queue);
2114 err = -ENOTCONN;
1768 } 2115 }
1769 2116
1770 if (chan->mode == L2CAP_MODE_STREAMING) { 2117 if (err)
1771 l2cap_streaming_send(chan);
1772 err = len;
1773 break; 2118 break;
1774 }
1775 2119
1776 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 2120 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
1777 test_bit(CONN_WAIT_F, &chan->conn_state)) { 2121 chan->tx_send_head = seg_queue.next;
1778 err = len; 2122 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
1779 break; 2123
1780 } 2124 if (chan->mode == L2CAP_MODE_ERTM)
2125 err = l2cap_ertm_send(chan);
2126 else
2127 l2cap_streaming_send(chan);
1781 2128
1782 err = l2cap_ertm_send(chan);
1783 if (err >= 0) 2129 if (err >= 0)
1784 err = len; 2130 err = len;
1785 2131
2132 /* If the skbs were not queued for sending, they'll still be in
2133 * seg_queue and need to be purged.
2134 */
2135 __skb_queue_purge(&seg_queue);
1786 break; 2136 break;
1787 2137
1788 default: 2138 default:
@@ -1801,9 +2151,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1801 2151
1802 BT_DBG("conn %p", conn); 2152 BT_DBG("conn %p", conn);
1803 2153
1804 rcu_read_lock(); 2154 mutex_lock(&conn->chan_lock);
1805 2155
1806 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 2156 list_for_each_entry(chan, &conn->chan_l, list) {
1807 struct sock *sk = chan->sk; 2157 struct sock *sk = chan->sk;
1808 if (chan->chan_type != L2CAP_CHAN_RAW) 2158 if (chan->chan_type != L2CAP_CHAN_RAW)
1809 continue; 2159 continue;
@@ -1819,7 +2169,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1819 kfree_skb(nskb); 2169 kfree_skb(nskb);
1820 } 2170 }
1821 2171
1822 rcu_read_unlock(); 2172 mutex_unlock(&conn->chan_lock);
1823} 2173}
1824 2174
1825/* ---- L2CAP signalling commands ---- */ 2175/* ---- L2CAP signalling commands ---- */
@@ -1987,18 +2337,38 @@ static void l2cap_ack_timeout(struct work_struct *work)
1987 2337
1988 BT_DBG("chan %p", chan); 2338 BT_DBG("chan %p", chan);
1989 2339
1990 lock_sock(chan->sk); 2340 l2cap_chan_lock(chan);
1991 l2cap_send_ack(chan); 2341
1992 release_sock(chan->sk); 2342 __l2cap_send_ack(chan);
2343
2344 l2cap_chan_unlock(chan);
2345
2346 l2cap_chan_put(chan);
1993} 2347}
1994 2348
1995static inline void l2cap_ertm_init(struct l2cap_chan *chan) 2349static inline int l2cap_ertm_init(struct l2cap_chan *chan)
1996{ 2350{
2351 int err;
2352
2353 chan->next_tx_seq = 0;
2354 chan->expected_tx_seq = 0;
1997 chan->expected_ack_seq = 0; 2355 chan->expected_ack_seq = 0;
1998 chan->unacked_frames = 0; 2356 chan->unacked_frames = 0;
1999 chan->buffer_seq = 0; 2357 chan->buffer_seq = 0;
2000 chan->num_acked = 0; 2358 chan->num_acked = 0;
2001 chan->frames_sent = 0; 2359 chan->frames_sent = 0;
2360 chan->last_acked_seq = 0;
2361 chan->sdu = NULL;
2362 chan->sdu_last_frag = NULL;
2363 chan->sdu_len = 0;
2364
2365 skb_queue_head_init(&chan->tx_q);
2366
2367 if (chan->mode != L2CAP_MODE_ERTM)
2368 return 0;
2369
2370 chan->rx_state = L2CAP_RX_STATE_RECV;
2371 chan->tx_state = L2CAP_TX_STATE_XMIT;
2002 2372
2003 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); 2373 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2004 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); 2374 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2007,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2007 skb_queue_head_init(&chan->srej_q); 2377 skb_queue_head_init(&chan->srej_q);
2008 2378
2009 INIT_LIST_HEAD(&chan->srej_l); 2379 INIT_LIST_HEAD(&chan->srej_l);
2380 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 if (err < 0)
2382 return err;
2383
2384 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2010} 2385}
2011 2386
2012static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2387static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2330,9 +2705,9 @@ done:
2330 chan->remote_mps = size; 2705 chan->remote_mps = size;
2331 2706
2332 rfc.retrans_timeout = 2707 rfc.retrans_timeout =
2333 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 2708 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2334 rfc.monitor_timeout = 2709 rfc.monitor_timeout =
2335 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2710 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2336 2711
2337 set_bit(CONF_MODE_DONE, &chan->conf_state); 2712 set_bit(CONF_MODE_DONE, &chan->conf_state);
2338 2713
@@ -2596,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2596 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 2971 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2597 __le16 psm = req->psm; 2972 __le16 psm = req->psm;
2598 2973
2599 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); 2974 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2600 2975
2601 /* Check if we have socket listening on psm */ 2976 /* Check if we have socket listening on psm */
2602 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src); 2977 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2603 if (!pchan) { 2978 if (!pchan) {
2604 result = L2CAP_CR_BAD_PSM; 2979 result = L2CAP_CR_BAD_PSM;
2605 goto sendresp; 2980 goto sendresp;
@@ -2607,6 +2982,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2607 2982
2608 parent = pchan->sk; 2983 parent = pchan->sk;
2609 2984
2985 mutex_lock(&conn->chan_lock);
2610 lock_sock(parent); 2986 lock_sock(parent);
2611 2987
2612 /* Check if the ACL is secure enough (if not SDP) */ 2988 /* Check if the ACL is secure enough (if not SDP) */
@@ -2647,7 +3023,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2647 3023
2648 bt_accept_enqueue(parent, sk); 3024 bt_accept_enqueue(parent, sk);
2649 3025
2650 l2cap_chan_add(conn, chan); 3026 __l2cap_chan_add(conn, chan);
2651 3027
2652 dcid = chan->scid; 3028 dcid = chan->scid;
2653 3029
@@ -2657,29 +3033,30 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2657 3033
2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 3034 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2659 if (l2cap_chan_check_security(chan)) { 3035 if (l2cap_chan_check_security(chan)) {
2660 if (bt_sk(sk)->defer_setup) { 3036 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
2661 l2cap_state_change(chan, BT_CONNECT2); 3037 __l2cap_state_change(chan, BT_CONNECT2);
2662 result = L2CAP_CR_PEND; 3038 result = L2CAP_CR_PEND;
2663 status = L2CAP_CS_AUTHOR_PEND; 3039 status = L2CAP_CS_AUTHOR_PEND;
2664 parent->sk_data_ready(parent, 0); 3040 parent->sk_data_ready(parent, 0);
2665 } else { 3041 } else {
2666 l2cap_state_change(chan, BT_CONFIG); 3042 __l2cap_state_change(chan, BT_CONFIG);
2667 result = L2CAP_CR_SUCCESS; 3043 result = L2CAP_CR_SUCCESS;
2668 status = L2CAP_CS_NO_INFO; 3044 status = L2CAP_CS_NO_INFO;
2669 } 3045 }
2670 } else { 3046 } else {
2671 l2cap_state_change(chan, BT_CONNECT2); 3047 __l2cap_state_change(chan, BT_CONNECT2);
2672 result = L2CAP_CR_PEND; 3048 result = L2CAP_CR_PEND;
2673 status = L2CAP_CS_AUTHEN_PEND; 3049 status = L2CAP_CS_AUTHEN_PEND;
2674 } 3050 }
2675 } else { 3051 } else {
2676 l2cap_state_change(chan, BT_CONNECT2); 3052 __l2cap_state_change(chan, BT_CONNECT2);
2677 result = L2CAP_CR_PEND; 3053 result = L2CAP_CR_PEND;
2678 status = L2CAP_CS_NO_INFO; 3054 status = L2CAP_CS_NO_INFO;
2679 } 3055 }
2680 3056
2681response: 3057response:
2682 release_sock(parent); 3058 release_sock(parent);
3059 mutex_unlock(&conn->chan_lock);
2683 3060
2684sendresp: 3061sendresp:
2685 rsp.scid = cpu_to_le16(scid); 3062 rsp.scid = cpu_to_le16(scid);
@@ -2695,8 +3072,7 @@ sendresp:
2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2696 conn->info_ident = l2cap_get_ident(conn); 3073 conn->info_ident = l2cap_get_ident(conn);
2697 3074
2698 schedule_delayed_work(&conn->info_timer, 3075 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2700 3076
2701 l2cap_send_cmd(conn, conn->info_ident, 3077 l2cap_send_cmd(conn, conn->info_ident,
2702 L2CAP_INFO_REQ, sizeof(info), &info); 3078 L2CAP_INFO_REQ, sizeof(info), &info);
@@ -2719,27 +3095,36 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 3095 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2720 u16 scid, dcid, result, status; 3096 u16 scid, dcid, result, status;
2721 struct l2cap_chan *chan; 3097 struct l2cap_chan *chan;
2722 struct sock *sk;
2723 u8 req[128]; 3098 u8 req[128];
3099 int err;
2724 3100
2725 scid = __le16_to_cpu(rsp->scid); 3101 scid = __le16_to_cpu(rsp->scid);
2726 dcid = __le16_to_cpu(rsp->dcid); 3102 dcid = __le16_to_cpu(rsp->dcid);
2727 result = __le16_to_cpu(rsp->result); 3103 result = __le16_to_cpu(rsp->result);
2728 status = __le16_to_cpu(rsp->status); 3104 status = __le16_to_cpu(rsp->status);
2729 3105
2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); 3106 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3107 dcid, scid, result, status);
3108
3109 mutex_lock(&conn->chan_lock);
2731 3110
2732 if (scid) { 3111 if (scid) {
2733 chan = l2cap_get_chan_by_scid(conn, scid); 3112 chan = __l2cap_get_chan_by_scid(conn, scid);
2734 if (!chan) 3113 if (!chan) {
2735 return -EFAULT; 3114 err = -EFAULT;
3115 goto unlock;
3116 }
2736 } else { 3117 } else {
2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident); 3118 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2738 if (!chan) 3119 if (!chan) {
2739 return -EFAULT; 3120 err = -EFAULT;
3121 goto unlock;
3122 }
2740 } 3123 }
2741 3124
2742 sk = chan->sk; 3125 err = 0;
3126
3127 l2cap_chan_lock(chan);
2743 3128
2744 switch (result) { 3129 switch (result) {
2745 case L2CAP_CR_SUCCESS: 3130 case L2CAP_CR_SUCCESS:
@@ -2765,8 +3150,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2765 break; 3150 break;
2766 } 3151 }
2767 3152
2768 release_sock(sk); 3153 l2cap_chan_unlock(chan);
2769 return 0; 3154
3155unlock:
3156 mutex_unlock(&conn->chan_lock);
3157
3158 return err;
2770} 3159}
2771 3160
2772static inline void set_default_fcs(struct l2cap_chan *chan) 3161static inline void set_default_fcs(struct l2cap_chan *chan)
@@ -2786,8 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2786 u16 dcid, flags; 3175 u16 dcid, flags;
2787 u8 rsp[64]; 3176 u8 rsp[64];
2788 struct l2cap_chan *chan; 3177 struct l2cap_chan *chan;
2789 struct sock *sk; 3178 int len, err = 0;
2790 int len;
2791 3179
2792 dcid = __le16_to_cpu(req->dcid); 3180 dcid = __le16_to_cpu(req->dcid);
2793 flags = __le16_to_cpu(req->flags); 3181 flags = __le16_to_cpu(req->flags);
@@ -2798,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2798 if (!chan) 3186 if (!chan)
2799 return -ENOENT; 3187 return -ENOENT;
2800 3188
2801 sk = chan->sk;
2802
2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3189 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2804 struct l2cap_cmd_rej_cid rej; 3190 struct l2cap_cmd_rej_cid rej;
2805 3191
@@ -2854,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2854 3240
2855 l2cap_state_change(chan, BT_CONNECTED); 3241 l2cap_state_change(chan, BT_CONNECTED);
2856 3242
2857 chan->next_tx_seq = 0; 3243 if (chan->mode == L2CAP_MODE_ERTM ||
2858 chan->expected_tx_seq = 0; 3244 chan->mode == L2CAP_MODE_STREAMING)
2859 skb_queue_head_init(&chan->tx_q); 3245 err = l2cap_ertm_init(chan);
2860 if (chan->mode == L2CAP_MODE_ERTM) 3246
2861 l2cap_ertm_init(chan); 3247 if (err < 0)
3248 l2cap_send_disconn_req(chan->conn, chan, -err);
3249 else
3250 l2cap_chan_ready(chan);
2862 3251
2863 l2cap_chan_ready(sk);
2864 goto unlock; 3252 goto unlock;
2865 } 3253 }
2866 3254
@@ -2887,8 +3275,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2887 } 3275 }
2888 3276
2889unlock: 3277unlock:
2890 release_sock(sk); 3278 l2cap_chan_unlock(chan);
2891 return 0; 3279 return err;
2892} 3280}
2893 3281
2894static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3282static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2896,22 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 3284 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2897 u16 scid, flags, result; 3285 u16 scid, flags, result;
2898 struct l2cap_chan *chan; 3286 struct l2cap_chan *chan;
2899 struct sock *sk; 3287 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2900 int len = cmd->len - sizeof(*rsp); 3288 int err = 0;
2901 3289
2902 scid = __le16_to_cpu(rsp->scid); 3290 scid = __le16_to_cpu(rsp->scid);
2903 flags = __le16_to_cpu(rsp->flags); 3291 flags = __le16_to_cpu(rsp->flags);
2904 result = __le16_to_cpu(rsp->result); 3292 result = __le16_to_cpu(rsp->result);
2905 3293
2906 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", 3294 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
2907 scid, flags, result); 3295 result, len);
2908 3296
2909 chan = l2cap_get_chan_by_scid(conn, scid); 3297 chan = l2cap_get_chan_by_scid(conn, scid);
2910 if (!chan) 3298 if (!chan)
2911 return 0; 3299 return 0;
2912 3300
2913 sk = chan->sk;
2914
2915 switch (result) { 3301 switch (result) {
2916 case L2CAP_CONF_SUCCESS: 3302 case L2CAP_CONF_SUCCESS:
2917 l2cap_conf_rfc_get(chan, rsp->data, len); 3303 l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -2969,9 +3355,9 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2969 } 3355 }
2970 3356
2971 default: 3357 default:
2972 sk->sk_err = ECONNRESET; 3358 l2cap_chan_set_err(chan, ECONNRESET);
2973 __set_chan_timer(chan, 3359
2974 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT)); 3360 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2975 l2cap_send_disconn_req(conn, chan, ECONNRESET); 3361 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2976 goto done; 3362 goto done;
2977 } 3363 }
@@ -2985,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2985 set_default_fcs(chan); 3371 set_default_fcs(chan);
2986 3372
2987 l2cap_state_change(chan, BT_CONNECTED); 3373 l2cap_state_change(chan, BT_CONNECTED);
2988 chan->next_tx_seq = 0; 3374 if (chan->mode == L2CAP_MODE_ERTM ||
2989 chan->expected_tx_seq = 0; 3375 chan->mode == L2CAP_MODE_STREAMING)
2990 skb_queue_head_init(&chan->tx_q); 3376 err = l2cap_ertm_init(chan);
2991 if (chan->mode == L2CAP_MODE_ERTM)
2992 l2cap_ertm_init(chan);
2993 3377
2994 l2cap_chan_ready(sk); 3378 if (err < 0)
3379 l2cap_send_disconn_req(chan->conn, chan, -err);
3380 else
3381 l2cap_chan_ready(chan);
2995 } 3382 }
2996 3383
2997done: 3384done:
2998 release_sock(sk); 3385 l2cap_chan_unlock(chan);
2999 return 0; 3386 return err;
3000} 3387}
3001 3388
3002static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3389static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3012,9 +3399,15 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3012 3399
3013 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 3400 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3014 3401
3015 chan = l2cap_get_chan_by_scid(conn, dcid); 3402 mutex_lock(&conn->chan_lock);
3016 if (!chan) 3403
3404 chan = __l2cap_get_chan_by_scid(conn, dcid);
3405 if (!chan) {
3406 mutex_unlock(&conn->chan_lock);
3017 return 0; 3407 return 0;
3408 }
3409
3410 l2cap_chan_lock(chan);
3018 3411
3019 sk = chan->sk; 3412 sk = chan->sk;
3020 3413
@@ -3022,12 +3415,20 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3022 rsp.scid = cpu_to_le16(chan->dcid); 3415 rsp.scid = cpu_to_le16(chan->dcid);
3023 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 3416 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3024 3417
3418 lock_sock(sk);
3025 sk->sk_shutdown = SHUTDOWN_MASK; 3419 sk->sk_shutdown = SHUTDOWN_MASK;
3420 release_sock(sk);
3026 3421
3422 l2cap_chan_hold(chan);
3027 l2cap_chan_del(chan, ECONNRESET); 3423 l2cap_chan_del(chan, ECONNRESET);
3028 release_sock(sk); 3424
3425 l2cap_chan_unlock(chan);
3029 3426
3030 chan->ops->close(chan->data); 3427 chan->ops->close(chan->data);
3428 l2cap_chan_put(chan);
3429
3430 mutex_unlock(&conn->chan_lock);
3431
3031 return 0; 3432 return 0;
3032} 3433}
3033 3434
@@ -3036,23 +3437,32 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3036 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 3437 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3037 u16 dcid, scid; 3438 u16 dcid, scid;
3038 struct l2cap_chan *chan; 3439 struct l2cap_chan *chan;
3039 struct sock *sk;
3040 3440
3041 scid = __le16_to_cpu(rsp->scid); 3441 scid = __le16_to_cpu(rsp->scid);
3042 dcid = __le16_to_cpu(rsp->dcid); 3442 dcid = __le16_to_cpu(rsp->dcid);
3043 3443
3044 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 3444 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3045 3445
3046 chan = l2cap_get_chan_by_scid(conn, scid); 3446 mutex_lock(&conn->chan_lock);
3047 if (!chan) 3447
3448 chan = __l2cap_get_chan_by_scid(conn, scid);
3449 if (!chan) {
3450 mutex_unlock(&conn->chan_lock);
3048 return 0; 3451 return 0;
3452 }
3049 3453
3050 sk = chan->sk; 3454 l2cap_chan_lock(chan);
3051 3455
3456 l2cap_chan_hold(chan);
3052 l2cap_chan_del(chan, 0); 3457 l2cap_chan_del(chan, 0);
3053 release_sock(sk); 3458
3459 l2cap_chan_unlock(chan);
3054 3460
3055 chan->ops->close(chan->data); 3461 chan->ops->close(chan->data);
3462 l2cap_chan_put(chan);
3463
3464 mutex_unlock(&conn->chan_lock);
3465
3056 return 0; 3466 return 0;
3057} 3467}
3058 3468
@@ -3132,7 +3542,8 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3132 return 0; 3542 return 0;
3133 } 3543 }
3134 3544
3135 if (type == L2CAP_IT_FEAT_MASK) { 3545 switch (type) {
3546 case L2CAP_IT_FEAT_MASK:
3136 conn->feat_mask = get_unaligned_le32(rsp->data); 3547 conn->feat_mask = get_unaligned_le32(rsp->data);
3137 3548
3138 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3549 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
@@ -3149,11 +3560,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3149 3560
3150 l2cap_conn_start(conn); 3561 l2cap_conn_start(conn);
3151 } 3562 }
3152 } else if (type == L2CAP_IT_FIXED_CHAN) { 3563 break;
3564
3565 case L2CAP_IT_FIXED_CHAN:
3566 conn->fixed_chan_mask = rsp->data[0];
3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3567 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3154 conn->info_ident = 0; 3568 conn->info_ident = 0;
3155 3569
3156 l2cap_conn_start(conn); 3570 l2cap_conn_start(conn);
3571 break;
3157 } 3572 }
3158 3573
3159 return 0; 3574 return 0;
@@ -3181,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3181 /* Placeholder: Always reject */ 3596 /* Placeholder: Always reject */
3182 rsp.dcid = 0; 3597 rsp.dcid = 0;
3183 rsp.scid = cpu_to_le16(scid); 3598 rsp.scid = cpu_to_le16(scid);
3184 rsp.result = L2CAP_CR_NO_MEM; 3599 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3185 rsp.status = L2CAP_CS_NO_INFO; 3600 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3186 3601
3187 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, 3602 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3188 sizeof(rsp), &rsp); 3603 sizeof(rsp), &rsp);
@@ -3581,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
3581 struct sk_buff *next_skb; 3996 struct sk_buff *next_skb;
3582 int tx_seq_offset, next_tx_seq_offset; 3997 int tx_seq_offset, next_tx_seq_offset;
3583 3998
3584 bt_cb(skb)->tx_seq = tx_seq; 3999 bt_cb(skb)->control.txseq = tx_seq;
3585 bt_cb(skb)->sar = sar; 4000 bt_cb(skb)->control.sar = sar;
3586 4001
3587 next_skb = skb_peek(&chan->srej_q); 4002 next_skb = skb_peek(&chan->srej_q);
3588 4003
3589 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4004 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3590 4005
3591 while (next_skb) { 4006 while (next_skb) {
3592 if (bt_cb(next_skb)->tx_seq == tx_seq) 4007 if (bt_cb(next_skb)->control.txseq == tx_seq)
3593 return -EINVAL; 4008 return -EINVAL;
3594 4009
3595 next_tx_seq_offset = __seq_offset(chan, 4010 next_tx_seq_offset = __seq_offset(chan,
3596 bt_cb(next_skb)->tx_seq, chan->buffer_seq); 4011 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
3597 4012
3598 if (next_tx_seq_offset > tx_seq_offset) { 4013 if (next_tx_seq_offset > tx_seq_offset) {
3599 __skb_queue_before(&chan->srej_q, next_skb, skb); 4014 __skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3713,19 +4128,12 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
3713 4128
3714static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4129static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3715{ 4130{
3716 u32 control;
3717
3718 BT_DBG("chan %p, Enter local busy", chan); 4131 BT_DBG("chan %p, Enter local busy", chan);
3719 4132
3720 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4133 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4134 l2cap_seq_list_clear(&chan->srej_list);
3721 4135
3722 control = __set_reqseq(chan, chan->buffer_seq); 4136 __set_ack_timer(chan);
3723 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3724 l2cap_send_sframe(chan, control);
3725
3726 set_bit(CONN_RNR_SENT, &chan->conn_state);
3727
3728 __clear_ack_timer(chan);
3729} 4137}
3730 4138
3731static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4139static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
@@ -3772,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3772 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4180 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3773 int err; 4181 int err;
3774 4182
3775 if (bt_cb(skb)->tx_seq != tx_seq) 4183 if (bt_cb(skb)->control.txseq != tx_seq)
3776 break; 4184 break;
3777 4185
3778 skb = skb_dequeue(&chan->srej_q); 4186 skb = skb_dequeue(&chan->srej_q);
3779 control = __set_ctrl_sar(chan, bt_cb(skb)->sar); 4187 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
3780 err = l2cap_reassemble_sdu(chan, skb, control); 4188 err = l2cap_reassemble_sdu(chan, skb, control);
3781 4189
3782 if (err < 0) { 4190 if (err < 0) {
@@ -3816,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3816 while (tx_seq != chan->expected_tx_seq) { 4224 while (tx_seq != chan->expected_tx_seq) {
3817 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 4225 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3818 control |= __set_reqseq(chan, chan->expected_tx_seq); 4226 control |= __set_reqseq(chan, chan->expected_tx_seq);
4227 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
3819 l2cap_send_sframe(chan, control); 4228 l2cap_send_sframe(chan, control);
3820 4229
3821 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4230 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -3865,8 +4274,11 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
3865 goto drop; 4274 goto drop;
3866 } 4275 }
3867 4276
3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) 4277 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4278 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4279 l2cap_send_ack(chan);
3869 goto drop; 4280 goto drop;
4281 }
3870 4282
3871 if (tx_seq == chan->expected_tx_seq) 4283 if (tx_seq == chan->expected_tx_seq)
3872 goto expected; 4284 goto expected;
@@ -3927,15 +4339,15 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
3927 __skb_queue_head_init(&chan->srej_q); 4339 __skb_queue_head_init(&chan->srej_q);
3928 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4340 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3929 4341
3930 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4342 /* Set P-bit only if there are some I-frames to ack. */
4343 if (__clear_ack_timer(chan))
4344 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3931 4345
3932 err = l2cap_send_srejframe(chan, tx_seq); 4346 err = l2cap_send_srejframe(chan, tx_seq);
3933 if (err < 0) { 4347 if (err < 0) {
3934 l2cap_send_disconn_req(chan->conn, chan, -err); 4348 l2cap_send_disconn_req(chan->conn, chan, -err);
3935 return err; 4349 return err;
3936 } 4350 }
3937
3938 __clear_ack_timer(chan);
3939 } 4351 }
3940 return 0; 4352 return 0;
3941 4353
@@ -3943,8 +4355,8 @@ expected:
3943 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4355 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3944 4356
3945 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4357 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3946 bt_cb(skb)->tx_seq = tx_seq; 4358 bt_cb(skb)->control.txseq = tx_seq;
3947 bt_cb(skb)->sar = sar; 4359 bt_cb(skb)->control.sar = sar;
3948 __skb_queue_tail(&chan->srej_q, skb); 4360 __skb_queue_tail(&chan->srej_q, skb);
3949 return 0; 4361 return 0;
3950 } 4362 }
@@ -4135,13 +4547,14 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_cont
4135 return 0; 4547 return 0;
4136} 4548}
4137 4549
4138static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 4550static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4139{ 4551{
4140 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4141 u32 control; 4552 u32 control;
4142 u16 req_seq; 4553 u16 req_seq;
4143 int len, next_tx_seq_offset, req_seq_offset; 4554 int len, next_tx_seq_offset, req_seq_offset;
4144 4555
4556 __unpack_control(chan, skb);
4557
4145 control = __get_control(chan, skb->data); 4558 control = __get_control(chan, skb->data);
4146 skb_pull(skb, __ctrl_size(chan)); 4559 skb_pull(skb, __ctrl_size(chan));
4147 len = skb->len; 4560 len = skb->len;
@@ -4205,7 +4618,6 @@ drop:
4205static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 4618static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4206{ 4619{
4207 struct l2cap_chan *chan; 4620 struct l2cap_chan *chan;
4208 struct sock *sk = NULL;
4209 u32 control; 4621 u32 control;
4210 u16 tx_seq; 4622 u16 tx_seq;
4211 int len; 4623 int len;
@@ -4213,11 +4625,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4213 chan = l2cap_get_chan_by_scid(conn, cid); 4625 chan = l2cap_get_chan_by_scid(conn, cid);
4214 if (!chan) { 4626 if (!chan) {
4215 BT_DBG("unknown cid 0x%4.4x", cid); 4627 BT_DBG("unknown cid 0x%4.4x", cid);
4216 goto drop; 4628 /* Drop packet and return */
4629 kfree_skb(skb);
4630 return 0;
4217 } 4631 }
4218 4632
4219 sk = chan->sk;
4220
4221 BT_DBG("chan %p, len %d", chan, skb->len); 4633 BT_DBG("chan %p, len %d", chan, skb->len);
4222 4634
4223 if (chan->state != BT_CONNECTED) 4635 if (chan->state != BT_CONNECTED)
@@ -4238,7 +4650,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4238 break; 4650 break;
4239 4651
4240 case L2CAP_MODE_ERTM: 4652 case L2CAP_MODE_ERTM:
4241 l2cap_ertm_data_rcv(sk, skb); 4653 l2cap_ertm_data_rcv(chan, skb);
4242 4654
4243 goto done; 4655 goto done;
4244 4656
@@ -4287,26 +4699,20 @@ drop:
4287 kfree_skb(skb); 4699 kfree_skb(skb);
4288 4700
4289done: 4701done:
4290 if (sk) 4702 l2cap_chan_unlock(chan);
4291 release_sock(sk);
4292 4703
4293 return 0; 4704 return 0;
4294} 4705}
4295 4706
4296static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 4707static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4297{ 4708{
4298 struct sock *sk = NULL;
4299 struct l2cap_chan *chan; 4709 struct l2cap_chan *chan;
4300 4710
4301 chan = l2cap_global_chan_by_psm(0, psm, conn->src); 4711 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4302 if (!chan) 4712 if (!chan)
4303 goto drop; 4713 goto drop;
4304 4714
4305 sk = chan->sk; 4715 BT_DBG("chan %p, len %d", chan, skb->len);
4306
4307 lock_sock(sk);
4308
4309 BT_DBG("sk %p, len %d", sk, skb->len);
4310 4716
4311 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4717 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4312 goto drop; 4718 goto drop;
@@ -4315,31 +4721,24 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4315 goto drop; 4721 goto drop;
4316 4722
4317 if (!chan->ops->recv(chan->data, skb)) 4723 if (!chan->ops->recv(chan->data, skb))
4318 goto done; 4724 return 0;
4319 4725
4320drop: 4726drop:
4321 kfree_skb(skb); 4727 kfree_skb(skb);
4322 4728
4323done:
4324 if (sk)
4325 release_sock(sk);
4326 return 0; 4729 return 0;
4327} 4730}
4328 4731
4329static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) 4732static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4733 struct sk_buff *skb)
4330{ 4734{
4331 struct sock *sk = NULL;
4332 struct l2cap_chan *chan; 4735 struct l2cap_chan *chan;
4333 4736
4334 chan = l2cap_global_chan_by_scid(0, cid, conn->src); 4737 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4335 if (!chan) 4738 if (!chan)
4336 goto drop; 4739 goto drop;
4337 4740
4338 sk = chan->sk; 4741 BT_DBG("chan %p, len %d", chan, skb->len);
4339
4340 lock_sock(sk);
4341
4342 BT_DBG("sk %p, len %d", sk, skb->len);
4343 4742
4344 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4743 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4345 goto drop; 4744 goto drop;
@@ -4348,14 +4747,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
4348 goto drop; 4747 goto drop;
4349 4748
4350 if (!chan->ops->recv(chan->data, skb)) 4749 if (!chan->ops->recv(chan->data, skb))
4351 goto done; 4750 return 0;
4352 4751
4353drop: 4752drop:
4354 kfree_skb(skb); 4753 kfree_skb(skb);
4355 4754
4356done:
4357 if (sk)
4358 release_sock(sk);
4359 return 0; 4755 return 0;
4360} 4756}
4361 4757
@@ -4383,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4383 break; 4779 break;
4384 4780
4385 case L2CAP_CID_CONN_LESS: 4781 case L2CAP_CID_CONN_LESS:
4386 psm = get_unaligned_le16(skb->data); 4782 psm = get_unaligned((__le16 *) skb->data);
4387 skb_pull(skb, 2); 4783 skb_pull(skb, 2);
4388 l2cap_conless_channel(conn, psm, skb); 4784 l2cap_conless_channel(conn, psm, skb);
4389 break; 4785 break;
@@ -4478,9 +4874,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4478 4874
4479 if (encrypt == 0x00) { 4875 if (encrypt == 0x00) {
4480 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4876 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4481 __clear_chan_timer(chan); 4877 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4482 __set_chan_timer(chan,
4483 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4484 } else if (chan->sec_level == BT_SECURITY_HIGH) 4878 } else if (chan->sec_level == BT_SECURITY_HIGH)
4485 l2cap_chan_close(chan, ECONNREFUSED); 4879 l2cap_chan_close(chan, ECONNREFUSED);
4486 } else { 4880 } else {
@@ -4500,81 +4894,80 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4500 BT_DBG("conn %p", conn); 4894 BT_DBG("conn %p", conn);
4501 4895
4502 if (hcon->type == LE_LINK) { 4896 if (hcon->type == LE_LINK) {
4503 smp_distribute_keys(conn, 0); 4897 if (!status && encrypt)
4898 smp_distribute_keys(conn, 0);
4504 cancel_delayed_work(&conn->security_timer); 4899 cancel_delayed_work(&conn->security_timer);
4505 } 4900 }
4506 4901
4507 rcu_read_lock(); 4902 mutex_lock(&conn->chan_lock);
4508
4509 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4510 struct sock *sk = chan->sk;
4511 4903
4512 bh_lock_sock(sk); 4904 list_for_each_entry(chan, &conn->chan_l, list) {
4905 l2cap_chan_lock(chan);
4513 4906
4514 BT_DBG("chan->scid %d", chan->scid); 4907 BT_DBG("chan->scid %d", chan->scid);
4515 4908
4516 if (chan->scid == L2CAP_CID_LE_DATA) { 4909 if (chan->scid == L2CAP_CID_LE_DATA) {
4517 if (!status && encrypt) { 4910 if (!status && encrypt) {
4518 chan->sec_level = hcon->sec_level; 4911 chan->sec_level = hcon->sec_level;
4519 l2cap_chan_ready(sk); 4912 l2cap_chan_ready(chan);
4520 } 4913 }
4521 4914
4522 bh_unlock_sock(sk); 4915 l2cap_chan_unlock(chan);
4523 continue; 4916 continue;
4524 } 4917 }
4525 4918
4526 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { 4919 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4527 bh_unlock_sock(sk); 4920 l2cap_chan_unlock(chan);
4528 continue; 4921 continue;
4529 } 4922 }
4530 4923
4531 if (!status && (chan->state == BT_CONNECTED || 4924 if (!status && (chan->state == BT_CONNECTED ||
4532 chan->state == BT_CONFIG)) { 4925 chan->state == BT_CONFIG)) {
4926 struct sock *sk = chan->sk;
4927
4928 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
4929 sk->sk_state_change(sk);
4930
4533 l2cap_check_encryption(chan, encrypt); 4931 l2cap_check_encryption(chan, encrypt);
4534 bh_unlock_sock(sk); 4932 l2cap_chan_unlock(chan);
4535 continue; 4933 continue;
4536 } 4934 }
4537 4935
4538 if (chan->state == BT_CONNECT) { 4936 if (chan->state == BT_CONNECT) {
4539 if (!status) { 4937 if (!status) {
4540 struct l2cap_conn_req req; 4938 l2cap_send_conn_req(chan);
4541 req.scid = cpu_to_le16(chan->scid);
4542 req.psm = chan->psm;
4543
4544 chan->ident = l2cap_get_ident(conn);
4545 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4546
4547 l2cap_send_cmd(conn, chan->ident,
4548 L2CAP_CONN_REQ, sizeof(req), &req);
4549 } else { 4939 } else {
4550 __clear_chan_timer(chan); 4940 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4551 __set_chan_timer(chan,
4552 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4553 } 4941 }
4554 } else if (chan->state == BT_CONNECT2) { 4942 } else if (chan->state == BT_CONNECT2) {
4943 struct sock *sk = chan->sk;
4555 struct l2cap_conn_rsp rsp; 4944 struct l2cap_conn_rsp rsp;
4556 __u16 res, stat; 4945 __u16 res, stat;
4557 4946
4947 lock_sock(sk);
4948
4558 if (!status) { 4949 if (!status) {
4559 if (bt_sk(sk)->defer_setup) { 4950 if (test_bit(BT_SK_DEFER_SETUP,
4951 &bt_sk(sk)->flags)) {
4560 struct sock *parent = bt_sk(sk)->parent; 4952 struct sock *parent = bt_sk(sk)->parent;
4561 res = L2CAP_CR_PEND; 4953 res = L2CAP_CR_PEND;
4562 stat = L2CAP_CS_AUTHOR_PEND; 4954 stat = L2CAP_CS_AUTHOR_PEND;
4563 if (parent) 4955 if (parent)
4564 parent->sk_data_ready(parent, 0); 4956 parent->sk_data_ready(parent, 0);
4565 } else { 4957 } else {
4566 l2cap_state_change(chan, BT_CONFIG); 4958 __l2cap_state_change(chan, BT_CONFIG);
4567 res = L2CAP_CR_SUCCESS; 4959 res = L2CAP_CR_SUCCESS;
4568 stat = L2CAP_CS_NO_INFO; 4960 stat = L2CAP_CS_NO_INFO;
4569 } 4961 }
4570 } else { 4962 } else {
4571 l2cap_state_change(chan, BT_DISCONN); 4963 __l2cap_state_change(chan, BT_DISCONN);
4572 __set_chan_timer(chan, 4964 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4573 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4574 res = L2CAP_CR_SEC_BLOCK; 4965 res = L2CAP_CR_SEC_BLOCK;
4575 stat = L2CAP_CS_NO_INFO; 4966 stat = L2CAP_CS_NO_INFO;
4576 } 4967 }
4577 4968
4969 release_sock(sk);
4970
4578 rsp.scid = cpu_to_le16(chan->dcid); 4971 rsp.scid = cpu_to_le16(chan->dcid);
4579 rsp.dcid = cpu_to_le16(chan->scid); 4972 rsp.dcid = cpu_to_le16(chan->scid);
4580 rsp.result = cpu_to_le16(res); 4973 rsp.result = cpu_to_le16(res);
@@ -4583,10 +4976,10 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4583 sizeof(rsp), &rsp); 4976 sizeof(rsp), &rsp);
4584 } 4977 }
4585 4978
4586 bh_unlock_sock(sk); 4979 l2cap_chan_unlock(chan);
4587 } 4980 }
4588 4981
4589 rcu_read_unlock(); 4982 mutex_unlock(&conn->chan_lock);
4590 4983
4591 return 0; 4984 return 0;
4592} 4985}
@@ -4605,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4605 4998
4606 if (!(flags & ACL_CONT)) { 4999 if (!(flags & ACL_CONT)) {
4607 struct l2cap_hdr *hdr; 5000 struct l2cap_hdr *hdr;
4608 struct l2cap_chan *chan;
4609 u16 cid;
4610 int len; 5001 int len;
4611 5002
4612 if (conn->rx_len) { 5003 if (conn->rx_len) {
@@ -4626,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4626 5017
4627 hdr = (struct l2cap_hdr *) skb->data; 5018 hdr = (struct l2cap_hdr *) skb->data;
4628 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; 5019 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4629 cid = __le16_to_cpu(hdr->cid);
4630 5020
4631 if (len == skb->len) { 5021 if (len == skb->len) {
4632 /* Complete frame received */ 5022 /* Complete frame received */
@@ -4643,22 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4643 goto drop; 5033 goto drop;
4644 } 5034 }
4645 5035
4646 chan = l2cap_get_chan_by_scid(conn, cid);
4647
4648 if (chan && chan->sk) {
4649 struct sock *sk = chan->sk;
4650
4651 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4652 BT_ERR("Frame exceeding recv MTU (len %d, "
4653 "MTU %d)", len,
4654 chan->imtu);
4655 release_sock(sk);
4656 l2cap_conn_unreliable(conn, ECOMM);
4657 goto drop;
4658 }
4659 release_sock(sk);
4660 }
4661
4662 /* Allocate skb for the complete frame (with header) */ 5036 /* Allocate skb for the complete frame (with header) */
4663 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 5037 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4664 if (!conn->rx_skb) 5038 if (!conn->rx_skb)
@@ -4717,7 +5091,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4717 c->state, __le16_to_cpu(c->psm), 5091 c->state, __le16_to_cpu(c->psm),
4718 c->scid, c->dcid, c->imtu, c->omtu, 5092 c->scid, c->dcid, c->imtu, c->omtu,
4719 c->sec_level, c->mode); 5093 c->sec_level, c->mode);
4720} 5094 }
4721 5095
4722 read_unlock(&chan_list_lock); 5096 read_unlock(&chan_list_lock);
4723 5097
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 401d9428ae4c..3bb1611b9d48 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -82,7 +82,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
82 } 82 }
83 83
84 if (la.l2_cid) 84 if (la.l2_cid)
85 err = l2cap_add_scid(chan, la.l2_cid); 85 err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
86 else 86 else
87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); 87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
88 88
@@ -123,15 +123,18 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
123 if (la.l2_cid && la.l2_psm) 123 if (la.l2_cid && la.l2_psm)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); 126 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
127 &la.l2_bdaddr, la.l2_bdaddr_type);
127 if (err) 128 if (err)
128 goto done; 129 return err;
130
131 lock_sock(sk);
129 132
130 err = bt_sock_wait_state(sk, BT_CONNECTED, 133 err = bt_sock_wait_state(sk, BT_CONNECTED,
131 sock_sndtimeo(sk, flags & O_NONBLOCK)); 134 sock_sndtimeo(sk, flags & O_NONBLOCK));
132done: 135
133 if (sock_owned_by_user(sk)) 136 release_sock(sk);
134 release_sock(sk); 137
135 return err; 138 return err;
136} 139}
137 140
@@ -145,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
145 148
146 lock_sock(sk); 149 lock_sock(sk);
147 150
148 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 151 if (sk->sk_state != BT_BOUND) {
149 || sk->sk_state != BT_BOUND) {
150 err = -EBADFD; 152 err = -EBADFD;
151 goto done; 153 goto done;
152 } 154 }
153 155
156 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
157 err = -EINVAL;
158 goto done;
159 }
160
154 switch (chan->mode) { 161 switch (chan->mode) {
155 case L2CAP_MODE_BASIC: 162 case L2CAP_MODE_BASIC:
156 break; 163 break;
@@ -317,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
317 324
318 case L2CAP_CONNINFO: 325 case L2CAP_CONNINFO:
319 if (sk->sk_state != BT_CONNECTED && 326 if (sk->sk_state != BT_CONNECTED &&
320 !(sk->sk_state == BT_CONNECT2 && 327 !(sk->sk_state == BT_CONNECT2 &&
321 bt_sk(sk)->defer_setup)) { 328 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
322 err = -ENOTCONN; 329 err = -ENOTCONN;
323 break; 330 break;
324 } 331 }
@@ -372,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
372 } 379 }
373 380
374 memset(&sec, 0, sizeof(sec)); 381 memset(&sec, 0, sizeof(sec));
375 sec.level = chan->sec_level; 382 if (chan->conn)
383 sec.level = chan->conn->hcon->sec_level;
384 else
385 sec.level = chan->sec_level;
376 386
377 if (sk->sk_state == BT_CONNECTED) 387 if (sk->sk_state == BT_CONNECTED)
378 sec.key_size = chan->conn->hcon->enc_key_size; 388 sec.key_size = chan->conn->hcon->enc_key_size;
@@ -389,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
389 break; 399 break;
390 } 400 }
391 401
392 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) 402 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
403 (u32 __user *) optval))
393 err = -EFAULT; 404 err = -EFAULT;
394 405
395 break; 406 break;
@@ -589,10 +600,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
589 sk->sk_state = BT_CONFIG; 600 sk->sk_state = BT_CONFIG;
590 chan->state = BT_CONFIG; 601 chan->state = BT_CONFIG;
591 602
592 /* or for ACL link, under defer_setup time */ 603 /* or for ACL link */
593 } else if (sk->sk_state == BT_CONNECT2 && 604 } else if ((sk->sk_state == BT_CONNECT2 &&
594 bt_sk(sk)->defer_setup) { 605 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
595 err = l2cap_chan_check_security(chan); 606 sk->sk_state == BT_CONNECTED) {
607 if (!l2cap_chan_check_security(chan))
608 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
609 else
610 sk->sk_state_change(sk);
596 } else { 611 } else {
597 err = -EINVAL; 612 err = -EINVAL;
598 } 613 }
@@ -609,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
609 break; 624 break;
610 } 625 }
611 626
612 bt_sk(sk)->defer_setup = opt; 627 if (opt)
628 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
629 else
630 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
613 break; 631 break;
614 632
615 case BT_FLUSHABLE: 633 case BT_FLUSHABLE:
@@ -709,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
709 if (msg->msg_flags & MSG_OOB) 727 if (msg->msg_flags & MSG_OOB)
710 return -EOPNOTSUPP; 728 return -EOPNOTSUPP;
711 729
712 lock_sock(sk); 730 if (sk->sk_state != BT_CONNECTED)
713
714 if (sk->sk_state != BT_CONNECTED) {
715 release_sock(sk);
716 return -ENOTCONN; 731 return -ENOTCONN;
717 }
718 732
733 l2cap_chan_lock(chan);
719 err = l2cap_chan_send(chan, msg, len, sk->sk_priority); 734 err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
735 l2cap_chan_unlock(chan);
720 736
721 release_sock(sk);
722 return err; 737 return err;
723} 738}
724 739
@@ -730,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
730 745
731 lock_sock(sk); 746 lock_sock(sk);
732 747
733 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 748 if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
749 &bt_sk(sk)->flags)) {
734 sk->sk_state = BT_CONFIG; 750 sk->sk_state = BT_CONFIG;
735 pi->chan->state = BT_CONFIG; 751 pi->chan->state = BT_CONFIG;
736 752
@@ -783,7 +799,7 @@ static void l2cap_sock_kill(struct sock *sk)
783 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 799 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
784 return; 800 return;
785 801
786 BT_DBG("sk %p state %d", sk, sk->sk_state); 802 BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
787 803
788 /* Kill poor orphan */ 804 /* Kill poor orphan */
789 805
@@ -795,7 +811,8 @@ static void l2cap_sock_kill(struct sock *sk)
795static int l2cap_sock_shutdown(struct socket *sock, int how) 811static int l2cap_sock_shutdown(struct socket *sock, int how)
796{ 812{
797 struct sock *sk = sock->sk; 813 struct sock *sk = sock->sk;
798 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 814 struct l2cap_chan *chan;
815 struct l2cap_conn *conn;
799 int err = 0; 816 int err = 0;
800 817
801 BT_DBG("sock %p, sk %p", sock, sk); 818 BT_DBG("sock %p, sk %p", sock, sk);
@@ -803,13 +820,24 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
803 if (!sk) 820 if (!sk)
804 return 0; 821 return 0;
805 822
823 chan = l2cap_pi(sk)->chan;
824 conn = chan->conn;
825
826 if (conn)
827 mutex_lock(&conn->chan_lock);
828
829 l2cap_chan_lock(chan);
806 lock_sock(sk); 830 lock_sock(sk);
831
807 if (!sk->sk_shutdown) { 832 if (!sk->sk_shutdown) {
808 if (chan->mode == L2CAP_MODE_ERTM) 833 if (chan->mode == L2CAP_MODE_ERTM)
809 err = __l2cap_wait_ack(sk); 834 err = __l2cap_wait_ack(sk);
810 835
811 sk->sk_shutdown = SHUTDOWN_MASK; 836 sk->sk_shutdown = SHUTDOWN_MASK;
837
838 release_sock(sk);
812 l2cap_chan_close(chan, 0); 839 l2cap_chan_close(chan, 0);
840 lock_sock(sk);
813 841
814 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 842 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
815 err = bt_sock_wait_state(sk, BT_CLOSED, 843 err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -820,6 +848,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
820 err = -sk->sk_err; 848 err = -sk->sk_err;
821 849
822 release_sock(sk); 850 release_sock(sk);
851 l2cap_chan_unlock(chan);
852
853 if (conn)
854 mutex_unlock(&conn->chan_lock);
855
823 return err; 856 return err;
824} 857}
825 858
@@ -862,8 +895,12 @@ static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
862 struct sock *sk = data; 895 struct sock *sk = data;
863 struct l2cap_pinfo *pi = l2cap_pi(sk); 896 struct l2cap_pinfo *pi = l2cap_pi(sk);
864 897
865 if (pi->rx_busy_skb) 898 lock_sock(sk);
866 return -ENOMEM; 899
900 if (pi->rx_busy_skb) {
901 err = -ENOMEM;
902 goto done;
903 }
867 904
868 err = sock_queue_rcv_skb(sk, skb); 905 err = sock_queue_rcv_skb(sk, skb);
869 906
@@ -882,6 +919,9 @@ static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
882 err = 0; 919 err = 0;
883 } 920 }
884 921
922done:
923 release_sock(sk);
924
885 return err; 925 return err;
886} 926}
887 927
@@ -899,18 +939,36 @@ static void l2cap_sock_state_change_cb(void *data, int state)
899 sk->sk_state = state; 939 sk->sk_state = state;
900} 940}
901 941
942static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
943 unsigned long len, int nb)
944{
945 struct sk_buff *skb;
946 int err;
947
948 l2cap_chan_unlock(chan);
949 skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
950 l2cap_chan_lock(chan);
951
952 if (!skb)
953 return ERR_PTR(err);
954
955 return skb;
956}
957
902static struct l2cap_ops l2cap_chan_ops = { 958static struct l2cap_ops l2cap_chan_ops = {
903 .name = "L2CAP Socket Interface", 959 .name = "L2CAP Socket Interface",
904 .new_connection = l2cap_sock_new_connection_cb, 960 .new_connection = l2cap_sock_new_connection_cb,
905 .recv = l2cap_sock_recv_cb, 961 .recv = l2cap_sock_recv_cb,
906 .close = l2cap_sock_close_cb, 962 .close = l2cap_sock_close_cb,
907 .state_change = l2cap_sock_state_change_cb, 963 .state_change = l2cap_sock_state_change_cb,
964 .alloc_skb = l2cap_sock_alloc_skb_cb,
908}; 965};
909 966
910static void l2cap_sock_destruct(struct sock *sk) 967static void l2cap_sock_destruct(struct sock *sk)
911{ 968{
912 BT_DBG("sk %p", sk); 969 BT_DBG("sk %p", sk);
913 970
971 l2cap_chan_put(l2cap_pi(sk)->chan);
914 if (l2cap_pi(sk)->rx_busy_skb) { 972 if (l2cap_pi(sk)->rx_busy_skb) {
915 kfree_skb(l2cap_pi(sk)->rx_busy_skb); 973 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
916 l2cap_pi(sk)->rx_busy_skb = NULL; 974 l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -931,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
931 struct l2cap_chan *pchan = l2cap_pi(parent)->chan; 989 struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
932 990
933 sk->sk_type = parent->sk_type; 991 sk->sk_type = parent->sk_type;
934 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; 992 bt_sk(sk)->flags = bt_sk(parent)->flags;
935 993
936 chan->chan_type = pchan->chan_type; 994 chan->chan_type = pchan->chan_type;
937 chan->imtu = pchan->imtu; 995 chan->imtu = pchan->imtu;
@@ -969,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
969 } else { 1027 } else {
970 chan->mode = L2CAP_MODE_BASIC; 1028 chan->mode = L2CAP_MODE_BASIC;
971 } 1029 }
972 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 1030
973 chan->fcs = L2CAP_FCS_CRC16; 1031 l2cap_chan_set_defaults(chan);
974 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
975 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
976 chan->sec_level = BT_SECURITY_LOW;
977 chan->flags = 0;
978 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
979 } 1032 }
980 1033
981 /* Default config options */ 1034 /* Default config options */
@@ -1004,19 +1057,23 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
1004 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 1057 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
1005 1058
1006 sk->sk_destruct = l2cap_sock_destruct; 1059 sk->sk_destruct = l2cap_sock_destruct;
1007 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); 1060 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
1008 1061
1009 sock_reset_flag(sk, SOCK_ZAPPED); 1062 sock_reset_flag(sk, SOCK_ZAPPED);
1010 1063
1011 sk->sk_protocol = proto; 1064 sk->sk_protocol = proto;
1012 sk->sk_state = BT_OPEN; 1065 sk->sk_state = BT_OPEN;
1013 1066
1014 chan = l2cap_chan_create(sk); 1067 chan = l2cap_chan_create();
1015 if (!chan) { 1068 if (!chan) {
1016 l2cap_sock_kill(sk); 1069 l2cap_sock_kill(sk);
1017 return NULL; 1070 return NULL;
1018 } 1071 }
1019 1072
1073 l2cap_chan_hold(chan);
1074
1075 chan->sk = sk;
1076
1020 l2cap_pi(sk)->chan = chan; 1077 l2cap_pi(sk)->chan = chan;
1021 1078
1022 return sk; 1079 return sk;
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 86a6bed229df..506628876f36 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,6 +24,8 @@
24 24
25/* Bluetooth kernel library. */ 25/* Bluetooth kernel library. */
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt
28
27#include <linux/module.h> 29#include <linux/module.h>
28 30
29#include <linux/kernel.h> 31#include <linux/kernel.h>
@@ -151,7 +153,26 @@ int bt_to_errno(__u16 code)
151} 153}
152EXPORT_SYMBOL(bt_to_errno); 154EXPORT_SYMBOL(bt_to_errno);
153 155
154int bt_printk(const char *level, const char *format, ...) 156int bt_info(const char *format, ...)
157{
158 struct va_format vaf;
159 va_list args;
160 int r;
161
162 va_start(args, format);
163
164 vaf.fmt = format;
165 vaf.va = &args;
166
167 r = pr_info("%pV", &vaf);
168
169 va_end(args);
170
171 return r;
172}
173EXPORT_SYMBOL(bt_info);
174
175int bt_err(const char *format, ...)
155{ 176{
156 struct va_format vaf; 177 struct va_format vaf;
157 va_list args; 178 va_list args;
@@ -162,10 +183,10 @@ int bt_printk(const char *level, const char *format, ...)
162 vaf.fmt = format; 183 vaf.fmt = format;
163 vaf.va = &args; 184 vaf.va = &args;
164 185
165 r = printk("%sBluetooth: %pV\n", level, &vaf); 186 r = pr_err("%pV", &vaf);
166 187
167 va_end(args); 188 va_end(args);
168 189
169 return r; 190 return r;
170} 191}
171EXPORT_SYMBOL(bt_printk); 192EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index bc8e59dda78e..25d220776079 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,6 +1,8 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3
3 Copyright (C) 2010 Nokia Corporation 4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
4 6
5 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as 8 it under the terms of the GNU General Public License version 2 as
@@ -32,12 +34,92 @@
32#include <net/bluetooth/mgmt.h> 34#include <net/bluetooth/mgmt.h>
33#include <net/bluetooth/smp.h> 35#include <net/bluetooth/smp.h>
34 36
35#define MGMT_VERSION 0 37bool enable_hs;
38
39#define MGMT_VERSION 1
36#define MGMT_REVISION 1 40#define MGMT_REVISION 1
37 41
38#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ 42static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_READ_INFO,
45 MGMT_OP_SET_POWERED,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_PAIRABLE,
50 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_SSP,
52 MGMT_OP_SET_HS,
53 MGMT_OP_SET_LE,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_ADD_UUID,
57 MGMT_OP_REMOVE_UUID,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_DISCONNECT,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_PAIR_DEVICE,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_CONFIRM_NAME,
78 MGMT_OP_BLOCK_DEVICE,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81};
82
83static const u16 mgmt_events[] = {
84 MGMT_EV_CONTROLLER_ERROR,
85 MGMT_EV_INDEX_ADDED,
86 MGMT_EV_INDEX_REMOVED,
87 MGMT_EV_NEW_SETTINGS,
88 MGMT_EV_CLASS_OF_DEV_CHANGED,
89 MGMT_EV_LOCAL_NAME_CHANGED,
90 MGMT_EV_NEW_LINK_KEY,
91 MGMT_EV_NEW_LONG_TERM_KEY,
92 MGMT_EV_DEVICE_CONNECTED,
93 MGMT_EV_DEVICE_DISCONNECTED,
94 MGMT_EV_CONNECT_FAILED,
95 MGMT_EV_PIN_CODE_REQUEST,
96 MGMT_EV_USER_CONFIRM_REQUEST,
97 MGMT_EV_USER_PASSKEY_REQUEST,
98 MGMT_EV_AUTH_FAILED,
99 MGMT_EV_DEVICE_FOUND,
100 MGMT_EV_DISCOVERING,
101 MGMT_EV_DEVICE_BLOCKED,
102 MGMT_EV_DEVICE_UNBLOCKED,
103 MGMT_EV_DEVICE_UNPAIRED,
104};
105
106/*
107 * These LE scan and inquiry parameters were chosen according to LE General
108 * Discovery Procedure specification.
109 */
110#define LE_SCAN_TYPE 0x01
111#define LE_SCAN_WIN 0x12
112#define LE_SCAN_INT 0x12
113#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
114#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115
116#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
117#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118
119#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
39 120
40#define SERVICE_CACHE_TIMEOUT (5 * 1000) 121#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
122 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
41 123
42struct pending_cmd { 124struct pending_cmd {
43 struct list_head list; 125 struct list_head list;
@@ -142,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
142 224
143 ev = (void *) skb_put(skb, sizeof(*ev)); 225 ev = (void *) skb_put(skb, sizeof(*ev));
144 ev->status = status; 226 ev->status = status;
145 put_unaligned_le16(cmd, &ev->opcode); 227 ev->opcode = cpu_to_le16(cmd);
146 228
147 err = sock_queue_rcv_skb(sk, skb); 229 err = sock_queue_rcv_skb(sk, skb);
148 if (err < 0) 230 if (err < 0)
@@ -151,8 +233,8 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
151 return err; 233 return err;
152} 234}
153 235
154static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, 236static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
155 size_t rp_len) 237 void *rp, size_t rp_len)
156{ 238{
157 struct sk_buff *skb; 239 struct sk_buff *skb;
158 struct mgmt_hdr *hdr; 240 struct mgmt_hdr *hdr;
@@ -172,7 +254,8 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
172 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 254 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
173 255
174 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 256 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
175 put_unaligned_le16(cmd, &ev->opcode); 257 ev->opcode = cpu_to_le16(cmd);
258 ev->status = status;
176 259
177 if (rp) 260 if (rp)
178 memcpy(ev->data, rp, rp_len); 261 memcpy(ev->data, rp, rp_len);
@@ -181,23 +264,59 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
181 if (err < 0) 264 if (err < 0)
182 kfree_skb(skb); 265 kfree_skb(skb);
183 266
184 return err;; 267 return err;
185} 268}
186 269
187static int read_version(struct sock *sk) 270static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
271 u16 data_len)
188{ 272{
189 struct mgmt_rp_read_version rp; 273 struct mgmt_rp_read_version rp;
190 274
191 BT_DBG("sock %p", sk); 275 BT_DBG("sock %p", sk);
192 276
193 rp.version = MGMT_VERSION; 277 rp.version = MGMT_VERSION;
194 put_unaligned_le16(MGMT_REVISION, &rp.revision); 278 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
279
280 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
281 sizeof(rp));
282}
283
284static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
285 u16 data_len)
286{
287 struct mgmt_rp_read_commands *rp;
288 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
289 const u16 num_events = ARRAY_SIZE(mgmt_events);
290 __le16 *opcode;
291 size_t rp_size;
292 int i, err;
293
294 BT_DBG("sock %p", sk);
295
296 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
297
298 rp = kmalloc(rp_size, GFP_KERNEL);
299 if (!rp)
300 return -ENOMEM;
195 301
196 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp, 302 rp->num_commands = __constant_cpu_to_le16(num_commands);
197 sizeof(rp)); 303 rp->num_events = __constant_cpu_to_le16(num_events);
304
305 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
306 put_unaligned_le16(mgmt_commands[i], opcode);
307
308 for (i = 0; i < num_events; i++, opcode++)
309 put_unaligned_le16(mgmt_events[i], opcode);
310
311 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
312 rp_size);
313 kfree(rp);
314
315 return err;
198} 316}
199 317
200static int read_index_list(struct sock *sk) 318static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
319 u16 data_len)
201{ 320{
202 struct mgmt_rp_read_index_list *rp; 321 struct mgmt_rp_read_index_list *rp;
203 struct list_head *p; 322 struct list_head *p;
@@ -222,24 +341,21 @@ static int read_index_list(struct sock *sk)
222 return -ENOMEM; 341 return -ENOMEM;
223 } 342 }
224 343
225 put_unaligned_le16(count, &rp->num_controllers); 344 rp->num_controllers = cpu_to_le16(count);
226 345
227 i = 0; 346 i = 0;
228 list_for_each_entry(d, &hci_dev_list, list) { 347 list_for_each_entry(d, &hci_dev_list, list) {
229 if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags)) 348 if (test_bit(HCI_SETUP, &d->dev_flags))
230 cancel_delayed_work(&d->power_off);
231
232 if (test_bit(HCI_SETUP, &d->flags))
233 continue; 349 continue;
234 350
235 put_unaligned_le16(d->id, &rp->index[i++]); 351 rp->index[i++] = cpu_to_le16(d->id);
236 BT_DBG("Added hci%u", d->id); 352 BT_DBG("Added hci%u", d->id);
237 } 353 }
238 354
239 read_unlock(&hci_dev_list_lock); 355 read_unlock(&hci_dev_list_lock);
240 356
241 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp, 357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
242 rp_len); 358 rp_len);
243 359
244 kfree(rp); 360 kfree(rp);
245 361
@@ -264,6 +380,9 @@ static u32 get_supported_settings(struct hci_dev *hdev)
264 settings |= MGMT_SETTING_LINK_SECURITY; 380 settings |= MGMT_SETTING_LINK_SECURITY;
265 } 381 }
266 382
383 if (enable_hs)
384 settings |= MGMT_SETTING_HS;
385
267 if (hdev->features[4] & LMP_LE) 386 if (hdev->features[4] & LMP_LE)
268 settings |= MGMT_SETTING_LE; 387 settings |= MGMT_SETTING_LE;
269 388
@@ -274,47 +393,36 @@ static u32 get_current_settings(struct hci_dev *hdev)
274{ 393{
275 u32 settings = 0; 394 u32 settings = 0;
276 395
277 if (test_bit(HCI_UP, &hdev->flags)) 396 if (hdev_is_powered(hdev))
278 settings |= MGMT_SETTING_POWERED; 397 settings |= MGMT_SETTING_POWERED;
279 else
280 return settings;
281 398
282 if (test_bit(HCI_PSCAN, &hdev->flags)) 399 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
283 settings |= MGMT_SETTING_CONNECTABLE; 400 settings |= MGMT_SETTING_CONNECTABLE;
284 401
285 if (test_bit(HCI_ISCAN, &hdev->flags)) 402 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
286 settings |= MGMT_SETTING_DISCOVERABLE; 403 settings |= MGMT_SETTING_DISCOVERABLE;
287 404
288 if (test_bit(HCI_PAIRABLE, &hdev->flags)) 405 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
289 settings |= MGMT_SETTING_PAIRABLE; 406 settings |= MGMT_SETTING_PAIRABLE;
290 407
291 if (!(hdev->features[4] & LMP_NO_BREDR)) 408 if (!(hdev->features[4] & LMP_NO_BREDR))
292 settings |= MGMT_SETTING_BREDR; 409 settings |= MGMT_SETTING_BREDR;
293 410
294 if (hdev->host_features[0] & LMP_HOST_LE) 411 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
295 settings |= MGMT_SETTING_LE; 412 settings |= MGMT_SETTING_LE;
296 413
297 if (test_bit(HCI_AUTH, &hdev->flags)) 414 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
298 settings |= MGMT_SETTING_LINK_SECURITY; 415 settings |= MGMT_SETTING_LINK_SECURITY;
299 416
300 if (hdev->ssp_mode > 0) 417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
301 settings |= MGMT_SETTING_SSP; 418 settings |= MGMT_SETTING_SSP;
302 419
420 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_HS;
422
303 return settings; 423 return settings;
304} 424}
305 425
306#define EIR_FLAGS 0x01 /* flags */
307#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
308#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
309#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
310#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
311#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
312#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
313#define EIR_NAME_SHORT 0x08 /* shortened local name */
314#define EIR_NAME_COMPLETE 0x09 /* complete local name */
315#define EIR_TX_POWER 0x0A /* transmit power level */
316#define EIR_DEVICE_ID 0x10 /* device ID */
317
318#define PNP_INFO_SVCLASS_ID 0x1200 426#define PNP_INFO_SVCLASS_ID 0x1200
319 427
320static u8 bluetooth_base_uuid[] = { 428static u8 bluetooth_base_uuid[] = {
@@ -332,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
332 return 0; 440 return 0;
333 } 441 }
334 442
335 memcpy(&val, &uuid128[12], 4); 443 val = get_unaligned_le32(&uuid128[12]);
336
337 val = le32_to_cpu(val);
338 if (val > 0xffff) 444 if (val > 0xffff)
339 return 0; 445 return 0;
340 446
@@ -369,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
369 ptr += (name_len + 2); 475 ptr += (name_len + 2);
370 } 476 }
371 477
478 if (hdev->inq_tx_power) {
479 ptr[0] = 2;
480 ptr[1] = EIR_TX_POWER;
481 ptr[2] = (u8) hdev->inq_tx_power;
482
483 eir_len += 3;
484 ptr += 3;
485 }
486
487 if (hdev->devid_source > 0) {
488 ptr[0] = 9;
489 ptr[1] = EIR_DEVICE_ID;
490
491 put_unaligned_le16(hdev->devid_source, ptr + 2);
492 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
493 put_unaligned_le16(hdev->devid_product, ptr + 6);
494 put_unaligned_le16(hdev->devid_version, ptr + 8);
495
496 eir_len += 10;
497 ptr += 10;
498 }
499
372 memset(uuid16_list, 0, sizeof(uuid16_list)); 500 memset(uuid16_list, 0, sizeof(uuid16_list));
373 501
374 /* Group all UUID16 types */ 502 /* Group all UUID16 types */
@@ -425,13 +553,16 @@ static int update_eir(struct hci_dev *hdev)
425{ 553{
426 struct hci_cp_write_eir cp; 554 struct hci_cp_write_eir cp;
427 555
556 if (!hdev_is_powered(hdev))
557 return 0;
558
428 if (!(hdev->features[6] & LMP_EXT_INQ)) 559 if (!(hdev->features[6] & LMP_EXT_INQ))
429 return 0; 560 return 0;
430 561
431 if (hdev->ssp_mode == 0) 562 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 return 0; 563 return 0;
433 564
434 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) 565 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
435 return 0; 566 return 0;
436 567
437 memset(&cp, 0, sizeof(cp)); 568 memset(&cp, 0, sizeof(cp));
@@ -460,10 +591,14 @@ static u8 get_service_classes(struct hci_dev *hdev)
460static int update_class(struct hci_dev *hdev) 591static int update_class(struct hci_dev *hdev)
461{ 592{
462 u8 cod[3]; 593 u8 cod[3];
594 int err;
463 595
464 BT_DBG("%s", hdev->name); 596 BT_DBG("%s", hdev->name);
465 597
466 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) 598 if (!hdev_is_powered(hdev))
599 return 0;
600
601 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
467 return 0; 602 return 0;
468 603
469 cod[0] = hdev->minor_class; 604 cod[0] = hdev->minor_class;
@@ -473,15 +608,19 @@ static int update_class(struct hci_dev *hdev)
473 if (memcmp(cod, hdev->dev_class, 3) == 0) 608 if (memcmp(cod, hdev->dev_class, 3) == 0)
474 return 0; 609 return 0;
475 610
476 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); 611 err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
612 if (err == 0)
613 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
614
615 return err;
477} 616}
478 617
479static void service_cache_off(struct work_struct *work) 618static void service_cache_off(struct work_struct *work)
480{ 619{
481 struct hci_dev *hdev = container_of(work, struct hci_dev, 620 struct hci_dev *hdev = container_of(work, struct hci_dev,
482 service_cache.work); 621 service_cache.work);
483 622
484 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) 623 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
485 return; 624 return;
486 625
487 hci_dev_lock(hdev); 626 hci_dev_lock(hdev);
@@ -492,43 +631,36 @@ static void service_cache_off(struct work_struct *work)
492 hci_dev_unlock(hdev); 631 hci_dev_unlock(hdev);
493} 632}
494 633
495static void mgmt_init_hdev(struct hci_dev *hdev) 634static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
496{ 635{
497 if (!test_and_set_bit(HCI_MGMT, &hdev->flags)) 636 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
498 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); 637 return;
499 638
500 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags)) 639 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
501 schedule_delayed_work(&hdev->service_cache, 640
502 msecs_to_jiffies(SERVICE_CACHE_TIMEOUT)); 641 /* Non-mgmt controlled devices get this bit set
642 * implicitly so that pairing works for them, however
643 * for mgmt we require user-space to explicitly enable
644 * it
645 */
646 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
503} 647}
504 648
505static int read_controller_info(struct sock *sk, u16 index) 649static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
650 void *data, u16 data_len)
506{ 651{
507 struct mgmt_rp_read_info rp; 652 struct mgmt_rp_read_info rp;
508 struct hci_dev *hdev;
509
510 BT_DBG("sock %p hci%u", sk, index);
511 653
512 hdev = hci_dev_get(index); 654 BT_DBG("sock %p %s", sk, hdev->name);
513 if (!hdev)
514 return cmd_status(sk, index, MGMT_OP_READ_INFO,
515 MGMT_STATUS_INVALID_PARAMS);
516
517 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
518 cancel_delayed_work_sync(&hdev->power_off);
519 655
520 hci_dev_lock(hdev); 656 hci_dev_lock(hdev);
521 657
522 if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
523 mgmt_init_hdev(hdev);
524
525 memset(&rp, 0, sizeof(rp)); 658 memset(&rp, 0, sizeof(rp));
526 659
527 bacpy(&rp.bdaddr, &hdev->bdaddr); 660 bacpy(&rp.bdaddr, &hdev->bdaddr);
528 661
529 rp.version = hdev->hci_ver; 662 rp.version = hdev->hci_ver;
530 663 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
531 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
532 664
533 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); 665 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
534 rp.current_settings = cpu_to_le32(get_current_settings(hdev)); 666 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -536,11 +668,12 @@ static int read_controller_info(struct sock *sk, u16 index)
536 memcpy(rp.dev_class, hdev->dev_class, 3); 668 memcpy(rp.dev_class, hdev->dev_class, 3);
537 669
538 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 670 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
671 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
539 672
540 hci_dev_unlock(hdev); 673 hci_dev_unlock(hdev);
541 hci_dev_put(hdev);
542 674
543 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 675 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
676 sizeof(rp));
544} 677}
545 678
546static void mgmt_pending_free(struct pending_cmd *cmd) 679static void mgmt_pending_free(struct pending_cmd *cmd)
@@ -551,8 +684,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
551} 684}
552 685
553static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 686static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
554 struct hci_dev *hdev, 687 struct hci_dev *hdev, void *data,
555 void *data, u16 len) 688 u16 len)
556{ 689{
557 struct pending_cmd *cmd; 690 struct pending_cmd *cmd;
558 691
@@ -581,8 +714,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
581} 714}
582 715
583static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
584 void (*cb)(struct pending_cmd *cmd, void *data), 717 void (*cb)(struct pending_cmd *cmd, void *data),
585 void *data) 718 void *data)
586{ 719{
587 struct list_head *p, *n; 720 struct list_head *p, *n;
588 721
@@ -620,40 +753,39 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
620{ 753{
621 __le32 settings = cpu_to_le32(get_current_settings(hdev)); 754 __le32 settings = cpu_to_le32(get_current_settings(hdev));
622 755
623 return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings)); 756 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
757 sizeof(settings));
624} 758}
625 759
626static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) 760static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
761 u16 len)
627{ 762{
628 struct mgmt_mode *cp; 763 struct mgmt_mode *cp = data;
629 struct hci_dev *hdev;
630 struct pending_cmd *cmd; 764 struct pending_cmd *cmd;
631 int err, up; 765 int err;
632
633 cp = (void *) data;
634 766
635 BT_DBG("request for hci%u", index); 767 BT_DBG("request for %s", hdev->name);
636 768
637 if (len != sizeof(*cp)) 769 hci_dev_lock(hdev);
638 return cmd_status(sk, index, MGMT_OP_SET_POWERED,
639 MGMT_STATUS_INVALID_PARAMS);
640 770
641 hdev = hci_dev_get(index); 771 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
642 if (!hdev) 772 cancel_delayed_work(&hdev->power_off);
643 return cmd_status(sk, index, MGMT_OP_SET_POWERED,
644 MGMT_STATUS_INVALID_PARAMS);
645 773
646 hci_dev_lock(hdev); 774 if (cp->val) {
775 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
776 mgmt_powered(hdev, 1);
777 goto failed;
778 }
779 }
647 780
648 up = test_bit(HCI_UP, &hdev->flags); 781 if (!!cp->val == hdev_is_powered(hdev)) {
649 if ((cp->val && up) || (!cp->val && !up)) {
650 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); 782 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
651 goto failed; 783 goto failed;
652 } 784 }
653 785
654 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { 786 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
655 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, 787 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
656 MGMT_STATUS_BUSY); 788 MGMT_STATUS_BUSY);
657 goto failed; 789 goto failed;
658 } 790 }
659 791
@@ -672,49 +804,115 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
672 804
673failed: 805failed:
674 hci_dev_unlock(hdev); 806 hci_dev_unlock(hdev);
675 hci_dev_put(hdev);
676 return err; 807 return err;
677} 808}
678 809
679static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, 810static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
680 u16 len) 811 struct sock *skip_sk)
681{ 812{
682 struct mgmt_cp_set_discoverable *cp; 813 struct sk_buff *skb;
683 struct hci_dev *hdev; 814 struct mgmt_hdr *hdr;
815
816 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
817 if (!skb)
818 return -ENOMEM;
819
820 hdr = (void *) skb_put(skb, sizeof(*hdr));
821 hdr->opcode = cpu_to_le16(event);
822 if (hdev)
823 hdr->index = cpu_to_le16(hdev->id);
824 else
825 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
826 hdr->len = cpu_to_le16(data_len);
827
828 if (data)
829 memcpy(skb_put(skb, data_len), data, data_len);
830
831 /* Time stamp */
832 __net_timestamp(skb);
833
834 hci_send_to_control(skb, skip_sk);
835 kfree_skb(skb);
836
837 return 0;
838}
839
840static int new_settings(struct hci_dev *hdev, struct sock *skip)
841{
842 __le32 ev;
843
844 ev = cpu_to_le32(get_current_settings(hdev));
845
846 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
847}
848
849static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
850 u16 len)
851{
852 struct mgmt_cp_set_discoverable *cp = data;
684 struct pending_cmd *cmd; 853 struct pending_cmd *cmd;
854 u16 timeout;
685 u8 scan; 855 u8 scan;
686 int err; 856 int err;
687 857
688 cp = (void *) data; 858 BT_DBG("request for %s", hdev->name);
689 859
690 BT_DBG("request for hci%u", index); 860 timeout = __le16_to_cpu(cp->timeout);
691 861 if (!cp->val && timeout > 0)
692 if (len != sizeof(*cp)) 862 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
693 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 863 MGMT_STATUS_INVALID_PARAMS);
694 MGMT_STATUS_INVALID_PARAMS);
695
696 hdev = hci_dev_get(index);
697 if (!hdev)
698 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
699 MGMT_STATUS_INVALID_PARAMS);
700 864
701 hci_dev_lock(hdev); 865 hci_dev_lock(hdev);
702 866
703 if (!test_bit(HCI_UP, &hdev->flags)) { 867 if (!hdev_is_powered(hdev) && timeout > 0) {
704 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 868 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
705 MGMT_STATUS_NOT_POWERED); 869 MGMT_STATUS_NOT_POWERED);
706 goto failed; 870 goto failed;
707 } 871 }
708 872
709 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 873 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
710 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 874 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
711 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
712 MGMT_STATUS_BUSY); 876 MGMT_STATUS_BUSY);
877 goto failed;
878 }
879
880 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
881 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
882 MGMT_STATUS_REJECTED);
883 goto failed;
884 }
885
886 if (!hdev_is_powered(hdev)) {
887 bool changed = false;
888
889 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
890 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
891 changed = true;
892 }
893
894 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
895 if (err < 0)
896 goto failed;
897
898 if (changed)
899 err = new_settings(hdev, sk);
900
713 goto failed; 901 goto failed;
714 } 902 }
715 903
716 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && 904 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
717 test_bit(HCI_PSCAN, &hdev->flags)) { 905 if (hdev->discov_timeout > 0) {
906 cancel_delayed_work(&hdev->discov_off);
907 hdev->discov_timeout = 0;
908 }
909
910 if (cp->val && timeout > 0) {
911 hdev->discov_timeout = timeout;
912 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
913 msecs_to_jiffies(hdev->discov_timeout * 1000));
914 }
915
718 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); 916 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
719 goto failed; 917 goto failed;
720 } 918 }
@@ -737,53 +935,56 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
737 mgmt_pending_remove(cmd); 935 mgmt_pending_remove(cmd);
738 936
739 if (cp->val) 937 if (cp->val)
740 hdev->discov_timeout = get_unaligned_le16(&cp->timeout); 938 hdev->discov_timeout = timeout;
741 939
742failed: 940failed:
743 hci_dev_unlock(hdev); 941 hci_dev_unlock(hdev);
744 hci_dev_put(hdev);
745
746 return err; 942 return err;
747} 943}
748 944
749static int set_connectable(struct sock *sk, u16 index, unsigned char *data, 945static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
750 u16 len) 946 u16 len)
751{ 947{
752 struct mgmt_mode *cp; 948 struct mgmt_mode *cp = data;
753 struct hci_dev *hdev;
754 struct pending_cmd *cmd; 949 struct pending_cmd *cmd;
755 u8 scan; 950 u8 scan;
756 int err; 951 int err;
757 952
758 cp = (void *) data; 953 BT_DBG("request for %s", hdev->name);
759 954
760 BT_DBG("request for hci%u", index); 955 hci_dev_lock(hdev);
761 956
762 if (len != sizeof(*cp)) 957 if (!hdev_is_powered(hdev)) {
763 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 958 bool changed = false;
764 MGMT_STATUS_INVALID_PARAMS);
765 959
766 hdev = hci_dev_get(index); 960 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
767 if (!hdev) 961 changed = true;
768 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
769 MGMT_STATUS_INVALID_PARAMS);
770 962
771 hci_dev_lock(hdev); 963 if (cp->val) {
964 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
965 } else {
966 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
967 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
968 }
969
970 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
971 if (err < 0)
972 goto failed;
973
974 if (changed)
975 err = new_settings(hdev, sk);
772 976
773 if (!test_bit(HCI_UP, &hdev->flags)) {
774 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
775 MGMT_STATUS_NOT_POWERED);
776 goto failed; 977 goto failed;
777 } 978 }
778 979
779 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 980 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
780 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 981 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
781 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
782 MGMT_STATUS_BUSY); 983 MGMT_STATUS_BUSY);
783 goto failed; 984 goto failed;
784 } 985 }
785 986
786 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { 987 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
787 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); 988 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
788 goto failed; 989 goto failed;
789 } 990 }
@@ -794,116 +995,280 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
794 goto failed; 995 goto failed;
795 } 996 }
796 997
797 if (cp->val) 998 if (cp->val) {
798 scan = SCAN_PAGE; 999 scan = SCAN_PAGE;
799 else 1000 } else {
800 scan = 0; 1001 scan = 0;
801 1002
1003 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1004 hdev->discov_timeout > 0)
1005 cancel_delayed_work(&hdev->discov_off);
1006 }
1007
802 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1008 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
803 if (err < 0) 1009 if (err < 0)
804 mgmt_pending_remove(cmd); 1010 mgmt_pending_remove(cmd);
805 1011
806failed: 1012failed:
807 hci_dev_unlock(hdev); 1013 hci_dev_unlock(hdev);
808 hci_dev_put(hdev);
809
810 return err; 1014 return err;
811} 1015}
812 1016
813static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, 1017static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
814 u16 data_len, struct sock *skip_sk) 1018 u16 len)
815{ 1019{
816 struct sk_buff *skb; 1020 struct mgmt_mode *cp = data;
817 struct mgmt_hdr *hdr; 1021 int err;
818 1022
819 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 1023 BT_DBG("request for %s", hdev->name);
820 if (!skb)
821 return -ENOMEM;
822 1024
823 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; 1025 hci_dev_lock(hdev);
824 1026
825 hdr = (void *) skb_put(skb, sizeof(*hdr)); 1027 if (cp->val)
826 hdr->opcode = cpu_to_le16(event); 1028 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
827 if (hdev)
828 hdr->index = cpu_to_le16(hdev->id);
829 else 1029 else
830 hdr->index = cpu_to_le16(MGMT_INDEX_NONE); 1030 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
831 hdr->len = cpu_to_le16(data_len);
832 1031
833 if (data) 1032 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
834 memcpy(skb_put(skb, data_len), data, data_len); 1033 if (err < 0)
1034 goto failed;
835 1035
836 hci_send_to_sock(NULL, skb, skip_sk); 1036 err = new_settings(hdev, sk);
837 kfree_skb(skb);
838 1037
839 return 0; 1038failed:
1039 hci_dev_unlock(hdev);
1040 return err;
840} 1041}
841 1042
842static int set_pairable(struct sock *sk, u16 index, unsigned char *data, 1043static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
843 u16 len) 1044 u16 len)
844{ 1045{
845 struct mgmt_mode *cp; 1046 struct mgmt_mode *cp = data;
846 struct hci_dev *hdev; 1047 struct pending_cmd *cmd;
847 __le32 ev; 1048 u8 val;
848 int err; 1049 int err;
849 1050
850 cp = (void *) data; 1051 BT_DBG("request for %s", hdev->name);
851 1052
852 BT_DBG("request for hci%u", index); 1053 hci_dev_lock(hdev);
853 1054
854 if (len != sizeof(*cp)) 1055 if (!hdev_is_powered(hdev)) {
855 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, 1056 bool changed = false;
856 MGMT_STATUS_INVALID_PARAMS);
857 1057
858 hdev = hci_dev_get(index); 1058 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
859 if (!hdev) 1059 &hdev->dev_flags)) {
860 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, 1060 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
861 MGMT_STATUS_INVALID_PARAMS); 1061 changed = true;
1062 }
862 1063
863 hci_dev_lock(hdev); 1064 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1065 if (err < 0)
1066 goto failed;
864 1067
865 if (cp->val) 1068 if (changed)
866 set_bit(HCI_PAIRABLE, &hdev->flags); 1069 err = new_settings(hdev, sk);
867 else
868 clear_bit(HCI_PAIRABLE, &hdev->flags);
869 1070
870 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
871 if (err < 0)
872 goto failed; 1071 goto failed;
1072 }
873 1073
874 ev = cpu_to_le32(get_current_settings(hdev)); 1074 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1075 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1076 MGMT_STATUS_BUSY);
1077 goto failed;
1078 }
1079
1080 val = !!cp->val;
1081
1082 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1083 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1084 goto failed;
1085 }
1086
1087 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1088 if (!cmd) {
1089 err = -ENOMEM;
1090 goto failed;
1091 }
875 1092
876 err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk); 1093 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1094 if (err < 0) {
1095 mgmt_pending_remove(cmd);
1096 goto failed;
1097 }
877 1098
878failed: 1099failed:
879 hci_dev_unlock(hdev); 1100 hci_dev_unlock(hdev);
880 hci_dev_put(hdev); 1101 return err;
1102}
1103
1104static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1105{
1106 struct mgmt_mode *cp = data;
1107 struct pending_cmd *cmd;
1108 u8 val;
1109 int err;
1110
1111 BT_DBG("request for %s", hdev->name);
1112
1113 hci_dev_lock(hdev);
1114
1115 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1116 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1117 MGMT_STATUS_NOT_SUPPORTED);
1118 goto failed;
1119 }
1120
1121 val = !!cp->val;
1122
1123 if (!hdev_is_powered(hdev)) {
1124 bool changed = false;
1125
1126 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1127 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1128 changed = true;
1129 }
1130
1131 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1132 if (err < 0)
1133 goto failed;
1134
1135 if (changed)
1136 err = new_settings(hdev, sk);
1137
1138 goto failed;
1139 }
1140
1141 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1142 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1143 MGMT_STATUS_BUSY);
1144 goto failed;
1145 }
1146
1147 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1148 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1149 goto failed;
1150 }
1151
1152 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1153 if (!cmd) {
1154 err = -ENOMEM;
1155 goto failed;
1156 }
1157
1158 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1159 if (err < 0) {
1160 mgmt_pending_remove(cmd);
1161 goto failed;
1162 }
881 1163
1164failed:
1165 hci_dev_unlock(hdev);
882 return err; 1166 return err;
883} 1167}
884 1168
885static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 1169static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
886{ 1170{
887 struct mgmt_cp_add_uuid *cp; 1171 struct mgmt_mode *cp = data;
888 struct hci_dev *hdev; 1172
889 struct bt_uuid *uuid; 1173 BT_DBG("request for %s", hdev->name);
1174
1175 if (!enable_hs)
1176 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1177 MGMT_STATUS_NOT_SUPPORTED);
1178
1179 if (cp->val)
1180 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1181 else
1182 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1183
1184 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1185}
1186
1187static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1188{
1189 struct mgmt_mode *cp = data;
1190 struct hci_cp_write_le_host_supported hci_cp;
1191 struct pending_cmd *cmd;
890 int err; 1192 int err;
1193 u8 val, enabled;
891 1194
892 cp = (void *) data; 1195 BT_DBG("request for %s", hdev->name);
893 1196
894 BT_DBG("request for hci%u", index); 1197 hci_dev_lock(hdev);
895 1198
896 if (len != sizeof(*cp)) 1199 if (!(hdev->features[4] & LMP_LE)) {
897 return cmd_status(sk, index, MGMT_OP_ADD_UUID, 1200 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
898 MGMT_STATUS_INVALID_PARAMS); 1201 MGMT_STATUS_NOT_SUPPORTED);
1202 goto unlock;
1203 }
1204
1205 val = !!cp->val;
1206 enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
1207
1208 if (!hdev_is_powered(hdev) || val == enabled) {
1209 bool changed = false;
1210
1211 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1212 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1213 changed = true;
1214 }
1215
1216 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1217 if (err < 0)
1218 goto unlock;
1219
1220 if (changed)
1221 err = new_settings(hdev, sk);
899 1222
900 hdev = hci_dev_get(index); 1223 goto unlock;
901 if (!hdev) 1224 }
902 return cmd_status(sk, index, MGMT_OP_ADD_UUID, 1225
903 MGMT_STATUS_INVALID_PARAMS); 1226 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1227 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1228 MGMT_STATUS_BUSY);
1229 goto unlock;
1230 }
1231
1232 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1233 if (!cmd) {
1234 err = -ENOMEM;
1235 goto unlock;
1236 }
1237
1238 memset(&hci_cp, 0, sizeof(hci_cp));
1239
1240 if (val) {
1241 hci_cp.le = val;
1242 hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
1243 }
1244
1245 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1246 &hci_cp);
1247 if (err < 0)
1248 mgmt_pending_remove(cmd);
1249
1250unlock:
1251 hci_dev_unlock(hdev);
1252 return err;
1253}
1254
1255static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1256{
1257 struct mgmt_cp_add_uuid *cp = data;
1258 struct pending_cmd *cmd;
1259 struct bt_uuid *uuid;
1260 int err;
1261
1262 BT_DBG("request for %s", hdev->name);
904 1263
905 hci_dev_lock(hdev); 1264 hci_dev_lock(hdev);
906 1265
1266 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1267 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1268 MGMT_STATUS_BUSY);
1269 goto failed;
1270 }
1271
907 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 1272 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
908 if (!uuid) { 1273 if (!uuid) {
909 err = -ENOMEM; 1274 err = -ENOMEM;
@@ -923,41 +1288,63 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
923 if (err < 0) 1288 if (err < 0)
924 goto failed; 1289 goto failed;
925 1290
926 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 1291 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1292 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1293 hdev->dev_class, 3);
1294 goto failed;
1295 }
1296
1297 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1298 if (!cmd)
1299 err = -ENOMEM;
927 1300
928failed: 1301failed:
929 hci_dev_unlock(hdev); 1302 hci_dev_unlock(hdev);
930 hci_dev_put(hdev);
931
932 return err; 1303 return err;
933} 1304}
934 1305
935static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 1306static bool enable_service_cache(struct hci_dev *hdev)
936{ 1307{
937 struct list_head *p, *n; 1308 if (!hdev_is_powered(hdev))
938 struct mgmt_cp_remove_uuid *cp; 1309 return false;
939 struct hci_dev *hdev;
940 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
941 int err, found;
942 1310
943 cp = (void *) data; 1311 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1312 schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
1313 return true;
1314 }
944 1315
945 BT_DBG("request for hci%u", index); 1316 return false;
1317}
946 1318
947 if (len != sizeof(*cp)) 1319static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
948 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, 1320 u16 len)
949 MGMT_STATUS_INVALID_PARAMS); 1321{
1322 struct mgmt_cp_remove_uuid *cp = data;
1323 struct pending_cmd *cmd;
1324 struct list_head *p, *n;
1325 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1326 int err, found;
950 1327
951 hdev = hci_dev_get(index); 1328 BT_DBG("request for %s", hdev->name);
952 if (!hdev)
953 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
954 MGMT_STATUS_INVALID_PARAMS);
955 1329
956 hci_dev_lock(hdev); 1330 hci_dev_lock(hdev);
957 1331
1332 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1333 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1334 MGMT_STATUS_BUSY);
1335 goto unlock;
1336 }
1337
958 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 1338 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
959 err = hci_uuids_clear(hdev); 1339 err = hci_uuids_clear(hdev);
960 goto unlock; 1340
1341 if (enable_service_cache(hdev)) {
1342 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1343 0, hdev->dev_class, 3);
1344 goto unlock;
1345 }
1346
1347 goto update_class;
961 } 1348 }
962 1349
963 found = 0; 1350 found = 0;
@@ -973,11 +1360,12 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
973 } 1360 }
974 1361
975 if (found == 0) { 1362 if (found == 0) {
976 err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, 1363 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
977 MGMT_STATUS_INVALID_PARAMS); 1364 MGMT_STATUS_INVALID_PARAMS);
978 goto unlock; 1365 goto unlock;
979 } 1366 }
980 1367
1368update_class:
981 err = update_class(hdev); 1369 err = update_class(hdev);
982 if (err < 0) 1370 if (err < 0)
983 goto unlock; 1371 goto unlock;
@@ -986,41 +1374,48 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
986 if (err < 0) 1374 if (err < 0)
987 goto unlock; 1375 goto unlock;
988 1376
989 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 1377 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1378 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1379 hdev->dev_class, 3);
1380 goto unlock;
1381 }
1382
1383 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1384 if (!cmd)
1385 err = -ENOMEM;
990 1386
991unlock: 1387unlock:
992 hci_dev_unlock(hdev); 1388 hci_dev_unlock(hdev);
993 hci_dev_put(hdev);
994
995 return err; 1389 return err;
996} 1390}
997 1391
998static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, 1392static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
999 u16 len) 1393 u16 len)
1000{ 1394{
1001 struct hci_dev *hdev; 1395 struct mgmt_cp_set_dev_class *cp = data;
1002 struct mgmt_cp_set_dev_class *cp; 1396 struct pending_cmd *cmd;
1003 int err; 1397 int err;
1004 1398
1005 cp = (void *) data; 1399 BT_DBG("request for %s", hdev->name);
1006
1007 BT_DBG("request for hci%u", index);
1008
1009 if (len != sizeof(*cp))
1010 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
1011 MGMT_STATUS_INVALID_PARAMS);
1012
1013 hdev = hci_dev_get(index);
1014 if (!hdev)
1015 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
1016 MGMT_STATUS_INVALID_PARAMS);
1017 1400
1018 hci_dev_lock(hdev); 1401 hci_dev_lock(hdev);
1019 1402
1403 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1404 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1405 MGMT_STATUS_BUSY);
1406 goto unlock;
1407 }
1408
1020 hdev->major_class = cp->major; 1409 hdev->major_class = cp->major;
1021 hdev->minor_class = cp->minor; 1410 hdev->minor_class = cp->minor;
1022 1411
1023 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) { 1412 if (!hdev_is_powered(hdev)) {
1413 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1414 hdev->dev_class, 3);
1415 goto unlock;
1416 }
1417
1418 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1024 hci_dev_unlock(hdev); 1419 hci_dev_unlock(hdev);
1025 cancel_delayed_work_sync(&hdev->service_cache); 1420 cancel_delayed_work_sync(&hdev->service_cache);
1026 hci_dev_lock(hdev); 1421 hci_dev_lock(hdev);
@@ -1028,148 +1423,155 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
1028 } 1423 }
1029 1424
1030 err = update_class(hdev); 1425 err = update_class(hdev);
1426 if (err < 0)
1427 goto unlock;
1031 1428
1032 if (err == 0) 1429 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1033 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 1430 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1431 hdev->dev_class, 3);
1432 goto unlock;
1433 }
1034 1434
1035 hci_dev_unlock(hdev); 1435 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1036 hci_dev_put(hdev); 1436 if (!cmd)
1437 err = -ENOMEM;
1037 1438
1439unlock:
1440 hci_dev_unlock(hdev);
1038 return err; 1441 return err;
1039} 1442}
1040 1443
1041static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, 1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1042 u16 len) 1445 u16 len)
1043{ 1446{
1044 struct hci_dev *hdev; 1447 struct mgmt_cp_load_link_keys *cp = data;
1045 struct mgmt_cp_load_link_keys *cp;
1046 u16 key_count, expected_len; 1448 u16 key_count, expected_len;
1047 int i; 1449 int i;
1048 1450
1049 cp = (void *) data; 1451 key_count = __le16_to_cpu(cp->key_count);
1050
1051 if (len < sizeof(*cp))
1052 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
1053 MGMT_STATUS_INVALID_PARAMS);
1054
1055 key_count = get_unaligned_le16(&cp->key_count);
1056 1452
1057 expected_len = sizeof(*cp) + key_count * 1453 expected_len = sizeof(*cp) + key_count *
1058 sizeof(struct mgmt_link_key_info); 1454 sizeof(struct mgmt_link_key_info);
1059 if (expected_len != len) { 1455 if (expected_len != len) {
1060 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1456 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1061 len, expected_len); 1457 len, expected_len);
1062 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, 1458 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1063 MGMT_STATUS_INVALID_PARAMS); 1459 MGMT_STATUS_INVALID_PARAMS);
1064 } 1460 }
1065 1461
1066 hdev = hci_dev_get(index); 1462 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1067 if (!hdev)
1068 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
1069 MGMT_STATUS_INVALID_PARAMS);
1070
1071 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
1072 key_count); 1463 key_count);
1073 1464
1074 hci_dev_lock(hdev); 1465 hci_dev_lock(hdev);
1075 1466
1076 hci_link_keys_clear(hdev); 1467 hci_link_keys_clear(hdev);
1077 1468
1078 set_bit(HCI_LINK_KEYS, &hdev->flags); 1469 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1079 1470
1080 if (cp->debug_keys) 1471 if (cp->debug_keys)
1081 set_bit(HCI_DEBUG_KEYS, &hdev->flags); 1472 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1082 else 1473 else
1083 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 1474 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1084 1475
1085 for (i = 0; i < key_count; i++) { 1476 for (i = 0; i < key_count; i++) {
1086 struct mgmt_link_key_info *key = &cp->keys[i]; 1477 struct mgmt_link_key_info *key = &cp->keys[i];
1087 1478
1088 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 1479 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1089 key->pin_len); 1480 key->type, key->pin_len);
1090 } 1481 }
1091 1482
1092 cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0); 1483 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1093 1484
1094 hci_dev_unlock(hdev); 1485 hci_dev_unlock(hdev);
1095 hci_dev_put(hdev);
1096 1486
1097 return 0; 1487 return 0;
1098} 1488}
1099 1489
1100static int remove_keys(struct sock *sk, u16 index, unsigned char *data, 1490static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1101 u16 len) 1491 u8 addr_type, struct sock *skip_sk)
1102{ 1492{
1103 struct hci_dev *hdev; 1493 struct mgmt_ev_device_unpaired ev;
1104 struct mgmt_cp_remove_keys *cp; 1494
1105 struct mgmt_rp_remove_keys rp; 1495 bacpy(&ev.addr.bdaddr, bdaddr);
1496 ev.addr.type = addr_type;
1497
1498 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1499 skip_sk);
1500}
1501
1502static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1503 u16 len)
1504{
1505 struct mgmt_cp_unpair_device *cp = data;
1506 struct mgmt_rp_unpair_device rp;
1106 struct hci_cp_disconnect dc; 1507 struct hci_cp_disconnect dc;
1107 struct pending_cmd *cmd; 1508 struct pending_cmd *cmd;
1108 struct hci_conn *conn; 1509 struct hci_conn *conn;
1109 int err; 1510 int err;
1110 1511
1111 cp = (void *) data;
1112
1113 if (len != sizeof(*cp))
1114 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
1115 MGMT_STATUS_INVALID_PARAMS);
1116
1117 hdev = hci_dev_get(index);
1118 if (!hdev)
1119 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
1120 MGMT_STATUS_INVALID_PARAMS);
1121
1122 hci_dev_lock(hdev); 1512 hci_dev_lock(hdev);
1123 1513
1124 memset(&rp, 0, sizeof(rp)); 1514 memset(&rp, 0, sizeof(rp));
1125 bacpy(&rp.bdaddr, &cp->bdaddr); 1515 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1126 rp.status = MGMT_STATUS_FAILED; 1516 rp.addr.type = cp->addr.type;
1127 1517
1128 err = hci_remove_link_key(hdev, &cp->bdaddr); 1518 if (!hdev_is_powered(hdev)) {
1129 if (err < 0) { 1519 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1130 rp.status = MGMT_STATUS_NOT_PAIRED; 1520 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1131 goto unlock; 1521 goto unlock;
1132 } 1522 }
1133 1523
1134 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) { 1524 if (cp->addr.type == BDADDR_BREDR)
1135 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, 1525 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1136 sizeof(rp)); 1526 else
1527 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1528
1529 if (err < 0) {
1530 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1531 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1137 goto unlock; 1532 goto unlock;
1138 } 1533 }
1139 1534
1140 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1535 if (cp->disconnect) {
1536 if (cp->addr.type == BDADDR_BREDR)
1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1538 &cp->addr.bdaddr);
1539 else
1540 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1541 &cp->addr.bdaddr);
1542 } else {
1543 conn = NULL;
1544 }
1545
1141 if (!conn) { 1546 if (!conn) {
1142 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, 1547 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1143 sizeof(rp)); 1548 &rp, sizeof(rp));
1549 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1144 goto unlock; 1550 goto unlock;
1145 } 1551 }
1146 1552
1147 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp)); 1553 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1554 sizeof(*cp));
1148 if (!cmd) { 1555 if (!cmd) {
1149 err = -ENOMEM; 1556 err = -ENOMEM;
1150 goto unlock; 1557 goto unlock;
1151 } 1558 }
1152 1559
1153 put_unaligned_le16(conn->handle, &dc.handle); 1560 dc.handle = cpu_to_le16(conn->handle);
1154 dc.reason = 0x13; /* Remote User Terminated Connection */ 1561 dc.reason = 0x13; /* Remote User Terminated Connection */
1155 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1562 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1156 if (err < 0) 1563 if (err < 0)
1157 mgmt_pending_remove(cmd); 1564 mgmt_pending_remove(cmd);
1158 1565
1159unlock: 1566unlock:
1160 if (err < 0)
1161 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
1162 sizeof(rp));
1163 hci_dev_unlock(hdev); 1567 hci_dev_unlock(hdev);
1164 hci_dev_put(hdev);
1165
1166 return err; 1568 return err;
1167} 1569}
1168 1570
1169static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) 1571static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1572 u16 len)
1170{ 1573{
1171 struct hci_dev *hdev; 1574 struct mgmt_cp_disconnect *cp = data;
1172 struct mgmt_cp_disconnect *cp;
1173 struct hci_cp_disconnect dc; 1575 struct hci_cp_disconnect dc;
1174 struct pending_cmd *cmd; 1576 struct pending_cmd *cmd;
1175 struct hci_conn *conn; 1577 struct hci_conn *conn;
@@ -1177,38 +1579,28 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1177 1579
1178 BT_DBG(""); 1580 BT_DBG("");
1179 1581
1180 cp = (void *) data;
1181
1182 if (len != sizeof(*cp))
1183 return cmd_status(sk, index, MGMT_OP_DISCONNECT,
1184 MGMT_STATUS_INVALID_PARAMS);
1185
1186 hdev = hci_dev_get(index);
1187 if (!hdev)
1188 return cmd_status(sk, index, MGMT_OP_DISCONNECT,
1189 MGMT_STATUS_INVALID_PARAMS);
1190
1191 hci_dev_lock(hdev); 1582 hci_dev_lock(hdev);
1192 1583
1193 if (!test_bit(HCI_UP, &hdev->flags)) { 1584 if (!test_bit(HCI_UP, &hdev->flags)) {
1194 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1585 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1195 MGMT_STATUS_NOT_POWERED); 1586 MGMT_STATUS_NOT_POWERED);
1196 goto failed; 1587 goto failed;
1197 } 1588 }
1198 1589
1199 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 1590 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1200 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1591 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1201 MGMT_STATUS_BUSY); 1592 MGMT_STATUS_BUSY);
1202 goto failed; 1593 goto failed;
1203 } 1594 }
1204 1595
1205 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1596 if (cp->addr.type == BDADDR_BREDR)
1206 if (!conn) 1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1207 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); 1598 else
1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1208 1600
1209 if (!conn) { 1601 if (!conn) {
1210 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1602 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1211 MGMT_STATUS_NOT_CONNECTED); 1603 MGMT_STATUS_NOT_CONNECTED);
1212 goto failed; 1604 goto failed;
1213 } 1605 }
1214 1606
@@ -1218,7 +1610,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1218 goto failed; 1610 goto failed;
1219 } 1611 }
1220 1612
1221 put_unaligned_le16(conn->handle, &dc.handle); 1613 dc.handle = cpu_to_le16(conn->handle);
1222 dc.reason = 0x13; /* Remote User Terminated Connection */ 1614 dc.reason = 0x13; /* Remote User Terminated Connection */
1223 1615
1224 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1227,151 +1619,142 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1227 1619
1228failed: 1620failed:
1229 hci_dev_unlock(hdev); 1621 hci_dev_unlock(hdev);
1230 hci_dev_put(hdev);
1231
1232 return err; 1622 return err;
1233} 1623}
1234 1624
1235static u8 link_to_mgmt(u8 link_type, u8 addr_type) 1625static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1236{ 1626{
1237 switch (link_type) { 1627 switch (link_type) {
1238 case LE_LINK: 1628 case LE_LINK:
1239 switch (addr_type) { 1629 switch (addr_type) {
1240 case ADDR_LE_DEV_PUBLIC: 1630 case ADDR_LE_DEV_PUBLIC:
1241 return MGMT_ADDR_LE_PUBLIC; 1631 return BDADDR_LE_PUBLIC;
1242 case ADDR_LE_DEV_RANDOM: 1632
1243 return MGMT_ADDR_LE_RANDOM;
1244 default: 1633 default:
1245 return MGMT_ADDR_INVALID; 1634 /* Fallback to LE Random address type */
1635 return BDADDR_LE_RANDOM;
1246 } 1636 }
1247 case ACL_LINK: 1637
1248 return MGMT_ADDR_BREDR;
1249 default: 1638 default:
1250 return MGMT_ADDR_INVALID; 1639 /* Fallback to BR/EDR type */
1640 return BDADDR_BREDR;
1251 } 1641 }
1252} 1642}
1253 1643
1254static int get_connections(struct sock *sk, u16 index) 1644static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1645 u16 data_len)
1255{ 1646{
1256 struct mgmt_rp_get_connections *rp; 1647 struct mgmt_rp_get_connections *rp;
1257 struct hci_dev *hdev;
1258 struct hci_conn *c; 1648 struct hci_conn *c;
1259 struct list_head *p;
1260 size_t rp_len; 1649 size_t rp_len;
1261 u16 count; 1650 int err;
1262 int i, err; 1651 u16 i;
1263 1652
1264 BT_DBG(""); 1653 BT_DBG("");
1265 1654
1266 hdev = hci_dev_get(index);
1267 if (!hdev)
1268 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
1269 MGMT_STATUS_INVALID_PARAMS);
1270
1271 hci_dev_lock(hdev); 1655 hci_dev_lock(hdev);
1272 1656
1273 count = 0; 1657 if (!hdev_is_powered(hdev)) {
1274 list_for_each(p, &hdev->conn_hash.list) { 1658 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1275 count++; 1659 MGMT_STATUS_NOT_POWERED);
1660 goto unlock;
1661 }
1662
1663 i = 0;
1664 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1665 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1666 i++;
1276 } 1667 }
1277 1668
1278 rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info)); 1669 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1279 rp = kmalloc(rp_len, GFP_ATOMIC); 1670 rp = kmalloc(rp_len, GFP_ATOMIC);
1280 if (!rp) { 1671 if (!rp) {
1281 err = -ENOMEM; 1672 err = -ENOMEM;
1282 goto unlock; 1673 goto unlock;
1283 } 1674 }
1284 1675
1285 put_unaligned_le16(count, &rp->conn_count);
1286
1287 i = 0; 1676 i = 0;
1288 list_for_each_entry(c, &hdev->conn_hash.list, list) { 1677 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1678 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1679 continue;
1289 bacpy(&rp->addr[i].bdaddr, &c->dst); 1680 bacpy(&rp->addr[i].bdaddr, &c->dst);
1290 rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); 1681 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1291 if (rp->addr[i].type == MGMT_ADDR_INVALID) 1682 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1292 continue; 1683 continue;
1293 i++; 1684 i++;
1294 } 1685 }
1295 1686
1687 rp->conn_count = cpu_to_le16(i);
1688
1296 /* Recalculate length in case of filtered SCO connections, etc */ 1689 /* Recalculate length in case of filtered SCO connections, etc */
1297 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 1690 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1298 1691
1299 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 1692 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1693 rp_len);
1300 1694
1301unlock:
1302 kfree(rp); 1695 kfree(rp);
1696
1697unlock:
1303 hci_dev_unlock(hdev); 1698 hci_dev_unlock(hdev);
1304 hci_dev_put(hdev);
1305 return err; 1699 return err;
1306} 1700}
1307 1701
1308static int send_pin_code_neg_reply(struct sock *sk, u16 index, 1702static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1309 struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) 1703 struct mgmt_cp_pin_code_neg_reply *cp)
1310{ 1704{
1311 struct pending_cmd *cmd; 1705 struct pending_cmd *cmd;
1312 int err; 1706 int err;
1313 1707
1314 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, 1708 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1315 sizeof(*cp)); 1709 sizeof(*cp));
1316 if (!cmd) 1710 if (!cmd)
1317 return -ENOMEM; 1711 return -ENOMEM;
1318 1712
1319 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), 1713 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1320 &cp->bdaddr); 1714 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1321 if (err < 0) 1715 if (err < 0)
1322 mgmt_pending_remove(cmd); 1716 mgmt_pending_remove(cmd);
1323 1717
1324 return err; 1718 return err;
1325} 1719}
1326 1720
1327static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, 1721static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1328 u16 len) 1722 u16 len)
1329{ 1723{
1330 struct hci_dev *hdev;
1331 struct hci_conn *conn; 1724 struct hci_conn *conn;
1332 struct mgmt_cp_pin_code_reply *cp; 1725 struct mgmt_cp_pin_code_reply *cp = data;
1333 struct mgmt_cp_pin_code_neg_reply ncp;
1334 struct hci_cp_pin_code_reply reply; 1726 struct hci_cp_pin_code_reply reply;
1335 struct pending_cmd *cmd; 1727 struct pending_cmd *cmd;
1336 int err; 1728 int err;
1337 1729
1338 BT_DBG(""); 1730 BT_DBG("");
1339 1731
1340 cp = (void *) data;
1341
1342 if (len != sizeof(*cp))
1343 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1344 MGMT_STATUS_INVALID_PARAMS);
1345
1346 hdev = hci_dev_get(index);
1347 if (!hdev)
1348 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1349 MGMT_STATUS_INVALID_PARAMS);
1350
1351 hci_dev_lock(hdev); 1732 hci_dev_lock(hdev);
1352 1733
1353 if (!test_bit(HCI_UP, &hdev->flags)) { 1734 if (!hdev_is_powered(hdev)) {
1354 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1735 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1355 MGMT_STATUS_NOT_POWERED); 1736 MGMT_STATUS_NOT_POWERED);
1356 goto failed; 1737 goto failed;
1357 } 1738 }
1358 1739
1359 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1740 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1360 if (!conn) { 1741 if (!conn) {
1361 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1742 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1362 MGMT_STATUS_NOT_CONNECTED); 1743 MGMT_STATUS_NOT_CONNECTED);
1363 goto failed; 1744 goto failed;
1364 } 1745 }
1365 1746
1366 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { 1747 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1367 bacpy(&ncp.bdaddr, &cp->bdaddr); 1748 struct mgmt_cp_pin_code_neg_reply ncp;
1749
1750 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1368 1751
1369 BT_ERR("PIN code is not 16 bytes long"); 1752 BT_ERR("PIN code is not 16 bytes long");
1370 1753
1371 err = send_pin_code_neg_reply(sk, index, hdev, &ncp); 1754 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1372 if (err >= 0) 1755 if (err >= 0)
1373 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1756 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1374 MGMT_STATUS_INVALID_PARAMS); 1757 MGMT_STATUS_INVALID_PARAMS);
1375 1758
1376 goto failed; 1759 goto failed;
1377 } 1760 }
@@ -1382,7 +1765,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1382 goto failed; 1765 goto failed;
1383 } 1766 }
1384 1767
1385 bacpy(&reply.bdaddr, &cp->bdaddr); 1768 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1386 reply.pin_len = cp->pin_len; 1769 reply.pin_len = cp->pin_len;
1387 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); 1770 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1388 1771
@@ -1392,67 +1775,39 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1392 1775
1393failed: 1776failed:
1394 hci_dev_unlock(hdev); 1777 hci_dev_unlock(hdev);
1395 hci_dev_put(hdev);
1396
1397 return err; 1778 return err;
1398} 1779}
1399 1780
1400static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, 1781static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1401 u16 len) 1782 void *data, u16 len)
1402{ 1783{
1403 struct hci_dev *hdev; 1784 struct mgmt_cp_pin_code_neg_reply *cp = data;
1404 struct mgmt_cp_pin_code_neg_reply *cp;
1405 int err; 1785 int err;
1406 1786
1407 BT_DBG(""); 1787 BT_DBG("");
1408 1788
1409 cp = (void *) data;
1410
1411 if (len != sizeof(*cp))
1412 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1413 MGMT_STATUS_INVALID_PARAMS);
1414
1415 hdev = hci_dev_get(index);
1416 if (!hdev)
1417 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1418 MGMT_STATUS_INVALID_PARAMS);
1419
1420 hci_dev_lock(hdev); 1789 hci_dev_lock(hdev);
1421 1790
1422 if (!test_bit(HCI_UP, &hdev->flags)) { 1791 if (!hdev_is_powered(hdev)) {
1423 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1792 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
1424 MGMT_STATUS_NOT_POWERED); 1793 MGMT_STATUS_NOT_POWERED);
1425 goto failed; 1794 goto failed;
1426 } 1795 }
1427 1796
1428 err = send_pin_code_neg_reply(sk, index, hdev, cp); 1797 err = send_pin_code_neg_reply(sk, hdev, cp);
1429 1798
1430failed: 1799failed:
1431 hci_dev_unlock(hdev); 1800 hci_dev_unlock(hdev);
1432 hci_dev_put(hdev);
1433
1434 return err; 1801 return err;
1435} 1802}
1436 1803
1437static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, 1804static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1438 u16 len) 1805 u16 len)
1439{ 1806{
1440 struct hci_dev *hdev; 1807 struct mgmt_cp_set_io_capability *cp = data;
1441 struct mgmt_cp_set_io_capability *cp;
1442 1808
1443 BT_DBG(""); 1809 BT_DBG("");
1444 1810
1445 cp = (void *) data;
1446
1447 if (len != sizeof(*cp))
1448 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
1449 MGMT_STATUS_INVALID_PARAMS);
1450
1451 hdev = hci_dev_get(index);
1452 if (!hdev)
1453 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
1454 MGMT_STATUS_INVALID_PARAMS);
1455
1456 hci_dev_lock(hdev); 1811 hci_dev_lock(hdev);
1457 1812
1458 hdev->io_capability = cp->io_capability; 1813 hdev->io_capability = cp->io_capability;
@@ -1461,9 +1816,9 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1461 hdev->io_capability); 1816 hdev->io_capability);
1462 1817
1463 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1464 hci_dev_put(hdev);
1465 1819
1466 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1820 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
1821 0);
1467} 1822}
1468 1823
1469static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
@@ -1490,10 +1845,10 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
1490 struct hci_conn *conn = cmd->user_data; 1845 struct hci_conn *conn = cmd->user_data;
1491 1846
1492 bacpy(&rp.addr.bdaddr, &conn->dst); 1847 bacpy(&rp.addr.bdaddr, &conn->dst);
1493 rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); 1848 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
1494 rp.status = status;
1495 1849
1496 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); 1850 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
1851 &rp, sizeof(rp));
1497 1852
1498 /* So we don't get further callbacks for this connection */ 1853 /* So we don't get further callbacks for this connection */
1499 conn->connect_cfm_cb = NULL; 1854 conn->connect_cfm_cb = NULL;
@@ -1515,13 +1870,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1515 if (!cmd) 1870 if (!cmd)
1516 BT_DBG("Unable to find a pending command"); 1871 BT_DBG("Unable to find a pending command");
1517 else 1872 else
1518 pairing_complete(cmd, status); 1873 pairing_complete(cmd, mgmt_status(status));
1519} 1874}
1520 1875
1521static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) 1876static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1877 u16 len)
1522{ 1878{
1523 struct hci_dev *hdev; 1879 struct mgmt_cp_pair_device *cp = data;
1524 struct mgmt_cp_pair_device *cp;
1525 struct mgmt_rp_pair_device rp; 1880 struct mgmt_rp_pair_device rp;
1526 struct pending_cmd *cmd; 1881 struct pending_cmd *cmd;
1527 u8 sec_level, auth_type; 1882 u8 sec_level, auth_type;
@@ -1530,48 +1885,42 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1530 1885
1531 BT_DBG(""); 1886 BT_DBG("");
1532 1887
1533 cp = (void *) data;
1534
1535 if (len != sizeof(*cp))
1536 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
1537 MGMT_STATUS_INVALID_PARAMS);
1538
1539 hdev = hci_dev_get(index);
1540 if (!hdev)
1541 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
1542 MGMT_STATUS_INVALID_PARAMS);
1543
1544 hci_dev_lock(hdev); 1888 hci_dev_lock(hdev);
1545 1889
1890 if (!hdev_is_powered(hdev)) {
1891 err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1892 MGMT_STATUS_NOT_POWERED);
1893 goto unlock;
1894 }
1895
1546 sec_level = BT_SECURITY_MEDIUM; 1896 sec_level = BT_SECURITY_MEDIUM;
1547 if (cp->io_cap == 0x03) 1897 if (cp->io_cap == 0x03)
1548 auth_type = HCI_AT_DEDICATED_BONDING; 1898 auth_type = HCI_AT_DEDICATED_BONDING;
1549 else 1899 else
1550 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1900 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1551 1901
1552 if (cp->addr.type == MGMT_ADDR_BREDR) 1902 if (cp->addr.type == BDADDR_BREDR)
1553 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, 1903 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
1554 auth_type); 1904 cp->addr.type, sec_level, auth_type);
1555 else 1905 else
1556 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, 1906 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
1557 auth_type); 1907 cp->addr.type, sec_level, auth_type);
1558 1908
1559 memset(&rp, 0, sizeof(rp)); 1909 memset(&rp, 0, sizeof(rp));
1560 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); 1910 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1561 rp.addr.type = cp->addr.type; 1911 rp.addr.type = cp->addr.type;
1562 1912
1563 if (IS_ERR(conn)) { 1913 if (IS_ERR(conn)) {
1564 rp.status = -PTR_ERR(conn); 1914 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1565 err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, 1915 MGMT_STATUS_CONNECT_FAILED, &rp,
1566 &rp, sizeof(rp)); 1916 sizeof(rp));
1567 goto unlock; 1917 goto unlock;
1568 } 1918 }
1569 1919
1570 if (conn->connect_cfm_cb) { 1920 if (conn->connect_cfm_cb) {
1571 hci_conn_put(conn); 1921 hci_conn_put(conn);
1572 rp.status = EBUSY; 1922 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1573 err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, 1923 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1574 &rp, sizeof(rp));
1575 goto unlock; 1924 goto unlock;
1576 } 1925 }
1577 1926
@@ -1583,7 +1932,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1583 } 1932 }
1584 1933
1585 /* For LE, just connecting isn't a proof that the pairing finished */ 1934 /* For LE, just connecting isn't a proof that the pairing finished */
1586 if (cp->addr.type == MGMT_ADDR_BREDR) 1935 if (cp->addr.type == BDADDR_BREDR)
1587 conn->connect_cfm_cb = pairing_complete_cb; 1936 conn->connect_cfm_cb = pairing_complete_cb;
1588 1937
1589 conn->security_cfm_cb = pairing_complete_cb; 1938 conn->security_cfm_cb = pairing_complete_cb;
@@ -1599,58 +1948,88 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1599 1948
1600unlock: 1949unlock:
1601 hci_dev_unlock(hdev); 1950 hci_dev_unlock(hdev);
1602 hci_dev_put(hdev);
1603
1604 return err; 1951 return err;
1605} 1952}
1606 1953
1607static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr, 1954static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1608 u16 mgmt_op, u16 hci_op, __le32 passkey) 1955 u16 len)
1609{ 1956{
1957 struct mgmt_addr_info *addr = data;
1610 struct pending_cmd *cmd; 1958 struct pending_cmd *cmd;
1611 struct hci_dev *hdev;
1612 struct hci_conn *conn; 1959 struct hci_conn *conn;
1613 int err; 1960 int err;
1614 1961
1615 hdev = hci_dev_get(index); 1962 BT_DBG("");
1616 if (!hdev)
1617 return cmd_status(sk, index, mgmt_op,
1618 MGMT_STATUS_INVALID_PARAMS);
1619 1963
1620 hci_dev_lock(hdev); 1964 hci_dev_lock(hdev);
1621 1965
1622 if (!test_bit(HCI_UP, &hdev->flags)) { 1966 if (!hdev_is_powered(hdev)) {
1623 err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED); 1967 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1968 MGMT_STATUS_NOT_POWERED);
1969 goto unlock;
1970 }
1971
1972 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
1973 if (!cmd) {
1974 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1975 MGMT_STATUS_INVALID_PARAMS);
1976 goto unlock;
1977 }
1978
1979 conn = cmd->user_data;
1980
1981 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
1982 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1983 MGMT_STATUS_INVALID_PARAMS);
1984 goto unlock;
1985 }
1986
1987 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
1988
1989 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
1990 addr, sizeof(*addr));
1991unlock:
1992 hci_dev_unlock(hdev);
1993 return err;
1994}
1995
1996static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
1997 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
1998 u16 hci_op, __le32 passkey)
1999{
2000 struct pending_cmd *cmd;
2001 struct hci_conn *conn;
2002 int err;
2003
2004 hci_dev_lock(hdev);
2005
2006 if (!hdev_is_powered(hdev)) {
2007 err = cmd_status(sk, hdev->id, mgmt_op,
2008 MGMT_STATUS_NOT_POWERED);
1624 goto done; 2009 goto done;
1625 } 2010 }
1626 2011
1627 /* 2012 if (type == BDADDR_BREDR)
1628 * Check for an existing ACL link, if present pair via 2013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
1629 * HCI commands. 2014 else
1630 *
1631 * If no ACL link is present, check for an LE link and if
1632 * present, pair via the SMP engine.
1633 *
1634 * If neither ACL nor LE links are present, fail with error.
1635 */
1636 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
1637 if (!conn) {
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 2015 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
1639 if (!conn) {
1640 err = cmd_status(sk, index, mgmt_op,
1641 MGMT_STATUS_NOT_CONNECTED);
1642 goto done;
1643 }
1644 2016
2017 if (!conn) {
2018 err = cmd_status(sk, hdev->id, mgmt_op,
2019 MGMT_STATUS_NOT_CONNECTED);
2020 goto done;
2021 }
2022
2023 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
1645 /* Continue with pairing via SMP */ 2024 /* Continue with pairing via SMP */
1646 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 2025 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
1647 2026
1648 if (!err) 2027 if (!err)
1649 err = cmd_status(sk, index, mgmt_op, 2028 err = cmd_status(sk, hdev->id, mgmt_op,
1650 MGMT_STATUS_SUCCESS); 2029 MGMT_STATUS_SUCCESS);
1651 else 2030 else
1652 err = cmd_status(sk, index, mgmt_op, 2031 err = cmd_status(sk, hdev->id, mgmt_op,
1653 MGMT_STATUS_FAILED); 2032 MGMT_STATUS_FAILED);
1654 2033
1655 goto done; 2034 goto done;
1656 } 2035 }
@@ -1676,94 +2055,96 @@ static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
1676 2055
1677done: 2056done:
1678 hci_dev_unlock(hdev); 2057 hci_dev_unlock(hdev);
1679 hci_dev_put(hdev);
1680
1681 return err; 2058 return err;
1682} 2059}
1683 2060
1684static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len) 2061static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2062 u16 len)
1685{ 2063{
1686 struct mgmt_cp_user_confirm_reply *cp = (void *) data; 2064 struct mgmt_cp_user_confirm_reply *cp = data;
1687 2065
1688 BT_DBG(""); 2066 BT_DBG("");
1689 2067
1690 if (len != sizeof(*cp)) 2068 if (len != sizeof(*cp))
1691 return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY, 2069 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
1692 MGMT_STATUS_INVALID_PARAMS); 2070 MGMT_STATUS_INVALID_PARAMS);
1693 2071
1694 return user_pairing_resp(sk, index, &cp->bdaddr, 2072 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1695 MGMT_OP_USER_CONFIRM_REPLY, 2073 MGMT_OP_USER_CONFIRM_REPLY,
1696 HCI_OP_USER_CONFIRM_REPLY, 0); 2074 HCI_OP_USER_CONFIRM_REPLY, 0);
1697} 2075}
1698 2076
1699static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data, 2077static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
1700 u16 len) 2078 void *data, u16 len)
1701{ 2079{
1702 struct mgmt_cp_user_confirm_neg_reply *cp = data; 2080 struct mgmt_cp_user_confirm_neg_reply *cp = data;
1703 2081
1704 BT_DBG(""); 2082 BT_DBG("");
1705 2083
1706 if (len != sizeof(*cp)) 2084 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1707 return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY, 2085 MGMT_OP_USER_CONFIRM_NEG_REPLY,
1708 MGMT_STATUS_INVALID_PARAMS); 2086 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
1709
1710 return user_pairing_resp(sk, index, &cp->bdaddr,
1711 MGMT_OP_USER_CONFIRM_NEG_REPLY,
1712 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
1713} 2087}
1714 2088
1715static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len) 2089static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2090 u16 len)
1716{ 2091{
1717 struct mgmt_cp_user_passkey_reply *cp = (void *) data; 2092 struct mgmt_cp_user_passkey_reply *cp = data;
1718 2093
1719 BT_DBG(""); 2094 BT_DBG("");
1720 2095
1721 if (len != sizeof(*cp)) 2096 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1722 return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY, 2097 MGMT_OP_USER_PASSKEY_REPLY,
1723 EINVAL); 2098 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
1724
1725 return user_pairing_resp(sk, index, &cp->bdaddr,
1726 MGMT_OP_USER_PASSKEY_REPLY,
1727 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
1728} 2099}
1729 2100
1730static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data, 2101static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
1731 u16 len) 2102 void *data, u16 len)
1732{ 2103{
1733 struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data; 2104 struct mgmt_cp_user_passkey_neg_reply *cp = data;
1734 2105
1735 BT_DBG(""); 2106 BT_DBG("");
1736 2107
1737 if (len != sizeof(*cp)) 2108 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1738 return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY, 2109 MGMT_OP_USER_PASSKEY_NEG_REPLY,
1739 EINVAL); 2110 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2111}
2112
2113static int update_name(struct hci_dev *hdev, const char *name)
2114{
2115 struct hci_cp_write_local_name cp;
1740 2116
1741 return user_pairing_resp(sk, index, &cp->bdaddr, 2117 memcpy(cp.name, name, sizeof(cp.name));
1742 MGMT_OP_USER_PASSKEY_NEG_REPLY, 2118
1743 HCI_OP_USER_PASSKEY_NEG_REPLY, 0); 2119 return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
1744} 2120}
1745 2121
1746static int set_local_name(struct sock *sk, u16 index, unsigned char *data, 2122static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
1747 u16 len) 2123 u16 len)
1748{ 2124{
1749 struct mgmt_cp_set_local_name *mgmt_cp = (void *) data; 2125 struct mgmt_cp_set_local_name *cp = data;
1750 struct hci_cp_write_local_name hci_cp;
1751 struct hci_dev *hdev;
1752 struct pending_cmd *cmd; 2126 struct pending_cmd *cmd;
1753 int err; 2127 int err;
1754 2128
1755 BT_DBG(""); 2129 BT_DBG("");
1756 2130
1757 if (len != sizeof(*mgmt_cp)) 2131 hci_dev_lock(hdev);
1758 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
1759 MGMT_STATUS_INVALID_PARAMS);
1760 2132
1761 hdev = hci_dev_get(index); 2133 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
1762 if (!hdev)
1763 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
1764 MGMT_STATUS_INVALID_PARAMS);
1765 2134
1766 hci_dev_lock(hdev); 2135 if (!hdev_is_powered(hdev)) {
2136 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2137
2138 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2139 data, len);
2140 if (err < 0)
2141 goto failed;
2142
2143 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2144 sk);
2145
2146 goto failed;
2147 }
1767 2148
1768 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); 2149 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
1769 if (!cmd) { 2150 if (!cmd) {
@@ -1771,49 +2152,40 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1771 goto failed; 2152 goto failed;
1772 } 2153 }
1773 2154
1774 memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name)); 2155 err = update_name(hdev, cp->name);
1775 err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
1776 &hci_cp);
1777 if (err < 0) 2156 if (err < 0)
1778 mgmt_pending_remove(cmd); 2157 mgmt_pending_remove(cmd);
1779 2158
1780failed: 2159failed:
1781 hci_dev_unlock(hdev); 2160 hci_dev_unlock(hdev);
1782 hci_dev_put(hdev);
1783
1784 return err; 2161 return err;
1785} 2162}
1786 2163
1787static int read_local_oob_data(struct sock *sk, u16 index) 2164static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2165 void *data, u16 data_len)
1788{ 2166{
1789 struct hci_dev *hdev;
1790 struct pending_cmd *cmd; 2167 struct pending_cmd *cmd;
1791 int err; 2168 int err;
1792 2169
1793 BT_DBG("hci%u", index); 2170 BT_DBG("%s", hdev->name);
1794
1795 hdev = hci_dev_get(index);
1796 if (!hdev)
1797 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1798 MGMT_STATUS_INVALID_PARAMS);
1799 2171
1800 hci_dev_lock(hdev); 2172 hci_dev_lock(hdev);
1801 2173
1802 if (!test_bit(HCI_UP, &hdev->flags)) { 2174 if (!hdev_is_powered(hdev)) {
1803 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2175 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1804 MGMT_STATUS_NOT_POWERED); 2176 MGMT_STATUS_NOT_POWERED);
1805 goto unlock; 2177 goto unlock;
1806 } 2178 }
1807 2179
1808 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { 2180 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1809 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2181 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1810 MGMT_STATUS_NOT_SUPPORTED); 2182 MGMT_STATUS_NOT_SUPPORTED);
1811 goto unlock; 2183 goto unlock;
1812 } 2184 }
1813 2185
1814 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { 2186 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
1815 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2187 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1816 MGMT_STATUS_BUSY); 2188 MGMT_STATUS_BUSY);
1817 goto unlock; 2189 goto unlock;
1818 } 2190 }
1819 2191
@@ -1829,104 +2201,118 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1829 2201
1830unlock: 2202unlock:
1831 hci_dev_unlock(hdev); 2203 hci_dev_unlock(hdev);
1832 hci_dev_put(hdev);
1833
1834 return err; 2204 return err;
1835} 2205}
1836 2206
1837static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, 2207static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
1838 u16 len) 2208 void *data, u16 len)
1839{ 2209{
1840 struct hci_dev *hdev; 2210 struct mgmt_cp_add_remote_oob_data *cp = data;
1841 struct mgmt_cp_add_remote_oob_data *cp = (void *) data; 2211 u8 status;
1842 int err; 2212 int err;
1843 2213
1844 BT_DBG("hci%u ", index); 2214 BT_DBG("%s ", hdev->name);
1845
1846 if (len != sizeof(*cp))
1847 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1848 MGMT_STATUS_INVALID_PARAMS);
1849
1850 hdev = hci_dev_get(index);
1851 if (!hdev)
1852 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1853 MGMT_STATUS_INVALID_PARAMS);
1854 2215
1855 hci_dev_lock(hdev); 2216 hci_dev_lock(hdev);
1856 2217
1857 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 2218 if (!hdev_is_powered(hdev)) {
1858 cp->randomizer); 2219 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2220 MGMT_STATUS_NOT_POWERED, &cp->addr,
2221 sizeof(cp->addr));
2222 goto unlock;
2223 }
2224
2225 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2226 cp->randomizer);
1859 if (err < 0) 2227 if (err < 0)
1860 err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 2228 status = MGMT_STATUS_FAILED;
1861 MGMT_STATUS_FAILED);
1862 else 2229 else
1863 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 2230 status = 0;
1864 0);
1865 2231
1866 hci_dev_unlock(hdev); 2232 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
1867 hci_dev_put(hdev); 2233 &cp->addr, sizeof(cp->addr));
1868 2234
2235unlock:
2236 hci_dev_unlock(hdev);
1869 return err; 2237 return err;
1870} 2238}
1871 2239
1872static int remove_remote_oob_data(struct sock *sk, u16 index, 2240static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
1873 unsigned char *data, u16 len) 2241 void *data, u16 len)
1874{ 2242{
1875 struct hci_dev *hdev; 2243 struct mgmt_cp_remove_remote_oob_data *cp = data;
1876 struct mgmt_cp_remove_remote_oob_data *cp = (void *) data; 2244 u8 status;
1877 int err; 2245 int err;
1878 2246
1879 BT_DBG("hci%u ", index); 2247 BT_DBG("%s", hdev->name);
2248
2249 hci_dev_lock(hdev);
1880 2250
1881 if (len != sizeof(*cp)) 2251 if (!hdev_is_powered(hdev)) {
1882 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2252 err = cmd_complete(sk, hdev->id,
1883 MGMT_STATUS_INVALID_PARAMS); 2253 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2254 MGMT_STATUS_NOT_POWERED, &cp->addr,
2255 sizeof(cp->addr));
2256 goto unlock;
2257 }
1884 2258
1885 hdev = hci_dev_get(index); 2259 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
1886 if (!hdev) 2260 if (err < 0)
1887 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2261 status = MGMT_STATUS_INVALID_PARAMS;
1888 MGMT_STATUS_INVALID_PARAMS); 2262 else
2263 status = 0;
2264
2265 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2266 status, &cp->addr, sizeof(cp->addr));
2267
2268unlock:
2269 hci_dev_unlock(hdev);
2270 return err;
2271}
2272
2273int mgmt_interleaved_discovery(struct hci_dev *hdev)
2274{
2275 int err;
2276
2277 BT_DBG("%s", hdev->name);
1889 2278
1890 hci_dev_lock(hdev); 2279 hci_dev_lock(hdev);
1891 2280
1892 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 2281 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
1893 if (err < 0) 2282 if (err < 0)
1894 err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2283 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1895 MGMT_STATUS_INVALID_PARAMS);
1896 else
1897 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1898 NULL, 0);
1899 2284
1900 hci_dev_unlock(hdev); 2285 hci_dev_unlock(hdev);
1901 hci_dev_put(hdev);
1902 2286
1903 return err; 2287 return err;
1904} 2288}
1905 2289
1906static int start_discovery(struct sock *sk, u16 index, 2290static int start_discovery(struct sock *sk, struct hci_dev *hdev,
1907 unsigned char *data, u16 len) 2291 void *data, u16 len)
1908{ 2292{
1909 struct mgmt_cp_start_discovery *cp = (void *) data; 2293 struct mgmt_cp_start_discovery *cp = data;
1910 struct pending_cmd *cmd; 2294 struct pending_cmd *cmd;
1911 struct hci_dev *hdev;
1912 int err; 2295 int err;
1913 2296
1914 BT_DBG("hci%u", index); 2297 BT_DBG("%s", hdev->name);
1915 2298
1916 if (len != sizeof(*cp)) 2299 hci_dev_lock(hdev);
1917 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
1918 MGMT_STATUS_INVALID_PARAMS);
1919 2300
1920 hdev = hci_dev_get(index); 2301 if (!hdev_is_powered(hdev)) {
1921 if (!hdev) 2302 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
1922 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, 2303 MGMT_STATUS_NOT_POWERED);
1923 MGMT_STATUS_INVALID_PARAMS); 2304 goto failed;
2305 }
1924 2306
1925 hci_dev_lock(hdev); 2307 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2308 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2309 MGMT_STATUS_BUSY);
2310 goto failed;
2311 }
1926 2312
1927 if (!test_bit(HCI_UP, &hdev->flags)) { 2313 if (hdev->discovery.state != DISCOVERY_STOPPED) {
1928 err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, 2314 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
1929 MGMT_STATUS_NOT_POWERED); 2315 MGMT_STATUS_BUSY);
1930 goto failed; 2316 goto failed;
1931 } 2317 }
1932 2318
@@ -1936,179 +2322,402 @@ static int start_discovery(struct sock *sk, u16 index,
1936 goto failed; 2322 goto failed;
1937 } 2323 }
1938 2324
1939 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); 2325 hdev->discovery.type = cp->type;
2326
2327 switch (hdev->discovery.type) {
2328 case DISCOV_TYPE_BREDR:
2329 if (lmp_bredr_capable(hdev))
2330 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2331 else
2332 err = -ENOTSUPP;
2333 break;
2334
2335 case DISCOV_TYPE_LE:
2336 if (lmp_host_le_capable(hdev))
2337 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2338 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2339 else
2340 err = -ENOTSUPP;
2341 break;
2342
2343 case DISCOV_TYPE_INTERLEAVED:
2344 if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
2345 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2346 LE_SCAN_WIN,
2347 LE_SCAN_TIMEOUT_BREDR_LE);
2348 else
2349 err = -ENOTSUPP;
2350 break;
2351
2352 default:
2353 err = -EINVAL;
2354 }
2355
1940 if (err < 0) 2356 if (err < 0)
1941 mgmt_pending_remove(cmd); 2357 mgmt_pending_remove(cmd);
2358 else
2359 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1942 2360
1943failed: 2361failed:
1944 hci_dev_unlock(hdev); 2362 hci_dev_unlock(hdev);
1945 hci_dev_put(hdev);
1946
1947 return err; 2363 return err;
1948} 2364}
1949 2365
1950static int stop_discovery(struct sock *sk, u16 index) 2366static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2367 u16 len)
1951{ 2368{
1952 struct hci_dev *hdev; 2369 struct mgmt_cp_stop_discovery *mgmt_cp = data;
1953 struct pending_cmd *cmd; 2370 struct pending_cmd *cmd;
2371 struct hci_cp_remote_name_req_cancel cp;
2372 struct inquiry_entry *e;
1954 int err; 2373 int err;
1955 2374
1956 BT_DBG("hci%u", index); 2375 BT_DBG("%s", hdev->name);
1957
1958 hdev = hci_dev_get(index);
1959 if (!hdev)
1960 return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
1961 MGMT_STATUS_INVALID_PARAMS);
1962 2376
1963 hci_dev_lock(hdev); 2377 hci_dev_lock(hdev);
1964 2378
2379 if (!hci_discovery_active(hdev)) {
2380 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2381 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2382 sizeof(mgmt_cp->type));
2383 goto unlock;
2384 }
2385
2386 if (hdev->discovery.type != mgmt_cp->type) {
2387 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2388 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2389 sizeof(mgmt_cp->type));
2390 goto unlock;
2391 }
2392
1965 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); 2393 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
1966 if (!cmd) { 2394 if (!cmd) {
1967 err = -ENOMEM; 2395 err = -ENOMEM;
1968 goto failed; 2396 goto unlock;
2397 }
2398
2399 switch (hdev->discovery.state) {
2400 case DISCOVERY_FINDING:
2401 if (test_bit(HCI_INQUIRY, &hdev->flags))
2402 err = hci_cancel_inquiry(hdev);
2403 else
2404 err = hci_cancel_le_scan(hdev);
2405
2406 break;
2407
2408 case DISCOVERY_RESOLVING:
2409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2410 NAME_PENDING);
2411 if (!e) {
2412 mgmt_pending_remove(cmd);
2413 err = cmd_complete(sk, hdev->id,
2414 MGMT_OP_STOP_DISCOVERY, 0,
2415 &mgmt_cp->type,
2416 sizeof(mgmt_cp->type));
2417 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2418 goto unlock;
2419 }
2420
2421 bacpy(&cp.bdaddr, &e->data.bdaddr);
2422 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2423 sizeof(cp), &cp);
2424
2425 break;
2426
2427 default:
2428 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2429 err = -EFAULT;
1969 } 2430 }
1970 2431
1971 err = hci_cancel_inquiry(hdev);
1972 if (err < 0) 2432 if (err < 0)
1973 mgmt_pending_remove(cmd); 2433 mgmt_pending_remove(cmd);
2434 else
2435 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1974 2436
1975failed: 2437unlock:
1976 hci_dev_unlock(hdev); 2438 hci_dev_unlock(hdev);
1977 hci_dev_put(hdev); 2439 return err;
2440}
2441
2442static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2443 u16 len)
2444{
2445 struct mgmt_cp_confirm_name *cp = data;
2446 struct inquiry_entry *e;
2447 int err;
2448
2449 BT_DBG("%s", hdev->name);
2450
2451 hci_dev_lock(hdev);
2452
2453 if (!hci_discovery_active(hdev)) {
2454 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2455 MGMT_STATUS_FAILED);
2456 goto failed;
2457 }
2458
2459 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2460 if (!e) {
2461 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2462 MGMT_STATUS_INVALID_PARAMS);
2463 goto failed;
2464 }
2465
2466 if (cp->name_known) {
2467 e->name_state = NAME_KNOWN;
2468 list_del(&e->list);
2469 } else {
2470 e->name_state = NAME_NEEDED;
2471 hci_inquiry_cache_update_resolve(hdev, e);
2472 }
1978 2473
2474 err = 0;
2475
2476failed:
2477 hci_dev_unlock(hdev);
1979 return err; 2478 return err;
1980} 2479}
1981 2480
1982static int block_device(struct sock *sk, u16 index, unsigned char *data, 2481static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
1983 u16 len) 2482 u16 len)
1984{ 2483{
1985 struct hci_dev *hdev; 2484 struct mgmt_cp_block_device *cp = data;
1986 struct mgmt_cp_block_device *cp = (void *) data; 2485 u8 status;
1987 int err; 2486 int err;
1988 2487
1989 BT_DBG("hci%u", index); 2488 BT_DBG("%s", hdev->name);
2489
2490 hci_dev_lock(hdev);
1990 2491
1991 if (len != sizeof(*cp)) 2492 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
1992 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 2493 if (err < 0)
1993 MGMT_STATUS_INVALID_PARAMS); 2494 status = MGMT_STATUS_FAILED;
2495 else
2496 status = 0;
2497
2498 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2499 &cp->addr, sizeof(cp->addr));
2500
2501 hci_dev_unlock(hdev);
2502
2503 return err;
2504}
2505
2506static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2507 u16 len)
2508{
2509 struct mgmt_cp_unblock_device *cp = data;
2510 u8 status;
2511 int err;
1994 2512
1995 hdev = hci_dev_get(index); 2513 BT_DBG("%s", hdev->name);
1996 if (!hdev)
1997 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1998 MGMT_STATUS_INVALID_PARAMS);
1999 2514
2000 hci_dev_lock(hdev); 2515 hci_dev_lock(hdev);
2001 2516
2002 err = hci_blacklist_add(hdev, &cp->bdaddr); 2517 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2003 if (err < 0) 2518 if (err < 0)
2004 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 2519 status = MGMT_STATUS_INVALID_PARAMS;
2005 MGMT_STATUS_FAILED);
2006 else 2520 else
2007 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 2521 status = 0;
2008 NULL, 0); 2522
2523 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2524 &cp->addr, sizeof(cp->addr));
2009 2525
2010 hci_dev_unlock(hdev); 2526 hci_dev_unlock(hdev);
2011 hci_dev_put(hdev);
2012 2527
2013 return err; 2528 return err;
2014} 2529}
2015 2530
2016static int unblock_device(struct sock *sk, u16 index, unsigned char *data, 2531static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2017 u16 len) 2532 u16 len)
2018{ 2533{
2019 struct hci_dev *hdev; 2534 struct mgmt_cp_set_device_id *cp = data;
2020 struct mgmt_cp_unblock_device *cp = (void *) data;
2021 int err; 2535 int err;
2536 __u16 source;
2022 2537
2023 BT_DBG("hci%u", index); 2538 BT_DBG("%s", hdev->name);
2024 2539
2025 if (len != sizeof(*cp)) 2540 source = __le16_to_cpu(cp->source);
2026 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
2027 MGMT_STATUS_INVALID_PARAMS);
2028 2541
2029 hdev = hci_dev_get(index); 2542 if (source > 0x0002)
2030 if (!hdev) 2543 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2031 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2544 MGMT_STATUS_INVALID_PARAMS);
2032 MGMT_STATUS_INVALID_PARAMS);
2033 2545
2034 hci_dev_lock(hdev); 2546 hci_dev_lock(hdev);
2035 2547
2036 err = hci_blacklist_del(hdev, &cp->bdaddr); 2548 hdev->devid_source = source;
2549 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2550 hdev->devid_product = __le16_to_cpu(cp->product);
2551 hdev->devid_version = __le16_to_cpu(cp->version);
2037 2552
2038 if (err < 0) 2553 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2039 err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2554
2040 MGMT_STATUS_INVALID_PARAMS); 2555 update_eir(hdev);
2041 else
2042 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
2043 NULL, 0);
2044 2556
2045 hci_dev_unlock(hdev); 2557 hci_dev_unlock(hdev);
2046 hci_dev_put(hdev);
2047 2558
2048 return err; 2559 return err;
2049} 2560}
2050 2561
2051static int set_fast_connectable(struct sock *sk, u16 index, 2562static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2052 unsigned char *data, u16 len) 2563 void *data, u16 len)
2053{ 2564{
2054 struct hci_dev *hdev; 2565 struct mgmt_mode *cp = data;
2055 struct mgmt_mode *cp = (void *) data;
2056 struct hci_cp_write_page_scan_activity acp; 2566 struct hci_cp_write_page_scan_activity acp;
2057 u8 type; 2567 u8 type;
2058 int err; 2568 int err;
2059 2569
2060 BT_DBG("hci%u", index); 2570 BT_DBG("%s", hdev->name);
2061 2571
2062 if (len != sizeof(*cp)) 2572 if (!hdev_is_powered(hdev))
2063 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2573 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2064 MGMT_STATUS_INVALID_PARAMS); 2574 MGMT_STATUS_NOT_POWERED);
2065 2575
2066 hdev = hci_dev_get(index); 2576 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2067 if (!hdev) 2577 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2068 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2578 MGMT_STATUS_REJECTED);
2069 MGMT_STATUS_INVALID_PARAMS);
2070 2579
2071 hci_dev_lock(hdev); 2580 hci_dev_lock(hdev);
2072 2581
2073 if (cp->val) { 2582 if (cp->val) {
2074 type = PAGE_SCAN_TYPE_INTERLACED; 2583 type = PAGE_SCAN_TYPE_INTERLACED;
2075 acp.interval = 0x0024; /* 22.5 msec page scan interval */ 2584
2585 /* 22.5 msec page scan interval */
2586 acp.interval = __constant_cpu_to_le16(0x0024);
2076 } else { 2587 } else {
2077 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 2588 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2078 acp.interval = 0x0800; /* default 1.28 sec page scan */ 2589
2590 /* default 1.28 sec page scan */
2591 acp.interval = __constant_cpu_to_le16(0x0800);
2079 } 2592 }
2080 2593
2081 acp.window = 0x0012; /* default 11.25 msec page scan window */ 2594 /* default 11.25 msec page scan window */
2595 acp.window = __constant_cpu_to_le16(0x0012);
2082 2596
2083 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 2597 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2084 sizeof(acp), &acp); 2598 &acp);
2085 if (err < 0) { 2599 if (err < 0) {
2086 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2600 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2087 MGMT_STATUS_FAILED); 2601 MGMT_STATUS_FAILED);
2088 goto done; 2602 goto done;
2089 } 2603 }
2090 2604
2091 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); 2605 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2092 if (err < 0) { 2606 if (err < 0) {
2093 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2607 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2094 MGMT_STATUS_FAILED); 2608 MGMT_STATUS_FAILED);
2095 goto done; 2609 goto done;
2096 } 2610 }
2097 2611
2098 err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2612 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2099 NULL, 0); 2613 NULL, 0);
2100done: 2614done:
2101 hci_dev_unlock(hdev); 2615 hci_dev_unlock(hdev);
2102 hci_dev_put(hdev);
2103
2104 return err; 2616 return err;
2105} 2617}
2106 2618
2619static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2620 void *cp_data, u16 len)
2621{
2622 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2623 u16 key_count, expected_len;
2624 int i;
2625
2626 key_count = __le16_to_cpu(cp->key_count);
2627
2628 expected_len = sizeof(*cp) + key_count *
2629 sizeof(struct mgmt_ltk_info);
2630 if (expected_len != len) {
2631 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2632 len, expected_len);
2633 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2634 EINVAL);
2635 }
2636
2637 BT_DBG("%s key_count %u", hdev->name, key_count);
2638
2639 hci_dev_lock(hdev);
2640
2641 hci_smp_ltks_clear(hdev);
2642
2643 for (i = 0; i < key_count; i++) {
2644 struct mgmt_ltk_info *key = &cp->keys[i];
2645 u8 type;
2646
2647 if (key->master)
2648 type = HCI_SMP_LTK;
2649 else
2650 type = HCI_SMP_LTK_SLAVE;
2651
2652 hci_add_ltk(hdev, &key->addr.bdaddr,
2653 bdaddr_to_le(key->addr.type),
2654 type, 0, key->authenticated, key->val,
2655 key->enc_size, key->ediv, key->rand);
2656 }
2657
2658 hci_dev_unlock(hdev);
2659
2660 return 0;
2661}
2662
2663static const struct mgmt_handler {
2664 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2665 u16 data_len);
2666 bool var_len;
2667 size_t data_len;
2668} mgmt_handlers[] = {
2669 { NULL }, /* 0x0000 (no command) */
2670 { read_version, false, MGMT_READ_VERSION_SIZE },
2671 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2672 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2673 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2674 { set_powered, false, MGMT_SETTING_SIZE },
2675 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2676 { set_connectable, false, MGMT_SETTING_SIZE },
2677 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2678 { set_pairable, false, MGMT_SETTING_SIZE },
2679 { set_link_security, false, MGMT_SETTING_SIZE },
2680 { set_ssp, false, MGMT_SETTING_SIZE },
2681 { set_hs, false, MGMT_SETTING_SIZE },
2682 { set_le, false, MGMT_SETTING_SIZE },
2683 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2684 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2685 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2686 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
2687 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2688 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2689 { disconnect, false, MGMT_DISCONNECT_SIZE },
2690 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2691 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2692 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2693 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2694 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2695 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2696 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2697 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2698 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2699 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2700 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2701 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2702 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2703 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2704 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2705 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2706 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2707 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2708 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2709 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
2710};
2711
2712
2107int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 2713int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2108{ 2714{
2109 unsigned char *buf; 2715 void *buf;
2716 u8 *cp;
2110 struct mgmt_hdr *hdr; 2717 struct mgmt_hdr *hdr;
2111 u16 opcode, index, len; 2718 u16 opcode, index, len;
2719 struct hci_dev *hdev = NULL;
2720 const struct mgmt_handler *handler;
2112 int err; 2721 int err;
2113 2722
2114 BT_DBG("got %zu bytes", msglen); 2723 BT_DBG("got %zu bytes", msglen);
@@ -2125,127 +2734,64 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2125 goto done; 2734 goto done;
2126 } 2735 }
2127 2736
2128 hdr = (struct mgmt_hdr *) buf; 2737 hdr = buf;
2129 opcode = get_unaligned_le16(&hdr->opcode); 2738 opcode = __le16_to_cpu(hdr->opcode);
2130 index = get_unaligned_le16(&hdr->index); 2739 index = __le16_to_cpu(hdr->index);
2131 len = get_unaligned_le16(&hdr->len); 2740 len = __le16_to_cpu(hdr->len);
2132 2741
2133 if (len != msglen - sizeof(*hdr)) { 2742 if (len != msglen - sizeof(*hdr)) {
2134 err = -EINVAL; 2743 err = -EINVAL;
2135 goto done; 2744 goto done;
2136 } 2745 }
2137 2746
2138 switch (opcode) { 2747 if (index != MGMT_INDEX_NONE) {
2139 case MGMT_OP_READ_VERSION: 2748 hdev = hci_dev_get(index);
2140 err = read_version(sk); 2749 if (!hdev) {
2141 break; 2750 err = cmd_status(sk, index, opcode,
2142 case MGMT_OP_READ_INDEX_LIST: 2751 MGMT_STATUS_INVALID_INDEX);
2143 err = read_index_list(sk); 2752 goto done;
2144 break; 2753 }
2145 case MGMT_OP_READ_INFO: 2754 }
2146 err = read_controller_info(sk, index); 2755
2147 break; 2756 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2148 case MGMT_OP_SET_POWERED: 2757 mgmt_handlers[opcode].func == NULL) {
2149 err = set_powered(sk, index, buf + sizeof(*hdr), len);
2150 break;
2151 case MGMT_OP_SET_DISCOVERABLE:
2152 err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
2153 break;
2154 case MGMT_OP_SET_CONNECTABLE:
2155 err = set_connectable(sk, index, buf + sizeof(*hdr), len);
2156 break;
2157 case MGMT_OP_SET_FAST_CONNECTABLE:
2158 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
2159 len);
2160 break;
2161 case MGMT_OP_SET_PAIRABLE:
2162 err = set_pairable(sk, index, buf + sizeof(*hdr), len);
2163 break;
2164 case MGMT_OP_ADD_UUID:
2165 err = add_uuid(sk, index, buf + sizeof(*hdr), len);
2166 break;
2167 case MGMT_OP_REMOVE_UUID:
2168 err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
2169 break;
2170 case MGMT_OP_SET_DEV_CLASS:
2171 err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
2172 break;
2173 case MGMT_OP_LOAD_LINK_KEYS:
2174 err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
2175 break;
2176 case MGMT_OP_REMOVE_KEYS:
2177 err = remove_keys(sk, index, buf + sizeof(*hdr), len);
2178 break;
2179 case MGMT_OP_DISCONNECT:
2180 err = disconnect(sk, index, buf + sizeof(*hdr), len);
2181 break;
2182 case MGMT_OP_GET_CONNECTIONS:
2183 err = get_connections(sk, index);
2184 break;
2185 case MGMT_OP_PIN_CODE_REPLY:
2186 err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
2187 break;
2188 case MGMT_OP_PIN_CODE_NEG_REPLY:
2189 err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
2190 break;
2191 case MGMT_OP_SET_IO_CAPABILITY:
2192 err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
2193 break;
2194 case MGMT_OP_PAIR_DEVICE:
2195 err = pair_device(sk, index, buf + sizeof(*hdr), len);
2196 break;
2197 case MGMT_OP_USER_CONFIRM_REPLY:
2198 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
2199 break;
2200 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
2201 err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
2202 len);
2203 break;
2204 case MGMT_OP_USER_PASSKEY_REPLY:
2205 err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
2206 break;
2207 case MGMT_OP_USER_PASSKEY_NEG_REPLY:
2208 err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
2209 len);
2210 break;
2211 case MGMT_OP_SET_LOCAL_NAME:
2212 err = set_local_name(sk, index, buf + sizeof(*hdr), len);
2213 break;
2214 case MGMT_OP_READ_LOCAL_OOB_DATA:
2215 err = read_local_oob_data(sk, index);
2216 break;
2217 case MGMT_OP_ADD_REMOTE_OOB_DATA:
2218 err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
2219 break;
2220 case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
2221 err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
2222 len);
2223 break;
2224 case MGMT_OP_START_DISCOVERY:
2225 err = start_discovery(sk, index, buf + sizeof(*hdr), len);
2226 break;
2227 case MGMT_OP_STOP_DISCOVERY:
2228 err = stop_discovery(sk, index);
2229 break;
2230 case MGMT_OP_BLOCK_DEVICE:
2231 err = block_device(sk, index, buf + sizeof(*hdr), len);
2232 break;
2233 case MGMT_OP_UNBLOCK_DEVICE:
2234 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
2235 break;
2236 default:
2237 BT_DBG("Unknown op %u", opcode); 2758 BT_DBG("Unknown op %u", opcode);
2238 err = cmd_status(sk, index, opcode, 2759 err = cmd_status(sk, index, opcode,
2239 MGMT_STATUS_UNKNOWN_COMMAND); 2760 MGMT_STATUS_UNKNOWN_COMMAND);
2240 break; 2761 goto done;
2762 }
2763
2764 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2765 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2766 err = cmd_status(sk, index, opcode,
2767 MGMT_STATUS_INVALID_INDEX);
2768 goto done;
2769 }
2770
2771 handler = &mgmt_handlers[opcode];
2772
2773 if ((handler->var_len && len < handler->data_len) ||
2774 (!handler->var_len && len != handler->data_len)) {
2775 err = cmd_status(sk, index, opcode,
2776 MGMT_STATUS_INVALID_PARAMS);
2777 goto done;
2241 } 2778 }
2242 2779
2780 if (hdev)
2781 mgmt_init_hdev(sk, hdev);
2782
2783 cp = buf + sizeof(*hdr);
2784
2785 err = handler->func(sk, hdev, cp, len);
2243 if (err < 0) 2786 if (err < 0)
2244 goto done; 2787 goto done;
2245 2788
2246 err = msglen; 2789 err = msglen;
2247 2790
2248done: 2791done:
2792 if (hdev)
2793 hci_dev_put(hdev);
2794
2249 kfree(buf); 2795 kfree(buf);
2250 return err; 2796 return err;
2251} 2797}
@@ -2265,7 +2811,7 @@ int mgmt_index_added(struct hci_dev *hdev)
2265 2811
2266int mgmt_index_removed(struct hci_dev *hdev) 2812int mgmt_index_removed(struct hci_dev *hdev)
2267{ 2813{
2268 u8 status = ENODEV; 2814 u8 status = MGMT_STATUS_INVALID_INDEX;
2269 2815
2270 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2816 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2271 2817
@@ -2273,9 +2819,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
2273} 2819}
2274 2820
2275struct cmd_lookup { 2821struct cmd_lookup {
2276 u8 val;
2277 struct sock *sk; 2822 struct sock *sk;
2278 struct hci_dev *hdev; 2823 struct hci_dev *hdev;
2824 u8 mgmt_status;
2279}; 2825};
2280 2826
2281static void settings_rsp(struct pending_cmd *cmd, void *data) 2827static void settings_rsp(struct pending_cmd *cmd, void *data)
@@ -2296,63 +2842,91 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
2296 2842
2297int mgmt_powered(struct hci_dev *hdev, u8 powered) 2843int mgmt_powered(struct hci_dev *hdev, u8 powered)
2298{ 2844{
2299 struct cmd_lookup match = { powered, NULL, hdev }; 2845 struct cmd_lookup match = { NULL, hdev };
2300 __le32 ev; 2846 int err;
2301 int ret; 2847
2848 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2849 return 0;
2302 2850
2303 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 2851 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2304 2852
2305 if (!powered) { 2853 if (powered) {
2306 u8 status = ENETDOWN; 2854 u8 scan = 0;
2855
2856 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2857 scan |= SCAN_PAGE;
2858 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2859 scan |= SCAN_INQUIRY;
2860
2861 if (scan)
2862 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2863
2864 update_class(hdev);
2865 update_name(hdev, hdev->dev_name);
2866 update_eir(hdev);
2867 } else {
2868 u8 status = MGMT_STATUS_NOT_POWERED;
2307 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2869 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2308 } 2870 }
2309 2871
2310 ev = cpu_to_le32(get_current_settings(hdev)); 2872 err = new_settings(hdev, match.sk);
2311
2312 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2313 match.sk);
2314 2873
2315 if (match.sk) 2874 if (match.sk)
2316 sock_put(match.sk); 2875 sock_put(match.sk);
2317 2876
2318 return ret; 2877 return err;
2319} 2878}
2320 2879
2321int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 2880int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2322{ 2881{
2323 struct cmd_lookup match = { discoverable, NULL, hdev }; 2882 struct cmd_lookup match = { NULL, hdev };
2324 __le32 ev; 2883 bool changed = false;
2325 int ret; 2884 int err = 0;
2885
2886 if (discoverable) {
2887 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2888 changed = true;
2889 } else {
2890 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2891 changed = true;
2892 }
2326 2893
2327 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match); 2894 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
2895 &match);
2328 2896
2329 ev = cpu_to_le32(get_current_settings(hdev)); 2897 if (changed)
2898 err = new_settings(hdev, match.sk);
2330 2899
2331 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2332 match.sk);
2333 if (match.sk) 2900 if (match.sk)
2334 sock_put(match.sk); 2901 sock_put(match.sk);
2335 2902
2336 return ret; 2903 return err;
2337} 2904}
2338 2905
2339int mgmt_connectable(struct hci_dev *hdev, u8 connectable) 2906int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2340{ 2907{
2341 __le32 ev; 2908 struct cmd_lookup match = { NULL, hdev };
2342 struct cmd_lookup match = { connectable, NULL, hdev }; 2909 bool changed = false;
2343 int ret; 2910 int err = 0;
2344 2911
2345 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, 2912 if (connectable) {
2346 &match); 2913 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2914 changed = true;
2915 } else {
2916 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2917 changed = true;
2918 }
2347 2919
2348 ev = cpu_to_le32(get_current_settings(hdev)); 2920 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
2921 &match);
2349 2922
2350 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk); 2923 if (changed)
2924 err = new_settings(hdev, match.sk);
2351 2925
2352 if (match.sk) 2926 if (match.sk)
2353 sock_put(match.sk); 2927 sock_put(match.sk);
2354 2928
2355 return ret; 2929 return err;
2356} 2930}
2357 2931
2358int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) 2932int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
@@ -2361,24 +2935,25 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2361 2935
2362 if (scan & SCAN_PAGE) 2936 if (scan & SCAN_PAGE)
2363 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, 2937 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
2364 cmd_status_rsp, &mgmt_err); 2938 cmd_status_rsp, &mgmt_err);
2365 2939
2366 if (scan & SCAN_INQUIRY) 2940 if (scan & SCAN_INQUIRY)
2367 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, 2941 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
2368 cmd_status_rsp, &mgmt_err); 2942 cmd_status_rsp, &mgmt_err);
2369 2943
2370 return 0; 2944 return 0;
2371} 2945}
2372 2946
2373int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 2947int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2374 u8 persistent) 2948 bool persistent)
2375{ 2949{
2376 struct mgmt_ev_new_link_key ev; 2950 struct mgmt_ev_new_link_key ev;
2377 2951
2378 memset(&ev, 0, sizeof(ev)); 2952 memset(&ev, 0, sizeof(ev));
2379 2953
2380 ev.store_hint = persistent; 2954 ev.store_hint = persistent;
2381 bacpy(&ev.key.bdaddr, &key->bdaddr); 2955 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2956 ev.key.addr.type = BDADDR_BREDR;
2382 ev.key.type = key->type; 2957 ev.key.type = key->type;
2383 memcpy(ev.key.val, key->val, 16); 2958 memcpy(ev.key.val, key->val, 16);
2384 ev.key.pin_len = key->pin_len; 2959 ev.key.pin_len = key->pin_len;
@@ -2386,15 +2961,54 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2386 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2961 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
2387} 2962}
2388 2963
2389int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 2964int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
2390 u8 addr_type)
2391{ 2965{
2392 struct mgmt_addr_info ev; 2966 struct mgmt_ev_new_long_term_key ev;
2393 2967
2394 bacpy(&ev.bdaddr, bdaddr); 2968 memset(&ev, 0, sizeof(ev));
2395 ev.type = link_to_mgmt(link_type, addr_type); 2969
2970 ev.store_hint = persistent;
2971 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2972 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
2973 ev.key.authenticated = key->authenticated;
2974 ev.key.enc_size = key->enc_size;
2975 ev.key.ediv = key->ediv;
2976
2977 if (key->type == HCI_SMP_LTK)
2978 ev.key.master = 1;
2979
2980 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
2981 memcpy(ev.key.val, key->val, sizeof(key->val));
2982
2983 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
2984 NULL);
2985}
2986
2987int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2988 u8 addr_type, u32 flags, u8 *name, u8 name_len,
2989 u8 *dev_class)
2990{
2991 char buf[512];
2992 struct mgmt_ev_device_connected *ev = (void *) buf;
2993 u16 eir_len = 0;
2994
2995 bacpy(&ev->addr.bdaddr, bdaddr);
2996 ev->addr.type = link_to_bdaddr(link_type, addr_type);
2997
2998 ev->flags = __cpu_to_le32(flags);
2999
3000 if (name_len > 0)
3001 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3002 name, name_len);
3003
3004 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3005 eir_len = eir_append_data(ev->eir, eir_len,
3006 EIR_CLASS_OF_DEV, dev_class, 3);
3007
3008 ev->eir_len = cpu_to_le16(eir_len);
2396 3009
2397 return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL); 3010 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3011 sizeof(*ev) + eir_len, NULL);
2398} 3012}
2399 3013
2400static void disconnect_rsp(struct pending_cmd *cmd, void *data) 3014static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2403,10 +3017,11 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
2403 struct sock **sk = data; 3017 struct sock **sk = data;
2404 struct mgmt_rp_disconnect rp; 3018 struct mgmt_rp_disconnect rp;
2405 3019
2406 bacpy(&rp.bdaddr, &cp->bdaddr); 3020 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2407 rp.status = 0; 3021 rp.addr.type = cp->addr.type;
2408 3022
2409 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); 3023 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3024 sizeof(rp));
2410 3025
2411 *sk = cmd->sk; 3026 *sk = cmd->sk;
2412 sock_hold(*sk); 3027 sock_hold(*sk);
@@ -2414,25 +3029,25 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
2414 mgmt_pending_remove(cmd); 3029 mgmt_pending_remove(cmd);
2415} 3030}
2416 3031
2417static void remove_keys_rsp(struct pending_cmd *cmd, void *data) 3032static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
2418{ 3033{
2419 u8 *status = data; 3034 struct hci_dev *hdev = data;
2420 struct mgmt_cp_remove_keys *cp = cmd->param; 3035 struct mgmt_cp_unpair_device *cp = cmd->param;
2421 struct mgmt_rp_remove_keys rp; 3036 struct mgmt_rp_unpair_device rp;
2422 3037
2423 memset(&rp, 0, sizeof(rp)); 3038 memset(&rp, 0, sizeof(rp));
2424 bacpy(&rp.bdaddr, &cp->bdaddr); 3039 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2425 if (status != NULL) 3040 rp.addr.type = cp->addr.type;
2426 rp.status = *status; 3041
3042 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2427 3043
2428 cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp, 3044 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
2429 sizeof(rp));
2430 3045
2431 mgmt_pending_remove(cmd); 3046 mgmt_pending_remove(cmd);
2432} 3047}
2433 3048
2434int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3049int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
2435 u8 addr_type) 3050 u8 link_type, u8 addr_type)
2436{ 3051{
2437 struct mgmt_addr_info ev; 3052 struct mgmt_addr_info ev;
2438 struct sock *sk = NULL; 3053 struct sock *sk = NULL;
@@ -2441,52 +3056,51 @@ int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2441 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); 3056 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
2442 3057
2443 bacpy(&ev.bdaddr, bdaddr); 3058 bacpy(&ev.bdaddr, bdaddr);
2444 ev.type = link_to_mgmt(link_type, addr_type); 3059 ev.type = link_to_bdaddr(link_type, addr_type);
2445 3060
2446 err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk); 3061 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3062 sk);
2447 3063
2448 if (sk) 3064 if (sk)
2449 sock_put(sk); 3065 sock_put(sk);
2450 3066
2451 mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL); 3067 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3068 hdev);
2452 3069
2453 return err; 3070 return err;
2454} 3071}
2455 3072
2456int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) 3073int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3074 u8 link_type, u8 addr_type, u8 status)
2457{ 3075{
3076 struct mgmt_rp_disconnect rp;
2458 struct pending_cmd *cmd; 3077 struct pending_cmd *cmd;
2459 u8 mgmt_err = mgmt_status(status);
2460 int err; 3078 int err;
2461 3079
2462 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); 3080 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
2463 if (!cmd) 3081 if (!cmd)
2464 return -ENOENT; 3082 return -ENOENT;
2465 3083
2466 if (bdaddr) { 3084 bacpy(&rp.addr.bdaddr, bdaddr);
2467 struct mgmt_rp_disconnect rp; 3085 rp.addr.type = link_to_bdaddr(link_type, addr_type);
2468
2469 bacpy(&rp.bdaddr, bdaddr);
2470 rp.status = status;
2471 3086
2472 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 3087 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
2473 &rp, sizeof(rp)); 3088 mgmt_status(status), &rp, sizeof(rp));
2474 } else
2475 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
2476 mgmt_err);
2477 3089
2478 mgmt_pending_remove(cmd); 3090 mgmt_pending_remove(cmd);
2479 3091
3092 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3093 hdev);
2480 return err; 3094 return err;
2481} 3095}
2482 3096
2483int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3097int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2484 u8 addr_type, u8 status) 3098 u8 addr_type, u8 status)
2485{ 3099{
2486 struct mgmt_ev_connect_failed ev; 3100 struct mgmt_ev_connect_failed ev;
2487 3101
2488 bacpy(&ev.addr.bdaddr, bdaddr); 3102 bacpy(&ev.addr.bdaddr, bdaddr);
2489 ev.addr.type = link_to_mgmt(link_type, addr_type); 3103 ev.addr.type = link_to_bdaddr(link_type, addr_type);
2490 ev.status = mgmt_status(status); 3104 ev.status = mgmt_status(status);
2491 3105
2492 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); 3106 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -2496,15 +3110,16 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
2496{ 3110{
2497 struct mgmt_ev_pin_code_request ev; 3111 struct mgmt_ev_pin_code_request ev;
2498 3112
2499 bacpy(&ev.bdaddr, bdaddr); 3113 bacpy(&ev.addr.bdaddr, bdaddr);
3114 ev.addr.type = BDADDR_BREDR;
2500 ev.secure = secure; 3115 ev.secure = secure;
2501 3116
2502 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), 3117 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
2503 NULL); 3118 NULL);
2504} 3119}
2505 3120
2506int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3121int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2507 u8 status) 3122 u8 status)
2508{ 3123{
2509 struct pending_cmd *cmd; 3124 struct pending_cmd *cmd;
2510 struct mgmt_rp_pin_code_reply rp; 3125 struct mgmt_rp_pin_code_reply rp;
@@ -2514,11 +3129,11 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2514 if (!cmd) 3129 if (!cmd)
2515 return -ENOENT; 3130 return -ENOENT;
2516 3131
2517 bacpy(&rp.bdaddr, bdaddr); 3132 bacpy(&rp.addr.bdaddr, bdaddr);
2518 rp.status = mgmt_status(status); 3133 rp.addr.type = BDADDR_BREDR;
2519 3134
2520 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp, 3135 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2521 sizeof(rp)); 3136 mgmt_status(status), &rp, sizeof(rp));
2522 3137
2523 mgmt_pending_remove(cmd); 3138 mgmt_pending_remove(cmd);
2524 3139
@@ -2526,7 +3141,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2526} 3141}
2527 3142
2528int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3143int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2529 u8 status) 3144 u8 status)
2530{ 3145{
2531 struct pending_cmd *cmd; 3146 struct pending_cmd *cmd;
2532 struct mgmt_rp_pin_code_reply rp; 3147 struct mgmt_rp_pin_code_reply rp;
@@ -2536,11 +3151,11 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2536 if (!cmd) 3151 if (!cmd)
2537 return -ENOENT; 3152 return -ENOENT;
2538 3153
2539 bacpy(&rp.bdaddr, bdaddr); 3154 bacpy(&rp.addr.bdaddr, bdaddr);
2540 rp.status = mgmt_status(status); 3155 rp.addr.type = BDADDR_BREDR;
2541 3156
2542 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, 3157 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
2543 sizeof(rp)); 3158 mgmt_status(status), &rp, sizeof(rp));
2544 3159
2545 mgmt_pending_remove(cmd); 3160 mgmt_pending_remove(cmd);
2546 3161
@@ -2548,34 +3163,39 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548} 3163}
2549 3164
2550int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3165int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
2551 __le32 value, u8 confirm_hint) 3166 u8 link_type, u8 addr_type, __le32 value,
3167 u8 confirm_hint)
2552{ 3168{
2553 struct mgmt_ev_user_confirm_request ev; 3169 struct mgmt_ev_user_confirm_request ev;
2554 3170
2555 BT_DBG("%s", hdev->name); 3171 BT_DBG("%s", hdev->name);
2556 3172
2557 bacpy(&ev.bdaddr, bdaddr); 3173 bacpy(&ev.addr.bdaddr, bdaddr);
3174 ev.addr.type = link_to_bdaddr(link_type, addr_type);
2558 ev.confirm_hint = confirm_hint; 3175 ev.confirm_hint = confirm_hint;
2559 put_unaligned_le32(value, &ev.value); 3176 ev.value = value;
2560 3177
2561 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), 3178 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
2562 NULL); 3179 NULL);
2563} 3180}
2564 3181
2565int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr) 3182int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3183 u8 link_type, u8 addr_type)
2566{ 3184{
2567 struct mgmt_ev_user_passkey_request ev; 3185 struct mgmt_ev_user_passkey_request ev;
2568 3186
2569 BT_DBG("%s", hdev->name); 3187 BT_DBG("%s", hdev->name);
2570 3188
2571 bacpy(&ev.bdaddr, bdaddr); 3189 bacpy(&ev.addr.bdaddr, bdaddr);
3190 ev.addr.type = link_to_bdaddr(link_type, addr_type);
2572 3191
2573 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), 3192 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
2574 NULL); 3193 NULL);
2575} 3194}
2576 3195
2577static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3196static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2578 u8 status, u8 opcode) 3197 u8 link_type, u8 addr_type, u8 status,
3198 u8 opcode)
2579{ 3199{
2580 struct pending_cmd *cmd; 3200 struct pending_cmd *cmd;
2581 struct mgmt_rp_user_confirm_reply rp; 3201 struct mgmt_rp_user_confirm_reply rp;
@@ -2585,9 +3205,10 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2585 if (!cmd) 3205 if (!cmd)
2586 return -ENOENT; 3206 return -ENOENT;
2587 3207
2588 bacpy(&rp.bdaddr, bdaddr); 3208 bacpy(&rp.addr.bdaddr, bdaddr);
2589 rp.status = mgmt_status(status); 3209 rp.addr.type = link_to_bdaddr(link_type, addr_type);
2590 err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp)); 3210 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3211 &rp, sizeof(rp));
2591 3212
2592 mgmt_pending_remove(cmd); 3213 mgmt_pending_remove(cmd);
2593 3214
@@ -2595,72 +3216,215 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2595} 3216}
2596 3217
2597int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3218int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2598 u8 status) 3219 u8 link_type, u8 addr_type, u8 status)
2599{ 3220{
2600 return user_pairing_resp_complete(hdev, bdaddr, status, 3221 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2601 MGMT_OP_USER_CONFIRM_REPLY); 3222 status, MGMT_OP_USER_CONFIRM_REPLY);
2602} 3223}
2603 3224
2604int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, 3225int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2605 bdaddr_t *bdaddr, u8 status) 3226 u8 link_type, u8 addr_type, u8 status)
2606{ 3227{
2607 return user_pairing_resp_complete(hdev, bdaddr, status, 3228 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2608 MGMT_OP_USER_CONFIRM_NEG_REPLY); 3229 status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
2609} 3230}
2610 3231
2611int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3232int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2612 u8 status) 3233 u8 link_type, u8 addr_type, u8 status)
2613{ 3234{
2614 return user_pairing_resp_complete(hdev, bdaddr, status, 3235 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2615 MGMT_OP_USER_PASSKEY_REPLY); 3236 status, MGMT_OP_USER_PASSKEY_REPLY);
2616} 3237}
2617 3238
2618int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, 3239int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2619 bdaddr_t *bdaddr, u8 status) 3240 u8 link_type, u8 addr_type, u8 status)
2620{ 3241{
2621 return user_pairing_resp_complete(hdev, bdaddr, status, 3242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2622 MGMT_OP_USER_PASSKEY_NEG_REPLY); 3243 status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
2623} 3244}
2624 3245
2625int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) 3246int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3247 u8 addr_type, u8 status)
2626{ 3248{
2627 struct mgmt_ev_auth_failed ev; 3249 struct mgmt_ev_auth_failed ev;
2628 3250
2629 bacpy(&ev.bdaddr, bdaddr); 3251 bacpy(&ev.addr.bdaddr, bdaddr);
3252 ev.addr.type = link_to_bdaddr(link_type, addr_type);
2630 ev.status = mgmt_status(status); 3253 ev.status = mgmt_status(status);
2631 3254
2632 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); 3255 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
2633} 3256}
2634 3257
3258int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3259{
3260 struct cmd_lookup match = { NULL, hdev };
3261 bool changed = false;
3262 int err = 0;
3263
3264 if (status) {
3265 u8 mgmt_err = mgmt_status(status);
3266 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3267 cmd_status_rsp, &mgmt_err);
3268 return 0;
3269 }
3270
3271 if (test_bit(HCI_AUTH, &hdev->flags)) {
3272 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3273 changed = true;
3274 } else {
3275 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3276 changed = true;
3277 }
3278
3279 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3280 &match);
3281
3282 if (changed)
3283 err = new_settings(hdev, match.sk);
3284
3285 if (match.sk)
3286 sock_put(match.sk);
3287
3288 return err;
3289}
3290
3291static int clear_eir(struct hci_dev *hdev)
3292{
3293 struct hci_cp_write_eir cp;
3294
3295 if (!(hdev->features[6] & LMP_EXT_INQ))
3296 return 0;
3297
3298 memset(hdev->eir, 0, sizeof(hdev->eir));
3299
3300 memset(&cp, 0, sizeof(cp));
3301
3302 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3303}
3304
3305int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3306{
3307 struct cmd_lookup match = { NULL, hdev };
3308 bool changed = false;
3309 int err = 0;
3310
3311 if (status) {
3312 u8 mgmt_err = mgmt_status(status);
3313
3314 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3315 &hdev->dev_flags))
3316 err = new_settings(hdev, NULL);
3317
3318 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3319 &mgmt_err);
3320
3321 return err;
3322 }
3323
3324 if (enable) {
3325 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3326 changed = true;
3327 } else {
3328 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3329 changed = true;
3330 }
3331
3332 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3333
3334 if (changed)
3335 err = new_settings(hdev, match.sk);
3336
3337 if (match.sk)
3338 sock_put(match.sk);
3339
3340 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3341 update_eir(hdev);
3342 else
3343 clear_eir(hdev);
3344
3345 return err;
3346}
3347
3348static void class_rsp(struct pending_cmd *cmd, void *data)
3349{
3350 struct cmd_lookup *match = data;
3351
3352 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3353 match->hdev->dev_class, 3);
3354
3355 list_del(&cmd->list);
3356
3357 if (match->sk == NULL) {
3358 match->sk = cmd->sk;
3359 sock_hold(match->sk);
3360 }
3361
3362 mgmt_pending_free(cmd);
3363}
3364
3365int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3366 u8 status)
3367{
3368 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3369 int err = 0;
3370
3371 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3372
3373 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3374 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3375 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3376
3377 if (!status)
3378 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3379 3, NULL);
3380
3381 if (match.sk)
3382 sock_put(match.sk);
3383
3384 return err;
3385}
3386
2635int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) 3387int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
2636{ 3388{
2637 struct pending_cmd *cmd; 3389 struct pending_cmd *cmd;
2638 struct mgmt_cp_set_local_name ev; 3390 struct mgmt_cp_set_local_name ev;
2639 int err; 3391 bool changed = false;
3392 int err = 0;
3393
3394 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3395 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3396 changed = true;
3397 }
2640 3398
2641 memset(&ev, 0, sizeof(ev)); 3399 memset(&ev, 0, sizeof(ev));
2642 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 3400 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3401 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
2643 3402
2644 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 3403 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2645 if (!cmd) 3404 if (!cmd)
2646 goto send_event; 3405 goto send_event;
2647 3406
3407 /* Always assume that either the short or the complete name has
3408 * changed if there was a pending mgmt command */
3409 changed = true;
3410
2648 if (status) { 3411 if (status) {
2649 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 3412 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2650 mgmt_status(status)); 3413 mgmt_status(status));
2651 goto failed; 3414 goto failed;
2652 } 3415 }
2653 3416
2654 update_eir(hdev); 3417 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
2655 3418 sizeof(ev));
2656 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
2657 sizeof(ev));
2658 if (err < 0) 3419 if (err < 0)
2659 goto failed; 3420 goto failed;
2660 3421
2661send_event: 3422send_event:
2662 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), 3423 if (changed)
2663 cmd ? cmd->sk : NULL); 3424 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3425 sizeof(ev), cmd ? cmd->sk : NULL);
3426
3427 update_eir(hdev);
2664 3428
2665failed: 3429failed:
2666 if (cmd) 3430 if (cmd)
@@ -2669,7 +3433,7 @@ failed:
2669} 3433}
2670 3434
2671int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, 3435int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2672 u8 *randomizer, u8 status) 3436 u8 *randomizer, u8 status)
2673{ 3437{
2674 struct pending_cmd *cmd; 3438 struct pending_cmd *cmd;
2675 int err; 3439 int err;
@@ -2681,9 +3445,8 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2681 return -ENOENT; 3445 return -ENOENT;
2682 3446
2683 if (status) { 3447 if (status) {
2684 err = cmd_status(cmd->sk, hdev->id, 3448 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2685 MGMT_OP_READ_LOCAL_OOB_DATA, 3449 mgmt_status(status));
2686 mgmt_status(status));
2687 } else { 3450 } else {
2688 struct mgmt_rp_read_local_oob_data rp; 3451 struct mgmt_rp_read_local_oob_data rp;
2689 3452
@@ -2691,8 +3454,8 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2691 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); 3454 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
2692 3455
2693 err = cmd_complete(cmd->sk, hdev->id, 3456 err = cmd_complete(cmd->sk, hdev->id,
2694 MGMT_OP_READ_LOCAL_OOB_DATA, 3457 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
2695 &rp, sizeof(rp)); 3458 sizeof(rp));
2696 } 3459 }
2697 3460
2698 mgmt_pending_remove(cmd); 3461 mgmt_pending_remove(cmd);
@@ -2700,48 +3463,120 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2700 return err; 3463 return err;
2701} 3464}
2702 3465
3466int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3467{
3468 struct cmd_lookup match = { NULL, hdev };
3469 bool changed = false;
3470 int err = 0;
3471
3472 if (status) {
3473 u8 mgmt_err = mgmt_status(status);
3474
3475 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3476 &hdev->dev_flags))
3477 err = new_settings(hdev, NULL);
3478
3479 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3480 &mgmt_err);
3481
3482 return err;
3483 }
3484
3485 if (enable) {
3486 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3487 changed = true;
3488 } else {
3489 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3490 changed = true;
3491 }
3492
3493 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3494
3495 if (changed)
3496 err = new_settings(hdev, match.sk);
3497
3498 if (match.sk)
3499 sock_put(match.sk);
3500
3501 return err;
3502}
3503
2703int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3504int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2704 u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir) 3505 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3506 ssp, u8 *eir, u16 eir_len)
2705{ 3507{
2706 struct mgmt_ev_device_found ev; 3508 char buf[512];
3509 struct mgmt_ev_device_found *ev = (void *) buf;
3510 size_t ev_size;
2707 3511
2708 memset(&ev, 0, sizeof(ev)); 3512 /* Leave 5 bytes for a potential CoD field */
3513 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3514 return -EINVAL;
2709 3515
2710 bacpy(&ev.addr.bdaddr, bdaddr); 3516 memset(buf, 0, sizeof(buf));
2711 ev.addr.type = link_to_mgmt(link_type, addr_type); 3517
2712 ev.rssi = rssi; 3518 bacpy(&ev->addr.bdaddr, bdaddr);
3519 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3520 ev->rssi = rssi;
3521 if (cfm_name)
3522 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
3523 if (!ssp)
3524 ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2713 3525
2714 if (eir) 3526 if (eir_len > 0)
2715 memcpy(ev.eir, eir, sizeof(ev.eir)); 3527 memcpy(ev->eir, eir, eir_len);
2716 3528
2717 if (dev_class) 3529 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
2718 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); 3530 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3531 dev_class, 3);
2719 3532
2720 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL); 3533 ev->eir_len = cpu_to_le16(eir_len);
3534
3535 ev_size = sizeof(*ev) + eir_len;
3536
3537 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
2721} 3538}
2722 3539
2723int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) 3540int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3541 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
2724{ 3542{
2725 struct mgmt_ev_remote_name ev; 3543 struct mgmt_ev_device_found *ev;
3544 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3545 u16 eir_len;
2726 3546
2727 memset(&ev, 0, sizeof(ev)); 3547 ev = (struct mgmt_ev_device_found *) buf;
2728 3548
2729 bacpy(&ev.bdaddr, bdaddr); 3549 memset(buf, 0, sizeof(buf));
2730 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 3550
3551 bacpy(&ev->addr.bdaddr, bdaddr);
3552 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3553 ev->rssi = rssi;
3554
3555 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3556 name_len);
2731 3557
2732 return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL); 3558 ev->eir_len = cpu_to_le16(eir_len);
3559
3560 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3561 sizeof(*ev) + eir_len, NULL);
2733} 3562}
2734 3563
2735int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) 3564int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2736{ 3565{
2737 struct pending_cmd *cmd; 3566 struct pending_cmd *cmd;
3567 u8 type;
2738 int err; 3568 int err;
2739 3569
3570 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3571
2740 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); 3572 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2741 if (!cmd) 3573 if (!cmd)
2742 return -ENOENT; 3574 return -ENOENT;
2743 3575
2744 err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); 3576 type = hdev->discovery.type;
3577
3578 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3579 &type, sizeof(type));
2745 mgmt_pending_remove(cmd); 3580 mgmt_pending_remove(cmd);
2746 3581
2747 return err; 3582 return err;
@@ -2756,7 +3591,8 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2756 if (!cmd) 3591 if (!cmd)
2757 return -ENOENT; 3592 return -ENOENT;
2758 3593
2759 err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); 3594 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3595 &hdev->discovery.type, sizeof(hdev->discovery.type));
2760 mgmt_pending_remove(cmd); 3596 mgmt_pending_remove(cmd);
2761 3597
2762 return err; 3598 return err;
@@ -2764,44 +3600,58 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2764 3600
2765int mgmt_discovering(struct hci_dev *hdev, u8 discovering) 3601int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
2766{ 3602{
3603 struct mgmt_ev_discovering ev;
2767 struct pending_cmd *cmd; 3604 struct pending_cmd *cmd;
2768 3605
3606 BT_DBG("%s discovering %u", hdev->name, discovering);
3607
2769 if (discovering) 3608 if (discovering)
2770 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); 3609 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2771 else 3610 else
2772 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); 3611 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2773 3612
2774 if (cmd != NULL) { 3613 if (cmd != NULL) {
2775 cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0); 3614 u8 type = hdev->discovery.type;
3615
3616 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3617 sizeof(type));
2776 mgmt_pending_remove(cmd); 3618 mgmt_pending_remove(cmd);
2777 } 3619 }
2778 3620
2779 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering, 3621 memset(&ev, 0, sizeof(ev));
2780 sizeof(discovering), NULL); 3622 ev.type = hdev->discovery.type;
3623 ev.discovering = discovering;
3624
3625 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
2781} 3626}
2782 3627
2783int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr) 3628int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2784{ 3629{
2785 struct pending_cmd *cmd; 3630 struct pending_cmd *cmd;
2786 struct mgmt_ev_device_blocked ev; 3631 struct mgmt_ev_device_blocked ev;
2787 3632
2788 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev); 3633 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
2789 3634
2790 bacpy(&ev.bdaddr, bdaddr); 3635 bacpy(&ev.addr.bdaddr, bdaddr);
3636 ev.addr.type = type;
2791 3637
2792 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev), 3638 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
2793 cmd ? cmd->sk : NULL); 3639 cmd ? cmd->sk : NULL);
2794} 3640}
2795 3641
2796int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr) 3642int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2797{ 3643{
2798 struct pending_cmd *cmd; 3644 struct pending_cmd *cmd;
2799 struct mgmt_ev_device_unblocked ev; 3645 struct mgmt_ev_device_unblocked ev;
2800 3646
2801 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev); 3647 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
2802 3648
2803 bacpy(&ev.bdaddr, bdaddr); 3649 bacpy(&ev.addr.bdaddr, bdaddr);
3650 ev.addr.type = type;
2804 3651
2805 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev), 3652 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
2806 cmd ? cmd->sk : NULL); 3653 cmd ? cmd->sk : NULL);
2807} 3654}
3655
3656module_param(enable_hs, bool, 0644);
3657MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 22169c3f1482..e8707debb864 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -45,7 +45,6 @@
45#include <linux/security.h> 45#include <linux/security.h>
46#include <net/sock.h> 46#include <net/sock.h>
47 47
48#include <asm/system.h>
49#include <linux/uaccess.h> 48#include <linux/uaccess.h>
50 49
51#include <net/bluetooth/bluetooth.h> 50#include <net/bluetooth/bluetooth.h>
@@ -261,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
261 260
262 if (parent) { 261 if (parent) {
263 sk->sk_type = parent->sk_type; 262 sk->sk_type = parent->sk_type;
264 pi->dlc->defer_setup = bt_sk(parent)->defer_setup; 263 pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
264 &bt_sk(parent)->flags);
265 265
266 pi->sec_level = rfcomm_pi(parent)->sec_level; 266 pi->sec_level = rfcomm_pi(parent)->sec_level;
267 pi->role_switch = rfcomm_pi(parent)->role_switch; 267 pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -732,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
732 break; 732 break;
733 } 733 }
734 734
735 bt_sk(sk)->defer_setup = opt; 735 if (opt)
736 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
737 else
738 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
739
736 break; 740 break;
737 741
738 default: 742 default:
@@ -850,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
850 break; 854 break;
851 } 855 }
852 856
853 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) 857 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
858 (u32 __user *) optval))
854 err = -EFAULT; 859 err = -EFAULT;
855 860
856 break; 861 break;
@@ -973,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
973done: 978done:
974 bh_unlock_sock(parent); 979 bh_unlock_sock(parent);
975 980
976 if (bt_sk(parent)->defer_setup) 981 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
977 parent->sk_state_change(parent); 982 parent->sk_state_change(parent);
978 983
979 return result; 984 return result;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index a2d4f5122a6a..d1820ff14aee 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -48,13 +48,12 @@
48static struct tty_driver *rfcomm_tty_driver; 48static struct tty_driver *rfcomm_tty_driver;
49 49
50struct rfcomm_dev { 50struct rfcomm_dev {
51 struct tty_port port;
51 struct list_head list; 52 struct list_head list;
52 atomic_t refcnt;
53 53
54 char name[12]; 54 char name[12];
55 int id; 55 int id;
56 unsigned long flags; 56 unsigned long flags;
57 atomic_t opened;
58 int err; 57 int err;
59 58
60 bdaddr_t src; 59 bdaddr_t src;
@@ -64,9 +63,7 @@ struct rfcomm_dev {
64 uint modem_status; 63 uint modem_status;
65 64
66 struct rfcomm_dlc *dlc; 65 struct rfcomm_dlc *dlc;
67 struct tty_struct *tty;
68 wait_queue_head_t wait; 66 wait_queue_head_t wait;
69 struct work_struct wakeup_task;
70 67
71 struct device *tty_dev; 68 struct device *tty_dev;
72 69
@@ -82,11 +79,18 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
82static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); 79static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
83static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); 80static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
84 81
85static void rfcomm_tty_wakeup(struct work_struct *work);
86
87/* ---- Device functions ---- */ 82/* ---- Device functions ---- */
88static void rfcomm_dev_destruct(struct rfcomm_dev *dev) 83
84/*
85 * The reason this isn't actually a race, as you no doubt have a little voice
86 * screaming at you in your head, is that the refcount should never actually
87 * reach zero unless the device has already been taken off the list, in
88 * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
89 * rfcomm_dev_destruct() anyway.
90 */
91static void rfcomm_dev_destruct(struct tty_port *port)
89{ 92{
93 struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
90 struct rfcomm_dlc *dlc = dev->dlc; 94 struct rfcomm_dlc *dlc = dev->dlc;
91 95
92 BT_DBG("dev %p dlc %p", dev, dlc); 96 BT_DBG("dev %p dlc %p", dev, dlc);
@@ -113,23 +117,9 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
113 module_put(THIS_MODULE); 117 module_put(THIS_MODULE);
114} 118}
115 119
116static inline void rfcomm_dev_hold(struct rfcomm_dev *dev) 120static const struct tty_port_operations rfcomm_port_ops = {
117{ 121 .destruct = rfcomm_dev_destruct,
118 atomic_inc(&dev->refcnt); 122};
119}
120
121static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
122{
123 /* The reason this isn't actually a race, as you no
124 doubt have a little voice screaming at you in your
125 head, is that the refcount should never actually
126 reach zero unless the device has already been taken
127 off the list, in rfcomm_dev_del(). And if that's not
128 true, we'll hit the BUG() in rfcomm_dev_destruct()
129 anyway. */
130 if (atomic_dec_and_test(&dev->refcnt))
131 rfcomm_dev_destruct(dev);
132}
133 123
134static struct rfcomm_dev *__rfcomm_dev_get(int id) 124static struct rfcomm_dev *__rfcomm_dev_get(int id)
135{ 125{
@@ -154,7 +144,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
154 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) 144 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
155 dev = NULL; 145 dev = NULL;
156 else 146 else
157 rfcomm_dev_hold(dev); 147 tty_port_get(&dev->port);
158 } 148 }
159 149
160 spin_unlock(&rfcomm_dev_lock); 150 spin_unlock(&rfcomm_dev_lock);
@@ -196,7 +186,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
196static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 186static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
197{ 187{
198 struct rfcomm_dev *dev, *entry; 188 struct rfcomm_dev *dev, *entry;
199 struct list_head *head = &rfcomm_dev_list, *p; 189 struct list_head *head = &rfcomm_dev_list;
200 int err = 0; 190 int err = 0;
201 191
202 BT_DBG("id %d channel %d", req->dev_id, req->channel); 192 BT_DBG("id %d channel %d", req->dev_id, req->channel);
@@ -215,7 +205,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
215 break; 205 break;
216 206
217 dev->id++; 207 dev->id++;
218 head = p; 208 head = &entry->list;
219 } 209 }
220 } else { 210 } else {
221 dev->id = req->dev_id; 211 dev->id = req->dev_id;
@@ -229,7 +219,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
229 if (entry->id > dev->id - 1) 219 if (entry->id > dev->id - 1)
230 break; 220 break;
231 221
232 head = p; 222 head = &entry->list;
233 } 223 }
234 } 224 }
235 225
@@ -241,7 +231,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
241 sprintf(dev->name, "rfcomm%d", dev->id); 231 sprintf(dev->name, "rfcomm%d", dev->id);
242 232
243 list_add(&dev->list, head); 233 list_add(&dev->list, head);
244 atomic_set(&dev->refcnt, 1);
245 234
246 bacpy(&dev->src, &req->src); 235 bacpy(&dev->src, &req->src);
247 bacpy(&dev->dst, &req->dst); 236 bacpy(&dev->dst, &req->dst);
@@ -250,10 +239,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
250 dev->flags = req->flags & 239 dev->flags = req->flags &
251 ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); 240 ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC));
252 241
253 atomic_set(&dev->opened, 0); 242 tty_port_init(&dev->port);
254 243 dev->port.ops = &rfcomm_port_ops;
255 init_waitqueue_head(&dev->wait); 244 init_waitqueue_head(&dev->wait);
256 INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
257 245
258 skb_queue_head_init(&dev->pending); 246 skb_queue_head_init(&dev->pending);
259 247
@@ -320,18 +308,23 @@ free:
320 308
321static void rfcomm_dev_del(struct rfcomm_dev *dev) 309static void rfcomm_dev_del(struct rfcomm_dev *dev)
322{ 310{
311 unsigned long flags;
323 BT_DBG("dev %p", dev); 312 BT_DBG("dev %p", dev);
324 313
325 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); 314 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
326 315
327 if (atomic_read(&dev->opened) > 0) 316 spin_lock_irqsave(&dev->port.lock, flags);
317 if (dev->port.count > 0) {
318 spin_unlock_irqrestore(&dev->port.lock, flags);
328 return; 319 return;
320 }
321 spin_unlock_irqrestore(&dev->port.lock, flags);
329 322
330 spin_lock(&rfcomm_dev_lock); 323 spin_lock(&rfcomm_dev_lock);
331 list_del_init(&dev->list); 324 list_del_init(&dev->list);
332 spin_unlock(&rfcomm_dev_lock); 325 spin_unlock(&rfcomm_dev_lock);
333 326
334 rfcomm_dev_put(dev); 327 tty_port_put(&dev->port);
335} 328}
336 329
337/* ---- Send buffer ---- */ 330/* ---- Send buffer ---- */
@@ -345,15 +338,16 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
345static void rfcomm_wfree(struct sk_buff *skb) 338static void rfcomm_wfree(struct sk_buff *skb)
346{ 339{
347 struct rfcomm_dev *dev = (void *) skb->sk; 340 struct rfcomm_dev *dev = (void *) skb->sk;
341 struct tty_struct *tty = dev->port.tty;
348 atomic_sub(skb->truesize, &dev->wmem_alloc); 342 atomic_sub(skb->truesize, &dev->wmem_alloc);
349 if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) 343 if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
350 queue_work(system_nrt_wq, &dev->wakeup_task); 344 tty_wakeup(tty);
351 rfcomm_dev_put(dev); 345 tty_port_put(&dev->port);
352} 346}
353 347
354static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) 348static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
355{ 349{
356 rfcomm_dev_hold(dev); 350 tty_port_get(&dev->port);
357 atomic_add(skb->truesize, &dev->wmem_alloc); 351 atomic_add(skb->truesize, &dev->wmem_alloc);
358 skb->sk = (void *) dev; 352 skb->sk = (void *) dev;
359 skb->destructor = rfcomm_wfree; 353 skb->destructor = rfcomm_wfree;
@@ -432,7 +426,7 @@ static int rfcomm_release_dev(void __user *arg)
432 return -ENODEV; 426 return -ENODEV;
433 427
434 if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { 428 if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
435 rfcomm_dev_put(dev); 429 tty_port_put(&dev->port);
436 return -EPERM; 430 return -EPERM;
437 } 431 }
438 432
@@ -440,12 +434,12 @@ static int rfcomm_release_dev(void __user *arg)
440 rfcomm_dlc_close(dev->dlc, 0); 434 rfcomm_dlc_close(dev->dlc, 0);
441 435
442 /* Shut down TTY synchronously before freeing rfcomm_dev */ 436 /* Shut down TTY synchronously before freeing rfcomm_dev */
443 if (dev->tty) 437 if (dev->port.tty)
444 tty_vhangup(dev->tty); 438 tty_vhangup(dev->port.tty);
445 439
446 if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) 440 if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
447 rfcomm_dev_del(dev); 441 rfcomm_dev_del(dev);
448 rfcomm_dev_put(dev); 442 tty_port_put(&dev->port);
449 return 0; 443 return 0;
450} 444}
451 445
@@ -523,7 +517,7 @@ static int rfcomm_get_dev_info(void __user *arg)
523 if (copy_to_user(arg, &di, sizeof(di))) 517 if (copy_to_user(arg, &di, sizeof(di)))
524 err = -EFAULT; 518 err = -EFAULT;
525 519
526 rfcomm_dev_put(dev); 520 tty_port_put(&dev->port);
527 return err; 521 return err;
528} 522}
529 523
@@ -559,7 +553,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
559 return; 553 return;
560 } 554 }
561 555
562 tty = dev->tty; 556 tty = dev->port.tty;
563 if (!tty || !skb_queue_empty(&dev->pending)) { 557 if (!tty || !skb_queue_empty(&dev->pending)) {
564 skb_queue_tail(&dev->pending, skb); 558 skb_queue_tail(&dev->pending, skb);
565 return; 559 return;
@@ -585,13 +579,13 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
585 wake_up_interruptible(&dev->wait); 579 wake_up_interruptible(&dev->wait);
586 580
587 if (dlc->state == BT_CLOSED) { 581 if (dlc->state == BT_CLOSED) {
588 if (!dev->tty) { 582 if (!dev->port.tty) {
589 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { 583 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
590 /* Drop DLC lock here to avoid deadlock 584 /* Drop DLC lock here to avoid deadlock
591 * 1. rfcomm_dev_get will take rfcomm_dev_lock 585 * 1. rfcomm_dev_get will take rfcomm_dev_lock
592 * but in rfcomm_dev_add there's lock order: 586 * but in rfcomm_dev_add there's lock order:
593 * rfcomm_dev_lock -> dlc lock 587 * rfcomm_dev_lock -> dlc lock
594 * 2. rfcomm_dev_put will deadlock if it's 588 * 2. tty_port_put will deadlock if it's
595 * the last reference 589 * the last reference
596 */ 590 */
597 rfcomm_dlc_unlock(dlc); 591 rfcomm_dlc_unlock(dlc);
@@ -601,11 +595,11 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
601 } 595 }
602 596
603 rfcomm_dev_del(dev); 597 rfcomm_dev_del(dev);
604 rfcomm_dev_put(dev); 598 tty_port_put(&dev->port);
605 rfcomm_dlc_lock(dlc); 599 rfcomm_dlc_lock(dlc);
606 } 600 }
607 } else 601 } else
608 tty_hangup(dev->tty); 602 tty_hangup(dev->port.tty);
609 } 603 }
610} 604}
611 605
@@ -618,8 +612,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
618 BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); 612 BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
619 613
620 if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) { 614 if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
621 if (dev->tty && !C_CLOCAL(dev->tty)) 615 if (dev->port.tty && !C_CLOCAL(dev->port.tty))
622 tty_hangup(dev->tty); 616 tty_hangup(dev->port.tty);
623 } 617 }
624 618
625 dev->modem_status = 619 dev->modem_status =
@@ -630,21 +624,9 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
630} 624}
631 625
632/* ---- TTY functions ---- */ 626/* ---- TTY functions ---- */
633static void rfcomm_tty_wakeup(struct work_struct *work)
634{
635 struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
636 wakeup_task);
637 struct tty_struct *tty = dev->tty;
638 if (!tty)
639 return;
640
641 BT_DBG("dev %p tty %p", dev, tty);
642 tty_wakeup(tty);
643}
644
645static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) 627static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
646{ 628{
647 struct tty_struct *tty = dev->tty; 629 struct tty_struct *tty = dev->port.tty;
648 struct sk_buff *skb; 630 struct sk_buff *skb;
649 int inserted = 0; 631 int inserted = 0;
650 632
@@ -671,6 +653,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
671 DECLARE_WAITQUEUE(wait, current); 653 DECLARE_WAITQUEUE(wait, current);
672 struct rfcomm_dev *dev; 654 struct rfcomm_dev *dev;
673 struct rfcomm_dlc *dlc; 655 struct rfcomm_dlc *dlc;
656 unsigned long flags;
674 int err, id; 657 int err, id;
675 658
676 id = tty->index; 659 id = tty->index;
@@ -686,10 +669,14 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
686 return -ENODEV; 669 return -ENODEV;
687 670
688 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), 671 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
689 dev->channel, atomic_read(&dev->opened)); 672 dev->channel, dev->port.count);
690 673
691 if (atomic_inc_return(&dev->opened) > 1) 674 spin_lock_irqsave(&dev->port.lock, flags);
675 if (++dev->port.count > 1) {
676 spin_unlock_irqrestore(&dev->port.lock, flags);
692 return 0; 677 return 0;
678 }
679 spin_unlock_irqrestore(&dev->port.lock, flags);
693 680
694 dlc = dev->dlc; 681 dlc = dev->dlc;
695 682
@@ -697,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
697 684
698 rfcomm_dlc_lock(dlc); 685 rfcomm_dlc_lock(dlc);
699 tty->driver_data = dev; 686 tty->driver_data = dev;
700 dev->tty = tty; 687 dev->port.tty = tty;
701 rfcomm_dlc_unlock(dlc); 688 rfcomm_dlc_unlock(dlc);
702 set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); 689 set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
703 690
@@ -744,13 +731,17 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
744static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) 731static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
745{ 732{
746 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 733 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
734 unsigned long flags;
735
747 if (!dev) 736 if (!dev)
748 return; 737 return;
749 738
750 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, 739 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
751 atomic_read(&dev->opened)); 740 dev->port.count);
752 741
753 if (atomic_dec_and_test(&dev->opened)) { 742 spin_lock_irqsave(&dev->port.lock, flags);
743 if (!--dev->port.count) {
744 spin_unlock_irqrestore(&dev->port.lock, flags);
754 if (dev->tty_dev->parent) 745 if (dev->tty_dev->parent)
755 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); 746 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
756 747
@@ -758,11 +749,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
758 rfcomm_dlc_close(dev->dlc, 0); 749 rfcomm_dlc_close(dev->dlc, 0);
759 750
760 clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); 751 clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
761 cancel_work_sync(&dev->wakeup_task);
762 752
763 rfcomm_dlc_lock(dev->dlc); 753 rfcomm_dlc_lock(dev->dlc);
764 tty->driver_data = NULL; 754 tty->driver_data = NULL;
765 dev->tty = NULL; 755 dev->port.tty = NULL;
766 rfcomm_dlc_unlock(dev->dlc); 756 rfcomm_dlc_unlock(dev->dlc);
767 757
768 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) { 758 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
@@ -770,11 +760,12 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
770 list_del_init(&dev->list); 760 list_del_init(&dev->list);
771 spin_unlock(&rfcomm_dev_lock); 761 spin_unlock(&rfcomm_dev_lock);
772 762
773 rfcomm_dev_put(dev); 763 tty_port_put(&dev->port);
774 } 764 }
775 } 765 } else
766 spin_unlock_irqrestore(&dev->port.lock, flags);
776 767
777 rfcomm_dev_put(dev); 768 tty_port_put(&dev->port);
778} 769}
779 770
780static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) 771static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1083,7 +1074,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
1083 if (rfcomm_dev_get(dev->id) == NULL) 1074 if (rfcomm_dev_get(dev->id) == NULL)
1084 return; 1075 return;
1085 rfcomm_dev_del(dev); 1076 rfcomm_dev_del(dev);
1086 rfcomm_dev_put(dev); 1077 tty_port_put(&dev->port);
1087 } 1078 }
1088} 1079}
1089 1080
@@ -1157,7 +1148,6 @@ int __init rfcomm_init_ttys(void)
1157 if (!rfcomm_tty_driver) 1148 if (!rfcomm_tty_driver)
1158 return -ENOMEM; 1149 return -ENOMEM;
1159 1150
1160 rfcomm_tty_driver->owner = THIS_MODULE;
1161 rfcomm_tty_driver->driver_name = "rfcomm"; 1151 rfcomm_tty_driver->driver_name = "rfcomm";
1162 rfcomm_tty_driver->name = "rfcomm"; 1152 rfcomm_tty_driver->name = "rfcomm";
1163 rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR; 1153 rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8bf26d1bc5c1..cbdd313659a7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -44,7 +44,6 @@
44#include <linux/security.h> 44#include <linux/security.h>
45#include <net/sock.h> 45#include <net/sock.h>
46 46
47#include <asm/system.h>
48#include <linux/uaccess.h> 47#include <linux/uaccess.h>
49 48
50#include <net/bluetooth/bluetooth.h> 49#include <net/bluetooth/bluetooth.h>
@@ -62,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
62static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent); 61static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
63static void sco_chan_del(struct sock *sk, int err); 62static void sco_chan_del(struct sock *sk, int err);
64 63
65static int sco_conn_del(struct hci_conn *conn, int err);
66
67static void sco_sock_close(struct sock *sk); 64static void sco_sock_close(struct sock *sk);
68static void sco_sock_kill(struct sock *sk); 65static void sco_sock_kill(struct sock *sk);
69 66
@@ -96,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
96} 93}
97 94
98/* ---- SCO connections ---- */ 95/* ---- SCO connections ---- */
99static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) 96static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
100{ 97{
101 struct hci_dev *hdev = hcon->hdev; 98 struct hci_dev *hdev = hcon->hdev;
102 struct sco_conn *conn = hcon->sco_data; 99 struct sco_conn *conn = hcon->sco_data;
103 100
104 if (conn || status) 101 if (conn)
105 return conn; 102 return conn;
106 103
107 conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC); 104 conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -196,13 +193,14 @@ static int sco_connect(struct sock *sk)
196 else 193 else
197 type = SCO_LINK; 194 type = SCO_LINK;
198 195
199 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 196 hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
197 HCI_AT_NO_BONDING);
200 if (IS_ERR(hcon)) { 198 if (IS_ERR(hcon)) {
201 err = PTR_ERR(hcon); 199 err = PTR_ERR(hcon);
202 goto done; 200 goto done;
203 } 201 }
204 202
205 conn = sco_conn_add(hcon, 0); 203 conn = sco_conn_add(hcon);
206 if (!conn) { 204 if (!conn) {
207 hci_conn_put(hcon); 205 hci_conn_put(hcon);
208 err = -ENOMEM; 206 err = -ENOMEM;
@@ -234,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
234{ 232{
235 struct sco_conn *conn = sco_pi(sk)->conn; 233 struct sco_conn *conn = sco_pi(sk)->conn;
236 struct sk_buff *skb; 234 struct sk_buff *skb;
237 int err, count; 235 int err;
238 236
239 /* Check outgoing MTU */ 237 /* Check outgoing MTU */
240 if (len > conn->mtu) 238 if (len > conn->mtu)
@@ -242,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
242 240
243 BT_DBG("sk %p len %d", sk, len); 241 BT_DBG("sk %p len %d", sk, len);
244 242
245 count = min_t(unsigned int, conn->mtu, len); 243 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
246 skb = bt_skb_send_alloc(sk, count,
247 msg->msg_flags & MSG_DONTWAIT, &err);
248 if (!skb) 244 if (!skb)
249 return err; 245 return err;
250 246
251 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 247 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
252 kfree_skb(skb); 248 kfree_skb(skb);
253 return -EFAULT; 249 return -EFAULT;
254 } 250 }
255 251
256 hci_send_sco(conn->hcon, skb); 252 hci_send_sco(conn->hcon, skb);
257 253
258 return count; 254 return len;
259} 255}
260 256
261static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -278,17 +274,20 @@ drop:
278} 274}
279 275
280/* -------- Socket interface ---------- */ 276/* -------- Socket interface ---------- */
281static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba) 277static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
282{ 278{
283 struct sock *sk;
284 struct hlist_node *node; 279 struct hlist_node *node;
280 struct sock *sk;
281
282 sk_for_each(sk, node, &sco_sk_list.head) {
283 if (sk->sk_state != BT_LISTEN)
284 continue;
285 285
286 sk_for_each(sk, node, &sco_sk_list.head)
287 if (!bacmp(&bt_sk(sk)->src, ba)) 286 if (!bacmp(&bt_sk(sk)->src, ba))
288 goto found; 287 return sk;
289 sk = NULL; 288 }
290found: 289
291 return sk; 290 return NULL;
292} 291}
293 292
294/* Find socket listening on source bdaddr. 293/* Find socket listening on source bdaddr.
@@ -467,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
467{ 466{
468 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; 467 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
469 struct sock *sk = sock->sk; 468 struct sock *sk = sock->sk;
470 bdaddr_t *src = &sa->sco_bdaddr;
471 int err = 0; 469 int err = 0;
472 470
473 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); 471 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -482,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
482 goto done; 480 goto done;
483 } 481 }
484 482
485 write_lock(&sco_sk_list.lock); 483 if (sk->sk_type != SOCK_SEQPACKET) {
486 484 err = -EINVAL;
487 if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) { 485 goto done;
488 err = -EADDRINUSE;
489 } else {
490 /* Save source address */
491 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
492 sk->sk_state = BT_BOUND;
493 } 486 }
494 487
495 write_unlock(&sco_sk_list.lock); 488 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
489
490 sk->sk_state = BT_BOUND;
496 491
497done: 492done:
498 release_sock(sk); 493 release_sock(sk);
@@ -538,21 +533,38 @@ done:
538static int sco_sock_listen(struct socket *sock, int backlog) 533static int sco_sock_listen(struct socket *sock, int backlog)
539{ 534{
540 struct sock *sk = sock->sk; 535 struct sock *sk = sock->sk;
536 bdaddr_t *src = &bt_sk(sk)->src;
541 int err = 0; 537 int err = 0;
542 538
543 BT_DBG("sk %p backlog %d", sk, backlog); 539 BT_DBG("sk %p backlog %d", sk, backlog);
544 540
545 lock_sock(sk); 541 lock_sock(sk);
546 542
547 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { 543 if (sk->sk_state != BT_BOUND) {
548 err = -EBADFD; 544 err = -EBADFD;
549 goto done; 545 goto done;
550 } 546 }
551 547
548 if (sk->sk_type != SOCK_SEQPACKET) {
549 err = -EINVAL;
550 goto done;
551 }
552
553 write_lock(&sco_sk_list.lock);
554
555 if (__sco_get_sock_listen_by_addr(src)) {
556 err = -EADDRINUSE;
557 goto unlock;
558 }
559
552 sk->sk_max_ack_backlog = backlog; 560 sk->sk_max_ack_backlog = backlog;
553 sk->sk_ack_backlog = 0; 561 sk->sk_ack_backlog = 0;
562
554 sk->sk_state = BT_LISTEN; 563 sk->sk_state = BT_LISTEN;
555 564
565unlock:
566 write_unlock(&sco_sk_list.lock);
567
556done: 568done:
557 release_sock(sk); 569 release_sock(sk);
558 return err; 570 return err;
@@ -924,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
924 if (!status) { 936 if (!status) {
925 struct sco_conn *conn; 937 struct sco_conn *conn;
926 938
927 conn = sco_conn_add(hcon, status); 939 conn = sco_conn_add(hcon);
928 if (conn) 940 if (conn)
929 sco_conn_ready(conn); 941 sco_conn_ready(conn);
930 } else 942 } else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 32c47de30344..6fc7c4708f3e 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -29,7 +29,7 @@
29#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h> 30#include <crypto/b128ops.h>
31 31
32#define SMP_TIMEOUT 30000 /* 30 seconds */ 32#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 33
34static inline void swap128(u8 src[16], u8 dst[16]) 34static inline void swap128(u8 src[16], u8 dst[16])
35{ 35{
@@ -186,8 +186,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
186 hci_send_acl(conn->hchan, skb, 0); 186 hci_send_acl(conn->hchan, skb, 0);
187 187
188 cancel_delayed_work_sync(&conn->security_timer); 188 cancel_delayed_work_sync(&conn->security_timer);
189 schedule_delayed_work(&conn->security_timer, 189 schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
190 msecs_to_jiffies(SMP_TIMEOUT));
191} 190}
192 191
193static __u8 authreq_to_seclevel(__u8 authreq) 192static __u8 authreq_to_seclevel(__u8 authreq)
@@ -217,7 +216,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
217{ 216{
218 u8 dist_keys = 0; 217 u8 dist_keys = 0;
219 218
220 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { 219 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
221 dist_keys = SMP_DIST_ENC_KEY; 220 dist_keys = SMP_DIST_ENC_KEY;
222 authreq |= SMP_AUTH_BONDING; 221 authreq |= SMP_AUTH_BONDING;
223 } else { 222 } else {
@@ -250,21 +249,27 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
250 (max_key_size < SMP_MIN_ENC_KEY_SIZE)) 249 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
251 return SMP_ENC_KEY_SIZE; 250 return SMP_ENC_KEY_SIZE;
252 251
253 smp->smp_key_size = max_key_size; 252 smp->enc_key_size = max_key_size;
254 253
255 return 0; 254 return 0;
256} 255}
257 256
258static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) 257static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
259{ 258{
259 struct hci_conn *hcon = conn->hcon;
260
260 if (send) 261 if (send)
261 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), 262 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
262 &reason); 263 &reason);
263 264
264 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend); 265 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
265 mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason); 266 mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
266 cancel_delayed_work_sync(&conn->security_timer); 267 hcon->dst_type, reason);
267 smp_chan_destroy(conn); 268
269 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
270 cancel_delayed_work_sync(&conn->security_timer);
271 smp_chan_destroy(conn);
272 }
268} 273}
269 274
270#define JUST_WORKS 0x00 275#define JUST_WORKS 0x00
@@ -305,7 +310,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
305 remote_io > SMP_IO_KEYBOARD_DISPLAY) 310 remote_io > SMP_IO_KEYBOARD_DISPLAY)
306 method = JUST_WORKS; 311 method = JUST_WORKS;
307 else 312 else
308 method = gen_method[local_io][remote_io]; 313 method = gen_method[remote_io][local_io];
309 314
310 /* If not bonding, don't ask user to confirm a Zero TK */ 315 /* If not bonding, don't ask user to confirm a Zero TK */
311 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 316 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -346,9 +351,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
346 hci_dev_lock(hcon->hdev); 351 hci_dev_lock(hcon->hdev);
347 352
348 if (method == REQ_PASSKEY) 353 if (method == REQ_PASSKEY)
349 ret = mgmt_user_passkey_request(hcon->hdev, conn->dst); 354 ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
355 hcon->type, hcon->dst_type);
350 else 356 else
351 ret = mgmt_user_confirm_request(hcon->hdev, conn->dst, 357 ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
358 hcon->type, hcon->dst_type,
352 cpu_to_le32(passkey), 0); 359 cpu_to_le32(passkey), 0);
353 360
354 hci_dev_unlock(hcon->hdev); 361 hci_dev_unlock(hcon->hdev);
@@ -377,12 +384,11 @@ static void confirm_work(struct work_struct *work)
377 384
378 if (conn->hcon->out) 385 if (conn->hcon->out)
379 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0, 386 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
380 conn->src, conn->hcon->dst_type, conn->dst, 387 conn->src, conn->hcon->dst_type, conn->dst, res);
381 res);
382 else 388 else
383 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 389 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
384 conn->hcon->dst_type, conn->dst, 0, conn->src, 390 conn->hcon->dst_type, conn->dst, 0, conn->src,
385 res); 391 res);
386 if (ret) { 392 if (ret) {
387 reason = SMP_UNSPECIFIED; 393 reason = SMP_UNSPECIFIED;
388 goto error; 394 goto error;
@@ -417,12 +423,10 @@ static void random_work(struct work_struct *work)
417 423
418 if (hcon->out) 424 if (hcon->out)
419 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0, 425 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
420 conn->src, hcon->dst_type, conn->dst, 426 conn->src, hcon->dst_type, conn->dst, res);
421 res);
422 else 427 else
423 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 428 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
424 hcon->dst_type, conn->dst, 0, conn->src, 429 hcon->dst_type, conn->dst, 0, conn->src, res);
425 res);
426 if (ret) { 430 if (ret) {
427 reason = SMP_UNSPECIFIED; 431 reason = SMP_UNSPECIFIED;
428 goto error; 432 goto error;
@@ -446,16 +450,16 @@ static void random_work(struct work_struct *work)
446 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key); 450 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
447 swap128(key, stk); 451 swap128(key, stk);
448 452
449 memset(stk + smp->smp_key_size, 0, 453 memset(stk + smp->enc_key_size, 0,
450 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size); 454 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
451 455
452 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) { 456 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
453 reason = SMP_UNSPECIFIED; 457 reason = SMP_UNSPECIFIED;
454 goto error; 458 goto error;
455 } 459 }
456 460
457 hci_le_start_enc(hcon, ediv, rand, stk); 461 hci_le_start_enc(hcon, ediv, rand, stk);
458 hcon->enc_key_size = smp->smp_key_size; 462 hcon->enc_key_size = smp->enc_key_size;
459 } else { 463 } else {
460 u8 stk[16], r[16], rand[8]; 464 u8 stk[16], r[16], rand[8];
461 __le16 ediv; 465 __le16 ediv;
@@ -469,11 +473,12 @@ static void random_work(struct work_struct *work)
469 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key); 473 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
470 swap128(key, stk); 474 swap128(key, stk);
471 475
472 memset(stk + smp->smp_key_size, 0, 476 memset(stk + smp->enc_key_size, 0,
473 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size); 477 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
474 478
475 hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size, 479 hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
476 ediv, rand, stk); 480 HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
481 ediv, rand);
477 } 482 }
478 483
479 return; 484 return;
@@ -506,7 +511,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
506{ 511{
507 struct smp_chan *smp = conn->smp_chan; 512 struct smp_chan *smp = conn->smp_chan;
508 513
509 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); 514 BUG_ON(!smp);
510 515
511 if (smp->tfm) 516 if (smp->tfm)
512 crypto_free_blkcipher(smp->tfm); 517 crypto_free_blkcipher(smp->tfm);
@@ -571,7 +576,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
571 if (conn->hcon->link_mode & HCI_LM_MASTER) 576 if (conn->hcon->link_mode & HCI_LM_MASTER)
572 return SMP_CMD_NOTSUPP; 577 return SMP_CMD_NOTSUPP;
573 578
574 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) 579 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
575 smp = smp_chan_create(conn); 580 smp = smp_chan_create(conn);
576 581
577 smp = conn->smp_chan; 582 smp = conn->smp_chan;
@@ -584,6 +589,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
584 if (req->auth_req & SMP_AUTH_BONDING) 589 if (req->auth_req & SMP_AUTH_BONDING)
585 auth = req->auth_req; 590 auth = req->auth_req;
586 591
592 conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
593
587 build_pairing_cmd(conn, req, &rsp, auth); 594 build_pairing_cmd(conn, req, &rsp, auth);
588 595
589 key_size = min(req->max_key_size, rsp.max_key_size); 596 key_size = min(req->max_key_size, rsp.max_key_size);
@@ -698,23 +705,18 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
698 705
699static u8 smp_ltk_encrypt(struct l2cap_conn *conn) 706static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
700{ 707{
701 struct link_key *key; 708 struct smp_ltk *key;
702 struct key_master_id *master;
703 struct hci_conn *hcon = conn->hcon; 709 struct hci_conn *hcon = conn->hcon;
704 710
705 key = hci_find_link_key_type(hcon->hdev, conn->dst, 711 key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
706 HCI_LK_SMP_LTK);
707 if (!key) 712 if (!key)
708 return 0; 713 return 0;
709 714
710 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, 715 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
711 &hcon->pend))
712 return 1; 716 return 1;
713 717
714 master = (void *) key->data; 718 hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
715 hci_le_start_enc(hcon, master->ediv, master->rand, 719 hcon->enc_key_size = key->enc_size;
716 key->val);
717 hcon->enc_key_size = key->pin_len;
718 720
719 return 1; 721 return 1;
720 722
@@ -733,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
733 if (smp_ltk_encrypt(conn)) 735 if (smp_ltk_encrypt(conn))
734 return 0; 736 return 0;
735 737
736 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) 738 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
737 return 0; 739 return 0;
738 740
739 smp = smp_chan_create(conn); 741 smp = smp_chan_create(conn);
@@ -772,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
772 if (smp_ltk_encrypt(conn)) 774 if (smp_ltk_encrypt(conn))
773 goto done; 775 goto done;
774 776
775 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) 777 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
776 return 0; 778 return 0;
777 779
778 smp = smp_chan_create(conn); 780 smp = smp_chan_create(conn);
@@ -817,13 +819,19 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
817{ 819{
818 struct smp_cmd_master_ident *rp = (void *) skb->data; 820 struct smp_cmd_master_ident *rp = (void *) skb->data;
819 struct smp_chan *smp = conn->smp_chan; 821 struct smp_chan *smp = conn->smp_chan;
822 struct hci_dev *hdev = conn->hcon->hdev;
823 struct hci_conn *hcon = conn->hcon;
824 u8 authenticated;
820 825
821 skb_pull(skb, sizeof(*rp)); 826 skb_pull(skb, sizeof(*rp));
822 827
823 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, 828 hci_dev_lock(hdev);
824 rp->ediv, rp->rand, smp->tk); 829 authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
825 830 hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
831 HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
832 rp->ediv, rp->rand);
826 smp_distribute_keys(conn, 1); 833 smp_distribute_keys(conn, 1);
834 hci_dev_unlock(hdev);
827 835
828 return 0; 836 return 0;
829} 837}
@@ -908,7 +916,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
908 916
909 BT_DBG("conn %p force %d", conn, force); 917 BT_DBG("conn %p force %d", conn, force);
910 918
911 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) 919 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
912 return 0; 920 return 0;
913 921
914 rsp = (void *) &smp->prsp[1]; 922 rsp = (void *) &smp->prsp[1];
@@ -933,6 +941,8 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
933 if (*keydist & SMP_DIST_ENC_KEY) { 941 if (*keydist & SMP_DIST_ENC_KEY) {
934 struct smp_cmd_encrypt_info enc; 942 struct smp_cmd_encrypt_info enc;
935 struct smp_cmd_master_ident ident; 943 struct smp_cmd_master_ident ident;
944 struct hci_conn *hcon = conn->hcon;
945 u8 authenticated;
936 __le16 ediv; 946 __le16 ediv;
937 947
938 get_random_bytes(enc.ltk, sizeof(enc.ltk)); 948 get_random_bytes(enc.ltk, sizeof(enc.ltk));
@@ -941,10 +951,12 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
941 951
942 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); 952 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
943 953
944 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, 954 authenticated = hcon->sec_level == BT_SECURITY_HIGH;
945 ediv, ident.rand, enc.ltk); 955 hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
956 HCI_SMP_LTK_SLAVE, 1, authenticated,
957 enc.ltk, smp->enc_key_size, ediv, ident.rand);
946 958
947 ident.ediv = cpu_to_le16(ediv); 959 ident.ediv = ediv;
948 960
949 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); 961 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
950 962
@@ -982,7 +994,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
982 } 994 }
983 995
984 if (conn->hcon->out || force) { 996 if (conn->hcon->out || force) {
985 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); 997 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
986 cancel_delayed_work_sync(&conn->security_timer); 998 cancel_delayed_work_sync(&conn->security_timer);
987 smp_chan_destroy(conn); 999 smp_chan_destroy(conn);
988 } 1000 }
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 71773b014e0c..929e48aed444 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -167,10 +167,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
167 struct sockaddr *addr = p; 167 struct sockaddr *addr = p;
168 168
169 if (!is_valid_ether_addr(addr->sa_data)) 169 if (!is_valid_ether_addr(addr->sa_data))
170 return -EINVAL; 170 return -EADDRNOTAVAIL;
171 171
172 spin_lock_bh(&br->lock); 172 spin_lock_bh(&br->lock);
173 if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { 173 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
174 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
174 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 175 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
175 br_fdb_change_mac_address(br, addr->sa_data); 176 br_fdb_change_mac_address(br, addr->sa_data);
176 br_stp_change_bridge_id(br, addr->sa_data); 177 br_stp_change_bridge_id(br, addr->sa_data);
@@ -316,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = {
316 .ndo_add_slave = br_add_slave, 317 .ndo_add_slave = br_add_slave,
317 .ndo_del_slave = br_del_slave, 318 .ndo_del_slave = br_del_slave,
318 .ndo_fix_features = br_fix_features, 319 .ndo_fix_features = br_fix_features,
320 .ndo_fdb_add = br_fdb_add,
321 .ndo_fdb_del = br_fdb_delete,
322 .ndo_fdb_dump = br_fdb_dump,
319}; 323};
320 324
321static void br_dev_free(struct net_device *dev) 325static void br_dev_free(struct net_device *dev)
@@ -334,7 +338,7 @@ void br_dev_setup(struct net_device *dev)
334{ 338{
335 struct net_bridge *br = netdev_priv(dev); 339 struct net_bridge *br = netdev_priv(dev);
336 340
337 random_ether_addr(dev->dev_addr); 341 eth_hw_addr_random(dev);
338 ether_setup(dev); 342 ether_setup(dev);
339 343
340 dev->netdev_ops = &br_netdev_ops; 344 dev->netdev_ops = &br_netdev_ops;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c844d508..d21f32383517 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
107 struct net_bridge_port *op; 107 struct net_bridge_port *op;
108 list_for_each_entry(op, &br->port_list, list) { 108 list_for_each_entry(op, &br->port_list, list) {
109 if (op != p && 109 if (op != p &&
110 !compare_ether_addr(op->dev->dev_addr, 110 ether_addr_equal(op->dev->dev_addr,
111 f->addr.addr)) { 111 f->addr.addr)) {
112 f->dst = op; 112 f->dst = op;
113 goto insert; 113 goto insert;
114 } 114 }
@@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br,
214 struct net_bridge_port *op; 214 struct net_bridge_port *op;
215 list_for_each_entry(op, &br->port_list, list) { 215 list_for_each_entry(op, &br->port_list, list) {
216 if (op != p && 216 if (op != p &&
217 !compare_ether_addr(op->dev->dev_addr, 217 ether_addr_equal(op->dev->dev_addr,
218 f->addr.addr)) { 218 f->addr.addr)) {
219 f->dst = op; 219 f->dst = op;
220 goto skip_delete; 220 goto skip_delete;
221 } 221 }
@@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
237 struct net_bridge_fdb_entry *fdb; 237 struct net_bridge_fdb_entry *fdb;
238 238
239 hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { 239 hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
240 if (!compare_ether_addr(fdb->addr.addr, addr)) { 240 if (ether_addr_equal(fdb->addr.addr, addr)) {
241 if (unlikely(has_expired(br, fdb))) 241 if (unlikely(has_expired(br, fdb)))
242 break; 242 break;
243 return fdb; 243 return fdb;
@@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
331 struct net_bridge_fdb_entry *fdb; 331 struct net_bridge_fdb_entry *fdb;
332 332
333 hlist_for_each_entry(fdb, h, head, hlist) { 333 hlist_for_each_entry(fdb, h, head, hlist) {
334 if (!compare_ether_addr(fdb->addr.addr, addr)) 334 if (ether_addr_equal(fdb->addr.addr, addr))
335 return fdb; 335 return fdb;
336 } 336 }
337 return NULL; 337 return NULL;
@@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
344 struct net_bridge_fdb_entry *fdb; 344 struct net_bridge_fdb_entry *fdb;
345 345
346 hlist_for_each_entry_rcu(fdb, h, head, hlist) { 346 hlist_for_each_entry_rcu(fdb, h, head, hlist) {
347 if (!compare_ether_addr(fdb->addr.addr, addr)) 347 if (ether_addr_equal(fdb->addr.addr, addr))
348 return fdb; 348 return fdb;
349 } 349 }
350 return NULL; 350 return NULL;
@@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
487 ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex; 487 ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
488 ndm->ndm_state = fdb_to_nud(fdb); 488 ndm->ndm_state = fdb_to_nud(fdb);
489 489
490 NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); 490 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
491 491 goto nla_put_failure;
492 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 492 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
493 ci.ndm_confirmed = 0; 493 ci.ndm_confirmed = 0;
494 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 494 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
495 ci.ndm_refcnt = 0; 495 ci.ndm_refcnt = 0;
496 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); 496 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
497 497 goto nla_put_failure;
498 return nlmsg_end(skb, nlh); 498 return nlmsg_end(skb, nlh);
499 499
500nla_put_failure: 500nla_put_failure:
@@ -535,44 +535,38 @@ errout:
535} 535}
536 536
537/* Dump information about entries, in response to GETNEIGH */ 537/* Dump information about entries, in response to GETNEIGH */
538int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 538int br_fdb_dump(struct sk_buff *skb,
539 struct netlink_callback *cb,
540 struct net_device *dev,
541 int idx)
539{ 542{
540 struct net *net = sock_net(skb->sk); 543 struct net_bridge *br = netdev_priv(dev);
541 struct net_device *dev; 544 int i;
542 int idx = 0;
543
544 rcu_read_lock();
545 for_each_netdev_rcu(net, dev) {
546 struct net_bridge *br = netdev_priv(dev);
547 int i;
548 545
549 if (!(dev->priv_flags & IFF_EBRIDGE)) 546 if (!(dev->priv_flags & IFF_EBRIDGE))
550 continue; 547 goto out;
551 548
552 for (i = 0; i < BR_HASH_SIZE; i++) { 549 for (i = 0; i < BR_HASH_SIZE; i++) {
553 struct hlist_node *h; 550 struct hlist_node *h;
554 struct net_bridge_fdb_entry *f; 551 struct net_bridge_fdb_entry *f;
555
556 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
557 if (idx < cb->args[0])
558 goto skip;
559 552
560 if (fdb_fill_info(skb, br, f, 553 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
561 NETLINK_CB(cb->skb).pid, 554 if (idx < cb->args[0])
562 cb->nlh->nlmsg_seq, 555 goto skip;
563 RTM_NEWNEIGH, 556
564 NLM_F_MULTI) < 0) 557 if (fdb_fill_info(skb, br, f,
565 break; 558 NETLINK_CB(cb->skb).pid,
559 cb->nlh->nlmsg_seq,
560 RTM_NEWNEIGH,
561 NLM_F_MULTI) < 0)
562 break;
566skip: 563skip:
567 ++idx; 564 ++idx;
568 }
569 } 565 }
570 } 566 }
571 rcu_read_unlock();
572 567
573 cb->args[0] = idx; 568out:
574 569 return idx;
575 return skb->len;
576} 570}
577 571
578/* Update (create or replace) forwarding database entry */ 572/* Update (create or replace) forwarding database entry */
@@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
614} 608}
615 609
616/* Add new permanent fdb entry with RTM_NEWNEIGH */ 610/* Add new permanent fdb entry with RTM_NEWNEIGH */
617int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 611int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
612 unsigned char *addr, u16 nlh_flags)
618{ 613{
619 struct net *net = sock_net(skb->sk);
620 struct ndmsg *ndm;
621 struct nlattr *tb[NDA_MAX+1];
622 struct net_device *dev;
623 struct net_bridge_port *p; 614 struct net_bridge_port *p;
624 const __u8 *addr; 615 int err = 0;
625 int err;
626
627 ASSERT_RTNL();
628 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
629 if (err < 0)
630 return err;
631
632 ndm = nlmsg_data(nlh);
633 if (ndm->ndm_ifindex == 0) {
634 pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
635 return -EINVAL;
636 }
637
638 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
639 if (dev == NULL) {
640 pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
641 return -ENODEV;
642 }
643
644 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
645 pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
646 return -EINVAL;
647 }
648
649 addr = nla_data(tb[NDA_LLADDR]);
650 if (!is_valid_ether_addr(addr)) {
651 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
652 return -EINVAL;
653 }
654 616
655 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { 617 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
656 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); 618 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
670 rcu_read_unlock(); 632 rcu_read_unlock();
671 } else { 633 } else {
672 spin_lock_bh(&p->br->hash_lock); 634 spin_lock_bh(&p->br->hash_lock);
673 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); 635 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
674 spin_unlock_bh(&p->br->hash_lock); 636 spin_unlock_bh(&p->br->hash_lock);
675 } 637 }
676 638
677 return err; 639 return err;
678} 640}
679 641
680static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) 642static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
681{ 643{
682 struct net_bridge *br = p->br; 644 struct net_bridge *br = p->br;
683 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 645 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
692} 654}
693 655
694/* Remove neighbor entry with RTM_DELNEIGH */ 656/* Remove neighbor entry with RTM_DELNEIGH */
695int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 657int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
658 unsigned char *addr)
696{ 659{
697 struct net *net = sock_net(skb->sk);
698 struct ndmsg *ndm;
699 struct net_bridge_port *p; 660 struct net_bridge_port *p;
700 struct nlattr *llattr;
701 const __u8 *addr;
702 struct net_device *dev;
703 int err; 661 int err;
704 662
705 ASSERT_RTNL();
706 if (nlmsg_len(nlh) < sizeof(*ndm))
707 return -EINVAL;
708
709 ndm = nlmsg_data(nlh);
710 if (ndm->ndm_ifindex == 0) {
711 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
712 return -EINVAL;
713 }
714
715 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
716 if (dev == NULL) {
717 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
718 return -ENODEV;
719 }
720
721 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
722 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
723 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
724 return -EINVAL;
725 }
726
727 addr = nla_data(llattr);
728
729 p = br_port_get_rtnl(dev); 663 p = br_port_get_rtnl(dev);
730 if (p == NULL) { 664 if (p == NULL) {
731 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", 665 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..e9466d412707 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
34 p->state == BR_STATE_FORWARDING); 34 p->state == BR_STATE_FORWARDING);
35} 35}
36 36
37static inline unsigned packet_length(const struct sk_buff *skb) 37static inline unsigned int packet_length(const struct sk_buff *skb)
38{ 38{
39 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0); 39 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
40} 40}
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
47 kfree_skb(skb); 47 kfree_skb(skb);
48 } else { 48 } else {
49 skb_push(skb, ETH_HLEN); 49 skb_push(skb, ETH_HLEN);
50 br_drop_fake_rtable(skb);
50 dev_queue_xmit(skb); 51 dev_queue_xmit(skb);
51 } 52 }
52 53
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5a31731be4d0..76f15fda0212 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -216,7 +216,7 @@ forward:
216 } 216 }
217 /* fall through */ 217 /* fall through */
218 case BR_STATE_LEARNING: 218 case BR_STATE_LEARNING:
219 if (!compare_ether_addr(p->br->dev->dev_addr, dest)) 219 if (ether_addr_equal(p->br->dev->dev_addr, dest))
220 skb->pkt_type = PACKET_HOST; 220 skb->pkt_type = PACKET_HOST;
221 221
222 NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, 222 NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae9220b..b66581208cb2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -36,6 +36,8 @@
36#define mlock_dereference(X, br) \ 36#define mlock_dereference(X, br) \
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38 38
39static void br_multicast_start_querier(struct net_bridge *br);
40
39#if IS_ENABLED(CONFIG_IPV6) 41#if IS_ENABLED(CONFIG_IPV6)
40static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) 42static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
41{ 43{
@@ -241,7 +243,6 @@ static void br_multicast_group_expired(unsigned long data)
241 hlist_del_rcu(&mp->hlist[mdb->ver]); 243 hlist_del_rcu(&mp->hlist[mdb->ver]);
242 mdb->size--; 244 mdb->size--;
243 245
244 del_timer(&mp->query_timer);
245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 246 call_rcu_bh(&mp->rcu, br_multicast_free_group);
246 247
247out: 248out:
@@ -271,7 +272,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
271 rcu_assign_pointer(*pp, p->next); 272 rcu_assign_pointer(*pp, p->next);
272 hlist_del_init(&p->mglist); 273 hlist_del_init(&p->mglist);
273 del_timer(&p->timer); 274 del_timer(&p->timer);
274 del_timer(&p->query_timer);
275 call_rcu_bh(&p->rcu, br_multicast_free_pg); 275 call_rcu_bh(&p->rcu, br_multicast_free_pg);
276 276
277 if (!mp->ports && !mp->mglist && 277 if (!mp->ports && !mp->mglist &&
@@ -460,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
460 hopopt[3] = 2; /* Length of RA Option */ 460 hopopt[3] = 2; /* Length of RA Option */
461 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 461 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
462 hopopt[5] = 0; 462 hopopt[5] = 0;
463 hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ 463 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
464 hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ 464 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
465 465
466 skb_put(skb, sizeof(*ip6h) + 8); 466 skb_put(skb, sizeof(*ip6h) + 8);
467 467
@@ -507,74 +507,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
507 return NULL; 507 return NULL;
508} 508}
509 509
510static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
511{
512 struct net_bridge *br = mp->br;
513 struct sk_buff *skb;
514
515 skb = br_multicast_alloc_query(br, &mp->addr);
516 if (!skb)
517 goto timer;
518
519 netif_rx(skb);
520
521timer:
522 if (++mp->queries_sent < br->multicast_last_member_count)
523 mod_timer(&mp->query_timer,
524 jiffies + br->multicast_last_member_interval);
525}
526
527static void br_multicast_group_query_expired(unsigned long data)
528{
529 struct net_bridge_mdb_entry *mp = (void *)data;
530 struct net_bridge *br = mp->br;
531
532 spin_lock(&br->multicast_lock);
533 if (!netif_running(br->dev) || !mp->mglist ||
534 mp->queries_sent >= br->multicast_last_member_count)
535 goto out;
536
537 br_multicast_send_group_query(mp);
538
539out:
540 spin_unlock(&br->multicast_lock);
541}
542
543static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
544{
545 struct net_bridge_port *port = pg->port;
546 struct net_bridge *br = port->br;
547 struct sk_buff *skb;
548
549 skb = br_multicast_alloc_query(br, &pg->addr);
550 if (!skb)
551 goto timer;
552
553 br_deliver(port, skb);
554
555timer:
556 if (++pg->queries_sent < br->multicast_last_member_count)
557 mod_timer(&pg->query_timer,
558 jiffies + br->multicast_last_member_interval);
559}
560
561static void br_multicast_port_group_query_expired(unsigned long data)
562{
563 struct net_bridge_port_group *pg = (void *)data;
564 struct net_bridge_port *port = pg->port;
565 struct net_bridge *br = port->br;
566
567 spin_lock(&br->multicast_lock);
568 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
569 pg->queries_sent >= br->multicast_last_member_count)
570 goto out;
571
572 br_multicast_send_port_group_query(pg);
573
574out:
575 spin_unlock(&br->multicast_lock);
576}
577
578static struct net_bridge_mdb_entry *br_multicast_get_group( 510static struct net_bridge_mdb_entry *br_multicast_get_group(
579 struct net_bridge *br, struct net_bridge_port *port, 511 struct net_bridge *br, struct net_bridge_port *port,
580 struct br_ip *group, int hash) 512 struct br_ip *group, int hash)
@@ -582,8 +514,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
582 struct net_bridge_mdb_htable *mdb; 514 struct net_bridge_mdb_htable *mdb;
583 struct net_bridge_mdb_entry *mp; 515 struct net_bridge_mdb_entry *mp;
584 struct hlist_node *p; 516 struct hlist_node *p;
585 unsigned count = 0; 517 unsigned int count = 0;
586 unsigned max; 518 unsigned int max;
587 int elasticity; 519 int elasticity;
588 int err; 520 int err;
589 521
@@ -690,8 +622,6 @@ rehash:
690 mp->addr = *group; 622 mp->addr = *group;
691 setup_timer(&mp->timer, br_multicast_group_expired, 623 setup_timer(&mp->timer, br_multicast_group_expired,
692 (unsigned long)mp); 624 (unsigned long)mp);
693 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
694 (unsigned long)mp);
695 625
696 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 626 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
697 mdb->size++; 627 mdb->size++;
@@ -746,8 +676,6 @@ static int br_multicast_add_group(struct net_bridge *br,
746 hlist_add_head(&p->mglist, &port->mglist); 676 hlist_add_head(&p->mglist, &port->mglist);
747 setup_timer(&p->timer, br_multicast_port_group_expired, 677 setup_timer(&p->timer, br_multicast_port_group_expired,
748 (unsigned long)p); 678 (unsigned long)p);
749 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
750 (unsigned long)p);
751 679
752 rcu_assign_pointer(*pp, p); 680 rcu_assign_pointer(*pp, p);
753 681
@@ -814,6 +742,20 @@ static void br_multicast_local_router_expired(unsigned long data)
814{ 742{
815} 743}
816 744
745static void br_multicast_querier_expired(unsigned long data)
746{
747 struct net_bridge *br = (void *)data;
748
749 spin_lock(&br->multicast_lock);
750 if (!netif_running(br->dev) || br->multicast_disabled)
751 goto out;
752
753 br_multicast_start_querier(br);
754
755out:
756 spin_unlock(&br->multicast_lock);
757}
758
817static void __br_multicast_send_query(struct net_bridge *br, 759static void __br_multicast_send_query(struct net_bridge *br,
818 struct net_bridge_port *port, 760 struct net_bridge_port *port,
819 struct br_ip *ip) 761 struct br_ip *ip)
@@ -840,6 +782,7 @@ static void br_multicast_send_query(struct net_bridge *br,
840 struct br_ip br_group; 782 struct br_ip br_group;
841 783
842 if (!netif_running(br->dev) || br->multicast_disabled || 784 if (!netif_running(br->dev) || br->multicast_disabled ||
785 !br->multicast_querier ||
843 timer_pending(&br->multicast_querier_timer)) 786 timer_pending(&br->multicast_querier_timer))
844 return; 787 return;
845 788
@@ -1291,9 +1234,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1291 time_after(mp->timer.expires, time) : 1234 time_after(mp->timer.expires, time) :
1292 try_to_del_timer_sync(&mp->timer) >= 0)) { 1235 try_to_del_timer_sync(&mp->timer) >= 0)) {
1293 mod_timer(&mp->timer, time); 1236 mod_timer(&mp->timer, time);
1294
1295 mp->queries_sent = 0;
1296 mod_timer(&mp->query_timer, now);
1297 } 1237 }
1298 1238
1299 goto out; 1239 goto out;
@@ -1310,9 +1250,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1310 time_after(p->timer.expires, time) : 1250 time_after(p->timer.expires, time) :
1311 try_to_del_timer_sync(&p->timer) >= 0)) { 1251 try_to_del_timer_sync(&p->timer) >= 0)) {
1312 mod_timer(&p->timer, time); 1252 mod_timer(&p->timer, time);
1313
1314 p->queries_sent = 0;
1315 mod_timer(&p->query_timer, now);
1316 } 1253 }
1317 1254
1318 break; 1255 break;
@@ -1361,8 +1298,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1361 struct sk_buff *skb2 = skb; 1298 struct sk_buff *skb2 = skb;
1362 const struct iphdr *iph; 1299 const struct iphdr *iph;
1363 struct igmphdr *ih; 1300 struct igmphdr *ih;
1364 unsigned len; 1301 unsigned int len;
1365 unsigned offset; 1302 unsigned int offset;
1366 int err; 1303 int err;
1367 1304
1368 /* We treat OOM as packet loss for now. */ 1305 /* We treat OOM as packet loss for now. */
@@ -1462,7 +1399,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1462 u8 icmp6_type; 1399 u8 icmp6_type;
1463 u8 nexthdr; 1400 u8 nexthdr;
1464 __be16 frag_off; 1401 __be16 frag_off;
1465 unsigned len; 1402 unsigned int len;
1466 int offset; 1403 int offset;
1467 int err; 1404 int err;
1468 1405
@@ -1628,6 +1565,7 @@ void br_multicast_init(struct net_bridge *br)
1628 br->hash_max = 512; 1565 br->hash_max = 512;
1629 1566
1630 br->multicast_router = 1; 1567 br->multicast_router = 1;
1568 br->multicast_querier = 0;
1631 br->multicast_last_member_count = 2; 1569 br->multicast_last_member_count = 2;
1632 br->multicast_startup_query_count = 2; 1570 br->multicast_startup_query_count = 2;
1633 1571
@@ -1642,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br)
1642 setup_timer(&br->multicast_router_timer, 1580 setup_timer(&br->multicast_router_timer,
1643 br_multicast_local_router_expired, 0); 1581 br_multicast_local_router_expired, 0);
1644 setup_timer(&br->multicast_querier_timer, 1582 setup_timer(&br->multicast_querier_timer,
1645 br_multicast_local_router_expired, 0); 1583 br_multicast_querier_expired, (unsigned long)br);
1646 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1584 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1647 (unsigned long)br); 1585 (unsigned long)br);
1648} 1586}
@@ -1681,7 +1619,6 @@ void br_multicast_stop(struct net_bridge *br)
1681 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1619 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
1682 hlist[ver]) { 1620 hlist[ver]) {
1683 del_timer(&mp->timer); 1621 del_timer(&mp->timer);
1684 del_timer(&mp->query_timer);
1685 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1622 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1686 } 1623 }
1687 } 1624 }
@@ -1770,9 +1707,23 @@ unlock:
1770 return err; 1707 return err;
1771} 1708}
1772 1709
1773int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1710static void br_multicast_start_querier(struct net_bridge *br)
1774{ 1711{
1775 struct net_bridge_port *port; 1712 struct net_bridge_port *port;
1713
1714 br_multicast_open(br);
1715
1716 list_for_each_entry(port, &br->port_list, list) {
1717 if (port->state == BR_STATE_DISABLED ||
1718 port->state == BR_STATE_BLOCKING)
1719 continue;
1720
1721 __br_multicast_enable_port(port);
1722 }
1723}
1724
1725int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1726{
1776 int err = 0; 1727 int err = 0;
1777 struct net_bridge_mdb_htable *mdb; 1728 struct net_bridge_mdb_htable *mdb;
1778 1729
@@ -1802,14 +1753,7 @@ rollback:
1802 goto rollback; 1753 goto rollback;
1803 } 1754 }
1804 1755
1805 br_multicast_open(br); 1756 br_multicast_start_querier(br);
1806 list_for_each_entry(port, &br->port_list, list) {
1807 if (port->state == BR_STATE_DISABLED ||
1808 port->state == BR_STATE_BLOCKING)
1809 continue;
1810
1811 __br_multicast_enable_port(port);
1812 }
1813 1757
1814unlock: 1758unlock:
1815 spin_unlock_bh(&br->multicast_lock); 1759 spin_unlock_bh(&br->multicast_lock);
@@ -1817,6 +1761,24 @@ unlock:
1817 return err; 1761 return err;
1818} 1762}
1819 1763
1764int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1765{
1766 val = !!val;
1767
1768 spin_lock_bh(&br->multicast_lock);
1769 if (br->multicast_querier == val)
1770 goto unlock;
1771
1772 br->multicast_querier = val;
1773 if (val)
1774 br_multicast_start_querier(br);
1775
1776unlock:
1777 spin_unlock_bh(&br->multicast_lock);
1778
1779 return 0;
1780}
1781
1820int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 1782int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1821{ 1783{
1822 int err = -ENOENT; 1784 int err = -ENOENT;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..e41456bd3cc6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -54,12 +54,14 @@ static int brnf_call_ip6tables __read_mostly = 1;
54static int brnf_call_arptables __read_mostly = 1; 54static int brnf_call_arptables __read_mostly = 1;
55static int brnf_filter_vlan_tagged __read_mostly = 0; 55static int brnf_filter_vlan_tagged __read_mostly = 0;
56static int brnf_filter_pppoe_tagged __read_mostly = 0; 56static int brnf_filter_pppoe_tagged __read_mostly = 0;
57static int brnf_pass_vlan_indev __read_mostly = 0;
57#else 58#else
58#define brnf_call_iptables 1 59#define brnf_call_iptables 1
59#define brnf_call_ip6tables 1 60#define brnf_call_ip6tables 1
60#define brnf_call_arptables 1 61#define brnf_call_arptables 1
61#define brnf_filter_vlan_tagged 0 62#define brnf_filter_vlan_tagged 0
62#define brnf_filter_pppoe_tagged 0 63#define brnf_filter_pppoe_tagged 0
64#define brnf_pass_vlan_indev 0
63#endif 65#endif
64 66
65#define IS_IP(skb) \ 67#define IS_IP(skb) \
@@ -156,7 +158,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
156 rt->dst.dev = br->dev; 158 rt->dst.dev = br->dev;
157 rt->dst.path = &rt->dst; 159 rt->dst.path = &rt->dst;
158 dst_init_metrics(&rt->dst, br_dst_default_metrics, true); 160 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
159 rt->dst.flags = DST_NOXFRM | DST_NOPEER; 161 rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
160 rt->dst.ops = &fake_dst_ops; 162 rt->dst.ops = &fake_dst_ops;
161} 163}
162 164
@@ -503,6 +505,19 @@ bridged_dnat:
503 return 0; 505 return 0;
504} 506}
505 507
508static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
509{
510 struct net_device *vlan, *br;
511
512 br = bridge_parent(dev);
513 if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
514 return br;
515
516 vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);
517
518 return vlan ? vlan : br;
519}
520
506/* Some common code for IPv4/IPv6 */ 521/* Some common code for IPv4/IPv6 */
507static struct net_device *setup_pre_routing(struct sk_buff *skb) 522static struct net_device *setup_pre_routing(struct sk_buff *skb)
508{ 523{
@@ -515,7 +530,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
515 530
516 nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; 531 nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
517 nf_bridge->physindev = skb->dev; 532 nf_bridge->physindev = skb->dev;
518 skb->dev = bridge_parent(skb->dev); 533 skb->dev = brnf_get_logical_dev(skb, skb->dev);
519 if (skb->protocol == htons(ETH_P_8021Q)) 534 if (skb->protocol == htons(ETH_P_8021Q))
520 nf_bridge->mask |= BRNF_8021Q; 535 nf_bridge->mask |= BRNF_8021Q;
521 else if (skb->protocol == htons(ETH_P_PPP_SES)) 536 else if (skb->protocol == htons(ETH_P_PPP_SES))
@@ -543,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb)
543 int optlen = nh[off + 1] + 2; 558 int optlen = nh[off + 1] + 2;
544 559
545 switch (nh[off]) { 560 switch (nh[off]) {
546 case IPV6_TLV_PAD0: 561 case IPV6_TLV_PAD1:
547 optlen = 1; 562 optlen = 1;
548 break; 563 break;
549 564
@@ -694,11 +709,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
694 const struct net_device *out, 709 const struct net_device *out,
695 int (*okfn)(struct sk_buff *)) 710 int (*okfn)(struct sk_buff *))
696{ 711{
697 struct rtable *rt = skb_rtable(skb); 712 br_drop_fake_rtable(skb);
698
699 if (rt && rt == bridge_parent_rtable(in))
700 skb_dst_drop(skb);
701
702 return NF_ACCEPT; 713 return NF_ACCEPT;
703} 714}
704 715
@@ -778,7 +789,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
778 else 789 else
779 skb->protocol = htons(ETH_P_IPV6); 790 skb->protocol = htons(ETH_P_IPV6);
780 791
781 NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, 792 NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
782 br_nf_forward_finish); 793 br_nf_forward_finish);
783 794
784 return NF_STOLEN; 795 return NF_STOLEN;
@@ -1006,12 +1017,13 @@ static ctl_table brnf_table[] = {
1006 .mode = 0644, 1017 .mode = 0644,
1007 .proc_handler = brnf_sysctl_call_tables, 1018 .proc_handler = brnf_sysctl_call_tables,
1008 }, 1019 },
1009 { } 1020 {
1010}; 1021 .procname = "bridge-nf-pass-vlan-input-dev",
1011 1022 .data = &brnf_pass_vlan_indev,
1012static struct ctl_path brnf_path[] = { 1023 .maxlen = sizeof(int),
1013 { .procname = "net", }, 1024 .mode = 0644,
1014 { .procname = "bridge", }, 1025 .proc_handler = brnf_sysctl_call_tables,
1026 },
1015 { } 1027 { }
1016}; 1028};
1017#endif 1029#endif
@@ -1030,7 +1042,7 @@ int __init br_netfilter_init(void)
1030 return ret; 1042 return ret;
1031 } 1043 }
1032#ifdef CONFIG_SYSCTL 1044#ifdef CONFIG_SYSCTL
1033 brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); 1045 brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
1034 if (brnf_sysctl_header == NULL) { 1046 if (brnf_sysctl_header == NULL) {
1035 printk(KERN_WARNING 1047 printk(KERN_WARNING
1036 "br_netfilter: can't register to sysctl.\n"); 1048 "br_netfilter: can't register to sysctl.\n");
@@ -1047,7 +1059,7 @@ void br_netfilter_fini(void)
1047{ 1059{
1048 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 1060 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1049#ifdef CONFIG_SYSCTL 1061#ifdef CONFIG_SYSCTL
1050 unregister_sysctl_table(brnf_sysctl_header); 1062 unregister_net_sysctl_table(brnf_sysctl_header);
1051#endif 1063#endif
1052 dst_entries_destroy(&fake_dst_ops); 1064 dst_entries_destroy(&fake_dst_ops);
1053} 1065}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a1daf8227ed1..2080485515f1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
60 hdr->ifi_flags = dev_get_flags(dev); 60 hdr->ifi_flags = dev_get_flags(dev);
61 hdr->ifi_change = 0; 61 hdr->ifi_change = 0;
62 62
63 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 63 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
64 NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); 64 nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
65 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 65 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
66 NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); 66 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
67 67 (dev->addr_len &&
68 if (dev->addr_len) 68 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
69 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 69 (dev->ifindex != dev->iflink &&
70 70 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
71 if (dev->ifindex != dev->iflink) 71 (event == RTM_NEWLINK &&
72 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); 72 nla_put_u8(skb, IFLA_PROTINFO, port->state)))
73 73 goto nla_put_failure;
74 if (event == RTM_NEWLINK)
75 NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
76
77 return nlmsg_end(skb, nlh); 74 return nlmsg_end(skb, nlh);
78 75
79nla_put_failure: 76nla_put_failure:
@@ -91,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
91 int err = -ENOBUFS; 88 int err = -ENOBUFS;
92 89
93 br_debug(port->br, "port %u(%s) event %d\n", 90 br_debug(port->br, "port %u(%s) event %d\n",
94 (unsigned)port->port_no, port->dev->name, event); 91 (unsigned int)port->port_no, port->dev->name, event);
95 92
96 skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); 93 skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
97 if (skb == NULL) 94 if (skb == NULL)
@@ -235,18 +232,6 @@ int __init br_netlink_init(void)
235 br_rtm_setlink, NULL, NULL); 232 br_rtm_setlink, NULL, NULL);
236 if (err) 233 if (err)
237 goto err3; 234 goto err3;
238 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
239 br_fdb_add, NULL, NULL);
240 if (err)
241 goto err3;
242 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
243 br_fdb_delete, NULL, NULL);
244 if (err)
245 goto err3;
246 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
247 NULL, br_fdb_dump, NULL);
248 if (err)
249 goto err3;
250 235
251 return 0; 236 return 0;
252 237
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63ad7a8..1a8ad4fb9a6b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@ struct net_bridge_port_group {
82 struct hlist_node mglist; 82 struct hlist_node mglist;
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84 struct timer_list timer; 84 struct timer_list timer;
85 struct timer_list query_timer;
86 struct br_ip addr; 85 struct br_ip addr;
87 u32 queries_sent;
88}; 86};
89 87
90struct net_bridge_mdb_entry 88struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
94 struct net_bridge_port_group __rcu *ports; 92 struct net_bridge_port_group __rcu *ports;
95 struct rcu_head rcu; 93 struct rcu_head rcu;
96 struct timer_list timer; 94 struct timer_list timer;
97 struct timer_list query_timer;
98 struct br_ip addr; 95 struct br_ip addr;
99 bool mglist; 96 bool mglist;
100 u32 queries_sent;
101}; 97};
102 98
103struct net_bridge_mdb_htable 99struct net_bridge_mdb_htable
@@ -228,6 +224,7 @@ struct net_bridge
228 unsigned char multicast_router; 224 unsigned char multicast_router;
229 225
230 u8 multicast_disabled:1; 226 u8 multicast_disabled:1;
227 u8 multicast_querier:1;
231 228
232 u32 hash_elasticity; 229 u32 hash_elasticity;
233 u32 hash_max; 230 u32 hash_max;
@@ -363,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br,
363extern void br_fdb_update(struct net_bridge *br, 360extern void br_fdb_update(struct net_bridge *br,
364 struct net_bridge_port *source, 361 struct net_bridge_port *source,
365 const unsigned char *addr); 362 const unsigned char *addr);
366extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb); 363
367extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 364extern int br_fdb_delete(struct ndmsg *ndm,
368extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 365 struct net_device *dev,
366 unsigned char *addr);
367extern int br_fdb_add(struct ndmsg *nlh,
368 struct net_device *dev,
369 unsigned char *addr,
370 u16 nlh_flags);
371extern int br_fdb_dump(struct sk_buff *skb,
372 struct netlink_callback *cb,
373 struct net_device *dev,
374 int idx);
369 375
370/* br_forward.c */ 376/* br_forward.c */
371extern void br_deliver(const struct net_bridge_port *to, 377extern void br_deliver(const struct net_bridge_port *to,
@@ -421,6 +427,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
421extern int br_multicast_set_port_router(struct net_bridge_port *p, 427extern int br_multicast_set_port_router(struct net_bridge_port *p,
422 unsigned long val); 428 unsigned long val);
423extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); 429extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
430extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
424extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); 431extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
425 432
426static inline bool br_multicast_is_router(struct net_bridge *br) 433static inline bool br_multicast_is_router(struct net_bridge *br)
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 05ed9bc7e426..0c0fe36e7aa9 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -29,10 +29,9 @@
29#define BR_MIN_PATH_COST 1 29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535 30#define BR_MAX_PATH_COST 65535
31 31
32struct br_config_bpdu 32struct br_config_bpdu {
33{ 33 unsigned int topology_change:1;
34 unsigned topology_change:1; 34 unsigned int topology_change_ack:1;
35 unsigned topology_change_ack:1;
36 bridge_id root; 35 bridge_id root;
37 int root_path_cost; 36 int root_path_cost;
38 bridge_id bridge_id; 37 bridge_id bridge_id;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d96ba76..af9a12099ba4 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = {
32void br_log_state(const struct net_bridge_port *p) 32void br_log_state(const struct net_bridge_port *p)
33{ 33{
34 br_info(p->br, "port %u(%s) entered %s state\n", 34 br_info(p->br, "port %u(%s) entered %s state\n",
35 (unsigned) p->port_no, p->dev->name, 35 (unsigned int) p->port_no, p->dev->name,
36 br_port_state_names[p->state]); 36 br_port_state_names[p->state]);
37} 37}
38 38
@@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
478{ 478{
479 if (br_is_designated_port(p)) { 479 if (br_is_designated_port(p)) {
480 br_info(p->br, "port %u(%s) received tcn bpdu\n", 480 br_info(p->br, "port %u(%s) received tcn bpdu\n",
481 (unsigned) p->port_no, p->dev->name); 481 (unsigned int) p->port_no, p->dev->name);
482 482
483 br_topology_change_detection(p->br); 483 br_topology_change_detection(p->br);
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index e16aade51ae0..fd30a6022dea 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
167 if (p->state == BR_STATE_DISABLED) 167 if (p->state == BR_STATE_DISABLED)
168 goto out; 168 goto out;
169 169
170 if (compare_ether_addr(dest, br->group_addr) != 0) 170 if (!ether_addr_equal(dest, br->group_addr))
171 goto out; 171 goto out;
172 172
173 buf = skb_pull(skb, 3); 173 buf = skb_pull(skb, 3);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index f494496373d6..9d5a414a3943 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
178/* called under bridge lock */ 178/* called under bridge lock */
179void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) 179void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
180{ 180{
181 /* should be aligned on 2 bytes for compare_ether_addr() */ 181 /* should be aligned on 2 bytes for ether_addr_equal() */
182 unsigned short oldaddr_aligned[ETH_ALEN >> 1]; 182 unsigned short oldaddr_aligned[ETH_ALEN >> 1];
183 unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; 183 unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
184 struct net_bridge_port *p; 184 struct net_bridge_port *p;
@@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
191 memcpy(br->dev->dev_addr, addr, ETH_ALEN); 191 memcpy(br->dev->dev_addr, addr, ETH_ALEN);
192 192
193 list_for_each_entry(p, &br->port_list, list) { 193 list_for_each_entry(p, &br->port_list, list) {
194 if (!compare_ether_addr(p->designated_bridge.addr, oldaddr)) 194 if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
195 memcpy(p->designated_bridge.addr, addr, ETH_ALEN); 195 memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
196 196
197 if (!compare_ether_addr(p->designated_root.addr, oldaddr)) 197 if (ether_addr_equal(p->designated_root.addr, oldaddr))
198 memcpy(p->designated_root.addr, addr, ETH_ALEN); 198 memcpy(p->designated_root.addr, addr, ETH_ALEN);
199
200 } 199 }
201 200
202 br_configuration_update(br); 201 br_configuration_update(br);
@@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
205 br_become_root_bridge(br); 204 br_become_root_bridge(br);
206} 205}
207 206
208/* should be aligned on 2 bytes for compare_ether_addr() */ 207/* should be aligned on 2 bytes for ether_addr_equal() */
209static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; 208static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
210 209
211/* called under bridge lock */ 210/* called under bridge lock */
@@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
227 226
228 } 227 }
229 228
230 if (compare_ether_addr(br->bridge_id.addr, addr) == 0) 229 if (ether_addr_equal(br->bridge_id.addr, addr))
231 return false; /* no change */ 230 return false; /* no change */
232 231
233 br_stp_change_bridge_id(br, addr); 232 br_stp_change_bridge_id(br, addr);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0f9975..a6747e673426 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg)
56 return; 56 return;
57 57
58 br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", 58 br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
59 (unsigned) p->port_no, p->dev->name, 59 (unsigned int) p->port_no, p->dev->name,
60 id->prio[0], id->prio[1], &id->addr); 60 id->prio[0], id->prio[1], &id->addr);
61 61
62 /* 62 /*
@@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
84 struct net_bridge *br = p->br; 84 struct net_bridge *br = p->br;
85 85
86 br_debug(br, "port %u(%s) forward delay timer\n", 86 br_debug(br, "port %u(%s) forward delay timer\n",
87 (unsigned) p->port_no, p->dev->name); 87 (unsigned int) p->port_no, p->dev->name);
88 spin_lock(&br->lock); 88 spin_lock(&br->lock);
89 if (p->state == BR_STATE_LISTENING) { 89 if (p->state == BR_STATE_LISTENING) {
90 p->state = BR_STATE_LEARNING; 90 p->state = BR_STATE_LEARNING;
@@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg)
131 struct net_bridge_port *p = (struct net_bridge_port *) arg; 131 struct net_bridge_port *p = (struct net_bridge_port *) arg;
132 132
133 br_debug(p->br, "port %u(%s) hold timer expired\n", 133 br_debug(p->br, "port %u(%s) hold timer expired\n",
134 (unsigned) p->port_no, p->dev->name); 134 (unsigned int) p->port_no, p->dev->name);
135 135
136 spin_lock(&p->br->lock); 136 spin_lock(&p->br->lock);
137 if (p->config_pending) 137 if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e43984..c5c059333eab 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d,
297 const char *buf, size_t len) 297 const char *buf, size_t len)
298{ 298{
299 struct net_bridge *br = to_bridge(d); 299 struct net_bridge *br = to_bridge(d);
300 unsigned new_addr[6]; 300 unsigned int new_addr[6];
301 int i; 301 int i;
302 302
303 if (!capable(CAP_NET_ADMIN)) 303 if (!capable(CAP_NET_ADMIN))
@@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d,
379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, 379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
380 show_multicast_snooping, store_multicast_snooping); 380 show_multicast_snooping, store_multicast_snooping);
381 381
382static ssize_t show_multicast_querier(struct device *d,
383 struct device_attribute *attr,
384 char *buf)
385{
386 struct net_bridge *br = to_bridge(d);
387 return sprintf(buf, "%d\n", br->multicast_querier);
388}
389
390static ssize_t store_multicast_querier(struct device *d,
391 struct device_attribute *attr,
392 const char *buf, size_t len)
393{
394 return store_bridge_parm(d, buf, len, br_multicast_set_querier);
395}
396static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
397 show_multicast_querier, store_multicast_querier);
398
382static ssize_t show_hash_elasticity(struct device *d, 399static ssize_t show_hash_elasticity(struct device *d,
383 struct device_attribute *attr, char *buf) 400 struct device_attribute *attr, char *buf)
384{ 401{
@@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = {
702#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 719#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
703 &dev_attr_multicast_router.attr, 720 &dev_attr_multicast_router.attr,
704 &dev_attr_multicast_snooping.attr, 721 &dev_attr_multicast_snooping.attr,
722 &dev_attr_multicast_querier.attr,
705 &dev_attr_hash_elasticity.attr, 723 &dev_attr_hash_elasticity.attr,
706 &dev_attr_hash_max.attr, 724 &dev_attr_hash_max.attr,
707 &dev_attr_multicast_last_member_count.attr, 725 &dev_attr_multicast_last_member_count.attr,
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 5b33a2e634a6..071d87214dde 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
164 !(info->bitmask & EBT_STP_MASK)) 164 !(info->bitmask & EBT_STP_MASK))
165 return -EINVAL; 165 return -EINVAL;
166 /* Make sure the match only receives stp frames */ 166 /* Make sure the match only receives stp frames */
167 if (compare_ether_addr(e->destmac, bridge_ula) || 167 if (!ether_addr_equal(e->destmac, bridge_ula) ||
168 compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) 168 !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
169 return -EINVAL; 169 return -EINVAL;
170 170
171 return 0; 171 return 0;
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 936361e5a2b6..d3694953b1d7 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -25,7 +25,7 @@ config CAIF_DEBUG
25 bool "Enable Debug" 25 bool "Enable Debug"
26 depends on CAIF 26 depends on CAIF
27 default n 27 default n
28 --- help --- 28 ---help---
29 Enable the inclusion of debug code in the CAIF stack. 29 Enable the inclusion of debug code in the CAIF stack.
30 Be aware that doing this will impact performance. 30 Be aware that doing this will impact performance.
31 If unsure say N. 31 If unsure say N.
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 82c57069415f..aa6f716524fd 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -162,7 +162,6 @@ void caif_flow_cb(struct sk_buff *skb)
162static int transmit(struct cflayer *layer, struct cfpkt *pkt) 162static int transmit(struct cflayer *layer, struct cfpkt *pkt)
163{ 163{
164 int err, high = 0, qlen = 0; 164 int err, high = 0, qlen = 0;
165 struct caif_dev_common *caifdev;
166 struct caif_device_entry *caifd = 165 struct caif_device_entry *caifd =
167 container_of(layer, struct caif_device_entry, layer); 166 container_of(layer, struct caif_device_entry, layer);
168 struct sk_buff *skb; 167 struct sk_buff *skb;
@@ -174,7 +173,6 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
174 skb->dev = caifd->netdev; 173 skb->dev = caifd->netdev;
175 skb_reset_network_header(skb); 174 skb_reset_network_header(skb);
176 skb->protocol = htons(ETH_P_CAIF); 175 skb->protocol = htons(ETH_P_CAIF);
177 caifdev = netdev_priv(caifd->netdev);
178 176
179 /* Check if we need to handle xoff */ 177 /* Check if we need to handle xoff */
180 if (likely(caifd->netdev->tx_queue_len == 0)) 178 if (likely(caifd->netdev->tx_queue_len == 0))
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a97d97a3a512..fb8944355264 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/caif/caif_socket.h> 21#include <linux/caif/caif_socket.h>
22#include <linux/atomic.h> 22#include <linux/pkt_sched.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
@@ -43,34 +43,9 @@ enum caif_states {
43#define TX_FLOW_ON_BIT 1 43#define TX_FLOW_ON_BIT 1
44#define RX_FLOW_ON_BIT 2 44#define RX_FLOW_ON_BIT 2
45 45
46static struct dentry *debugfsdir;
47
48#ifdef CONFIG_DEBUG_FS
49struct debug_fs_counter {
50 atomic_t caif_nr_socks;
51 atomic_t caif_sock_create;
52 atomic_t num_connect_req;
53 atomic_t num_connect_resp;
54 atomic_t num_connect_fail_resp;
55 atomic_t num_disconnect;
56 atomic_t num_remote_shutdown_ind;
57 atomic_t num_tx_flow_off_ind;
58 atomic_t num_tx_flow_on_ind;
59 atomic_t num_rx_flow_off;
60 atomic_t num_rx_flow_on;
61};
62static struct debug_fs_counter cnt;
63#define dbfs_atomic_inc(v) atomic_inc_return(v)
64#define dbfs_atomic_dec(v) atomic_dec_return(v)
65#else
66#define dbfs_atomic_inc(v) 0
67#define dbfs_atomic_dec(v) 0
68#endif
69
70struct caifsock { 46struct caifsock {
71 struct sock sk; /* must be first member */ 47 struct sock sk; /* must be first member */
72 struct cflayer layer; 48 struct cflayer layer;
73 char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
74 u32 flow_state; 49 u32 flow_state;
75 struct caif_connect_request conn_req; 50 struct caif_connect_request conn_req;
76 struct mutex readlock; 51 struct mutex readlock;
@@ -155,13 +130,11 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
155 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
156 131
157 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
158 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
159 if (net_ratelimit()) 134 net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
160 pr_debug("sending flow OFF (queue len = %d %d)\n", 135 atomic_read(&cf_sk->sk.sk_rmem_alloc),
161 atomic_read(&cf_sk->sk.sk_rmem_alloc), 136 sk_rcvbuf_lowwater(cf_sk));
162 sk_rcvbuf_lowwater(cf_sk));
163 set_rx_flow_off(cf_sk); 137 set_rx_flow_off(cf_sk);
164 dbfs_atomic_inc(&cnt.num_rx_flow_off);
165 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 138 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
166 } 139 }
167 140
@@ -170,9 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
170 return err; 143 return err;
171 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { 144 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
172 set_rx_flow_off(cf_sk); 145 set_rx_flow_off(cf_sk);
173 if (net_ratelimit()) 146 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
174 pr_debug("sending flow OFF due to rmem_schedule\n");
175 dbfs_atomic_inc(&cnt.num_rx_flow_off);
176 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 147 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
177 } 148 }
178 skb->dev = NULL; 149 skb->dev = NULL;
@@ -233,14 +204,12 @@ static void caif_ctrl_cb(struct cflayer *layr,
233 switch (flow) { 204 switch (flow) {
234 case CAIF_CTRLCMD_FLOW_ON_IND: 205 case CAIF_CTRLCMD_FLOW_ON_IND:
235 /* OK from modem to start sending again */ 206 /* OK from modem to start sending again */
236 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
237 set_tx_flow_on(cf_sk); 207 set_tx_flow_on(cf_sk);
238 cf_sk->sk.sk_state_change(&cf_sk->sk); 208 cf_sk->sk.sk_state_change(&cf_sk->sk);
239 break; 209 break;
240 210
241 case CAIF_CTRLCMD_FLOW_OFF_IND: 211 case CAIF_CTRLCMD_FLOW_OFF_IND:
242 /* Modem asks us to shut up */ 212 /* Modem asks us to shut up */
243 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
244 set_tx_flow_off(cf_sk); 213 set_tx_flow_off(cf_sk);
245 cf_sk->sk.sk_state_change(&cf_sk->sk); 214 cf_sk->sk.sk_state_change(&cf_sk->sk);
246 break; 215 break;
@@ -249,7 +218,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
249 /* We're now connected */ 218 /* We're now connected */
250 caif_client_register_refcnt(&cf_sk->layer, 219 caif_client_register_refcnt(&cf_sk->layer,
251 cfsk_hold, cfsk_put); 220 cfsk_hold, cfsk_put);
252 dbfs_atomic_inc(&cnt.num_connect_resp);
253 cf_sk->sk.sk_state = CAIF_CONNECTED; 221 cf_sk->sk.sk_state = CAIF_CONNECTED;
254 set_tx_flow_on(cf_sk); 222 set_tx_flow_on(cf_sk);
255 cf_sk->sk.sk_state_change(&cf_sk->sk); 223 cf_sk->sk.sk_state_change(&cf_sk->sk);
@@ -263,7 +231,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
263 231
264 case CAIF_CTRLCMD_INIT_FAIL_RSP: 232 case CAIF_CTRLCMD_INIT_FAIL_RSP:
265 /* Connect request failed */ 233 /* Connect request failed */
266 dbfs_atomic_inc(&cnt.num_connect_fail_resp);
267 cf_sk->sk.sk_err = ECONNREFUSED; 234 cf_sk->sk.sk_err = ECONNREFUSED;
268 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 235 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
269 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 236 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
@@ -277,7 +244,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
277 244
278 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: 245 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
279 /* Modem has closed this connection, or device is down. */ 246 /* Modem has closed this connection, or device is down. */
280 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
281 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 247 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
282 cf_sk->sk.sk_err = ECONNRESET; 248 cf_sk->sk.sk_err = ECONNRESET;
283 set_rx_flow_on(cf_sk); 249 set_rx_flow_on(cf_sk);
@@ -297,7 +263,6 @@ static void caif_check_flow_release(struct sock *sk)
297 return; 263 return;
298 264
299 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 265 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
300 dbfs_atomic_inc(&cnt.num_rx_flow_on);
301 set_rx_flow_on(cf_sk); 266 set_rx_flow_on(cf_sk);
302 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 267 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
303 } 268 }
@@ -538,6 +503,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
538 503
539 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 504 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
540 memset(skb->cb, 0, sizeof(struct caif_payload_info)); 505 memset(skb->cb, 0, sizeof(struct caif_payload_info));
506 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
541 507
542 if (cf_sk->layer.dn == NULL) { 508 if (cf_sk->layer.dn == NULL) {
543 kfree_skb(skb); 509 kfree_skb(skb);
@@ -856,7 +822,6 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
856 /*ifindex = id of the interface.*/ 822 /*ifindex = id of the interface.*/
857 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; 823 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
858 824
859 dbfs_atomic_inc(&cnt.num_connect_req);
860 cf_sk->layer.receive = caif_sktrecv_cb; 825 cf_sk->layer.receive = caif_sktrecv_cb;
861 826
862 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, 827 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
@@ -945,8 +910,6 @@ static int caif_release(struct socket *sock)
945 spin_unlock_bh(&sk->sk_receive_queue.lock); 910 spin_unlock_bh(&sk->sk_receive_queue.lock);
946 sock->sk = NULL; 911 sock->sk = NULL;
947 912
948 dbfs_atomic_inc(&cnt.num_disconnect);
949
950 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); 913 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
951 if (cf_sk->debugfs_socket_dir != NULL) 914 if (cf_sk->debugfs_socket_dir != NULL)
952 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); 915 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
@@ -1054,14 +1017,12 @@ static void caif_sock_destructor(struct sock *sk)
1054 return; 1017 return;
1055 } 1018 }
1056 sk_stream_kill_queues(&cf_sk->sk); 1019 sk_stream_kill_queues(&cf_sk->sk);
1057 dbfs_atomic_dec(&cnt.caif_nr_socks);
1058 caif_free_client(&cf_sk->layer); 1020 caif_free_client(&cf_sk->layer);
1059} 1021}
1060 1022
1061static int caif_create(struct net *net, struct socket *sock, int protocol, 1023static int caif_create(struct net *net, struct socket *sock, int protocol,
1062 int kern) 1024 int kern)
1063{ 1025{
1064 int num;
1065 struct sock *sk = NULL; 1026 struct sock *sk = NULL;
1066 struct caifsock *cf_sk = NULL; 1027 struct caifsock *cf_sk = NULL;
1067 static struct proto prot = {.name = "PF_CAIF", 1028 static struct proto prot = {.name = "PF_CAIF",
@@ -1100,6 +1061,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1100 /* Store the protocol */ 1061 /* Store the protocol */
1101 sk->sk_protocol = (unsigned char) protocol; 1062 sk->sk_protocol = (unsigned char) protocol;
1102 1063
1064 /* Initialize default priority for well-known cases */
1065 switch (protocol) {
1066 case CAIFPROTO_AT:
1067 sk->sk_priority = TC_PRIO_CONTROL;
1068 break;
1069 case CAIFPROTO_RFM:
1070 sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1071 break;
1072 default:
1073 sk->sk_priority = TC_PRIO_BESTEFFORT;
1074 }
1075
1103 /* 1076 /*
1104 * Lock in order to try to stop someone from opening the socket 1077 * Lock in order to try to stop someone from opening the socket
1105 * too early. 1078 * too early.
@@ -1119,37 +1092,8 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1119 set_rx_flow_on(cf_sk); 1092 set_rx_flow_on(cf_sk);
1120 1093
1121 /* Set default options on configuration */ 1094 /* Set default options on configuration */
1122 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
1123 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1095 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1124 cf_sk->conn_req.protocol = protocol; 1096 cf_sk->conn_req.protocol = protocol;
1125 /* Increase the number of sockets created. */
1126 dbfs_atomic_inc(&cnt.caif_nr_socks);
1127 num = dbfs_atomic_inc(&cnt.caif_sock_create);
1128#ifdef CONFIG_DEBUG_FS
1129 if (!IS_ERR(debugfsdir)) {
1130
1131 /* Fill in some information concerning the misc socket. */
1132 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
1133
1134 cf_sk->debugfs_socket_dir =
1135 debugfs_create_dir(cf_sk->name, debugfsdir);
1136
1137 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
1138 cf_sk->debugfs_socket_dir,
1139 (u32 *) &cf_sk->sk.sk_state);
1140 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1141 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1142 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
1143 cf_sk->debugfs_socket_dir,
1144 (u32 *) &cf_sk->sk.sk_rmem_alloc);
1145 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
1146 cf_sk->debugfs_socket_dir,
1147 (u32 *) &cf_sk->sk.sk_wmem_alloc);
1148 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1149 cf_sk->debugfs_socket_dir,
1150 (u32 *) &cf_sk->layer.id);
1151 }
1152#endif
1153 release_sock(&cf_sk->sk); 1097 release_sock(&cf_sk->sk);
1154 return 0; 1098 return 0;
1155} 1099}
@@ -1161,7 +1105,7 @@ static struct net_proto_family caif_family_ops = {
1161 .owner = THIS_MODULE, 1105 .owner = THIS_MODULE,
1162}; 1106};
1163 1107
1164static int af_caif_init(void) 1108static int __init caif_sktinit_module(void)
1165{ 1109{
1166 int err = sock_register(&caif_family_ops); 1110 int err = sock_register(&caif_family_ops);
1167 if (!err) 1111 if (!err)
@@ -1169,54 +1113,9 @@ static int af_caif_init(void)
1169 return 0; 1113 return 0;
1170} 1114}
1171 1115
1172static int __init caif_sktinit_module(void)
1173{
1174#ifdef CONFIG_DEBUG_FS
1175 debugfsdir = debugfs_create_dir("caif_sk", NULL);
1176 if (!IS_ERR(debugfsdir)) {
1177 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1178 debugfsdir,
1179 (u32 *) &cnt.caif_nr_socks);
1180 debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
1181 debugfsdir,
1182 (u32 *) &cnt.caif_sock_create);
1183 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1184 debugfsdir,
1185 (u32 *) &cnt.num_connect_req);
1186 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
1187 debugfsdir,
1188 (u32 *) &cnt.num_connect_resp);
1189 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
1190 debugfsdir,
1191 (u32 *) &cnt.num_connect_fail_resp);
1192 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
1193 debugfsdir,
1194 (u32 *) &cnt.num_disconnect);
1195 debugfs_create_u32("num_remote_shutdown_ind",
1196 S_IRUSR | S_IWUSR, debugfsdir,
1197 (u32 *) &cnt.num_remote_shutdown_ind);
1198 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1199 debugfsdir,
1200 (u32 *) &cnt.num_tx_flow_off_ind);
1201 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1202 debugfsdir,
1203 (u32 *) &cnt.num_tx_flow_on_ind);
1204 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1205 debugfsdir,
1206 (u32 *) &cnt.num_rx_flow_off);
1207 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1208 debugfsdir,
1209 (u32 *) &cnt.num_rx_flow_on);
1210 }
1211#endif
1212 return af_caif_init();
1213}
1214
1215static void __exit caif_sktexit_module(void) 1116static void __exit caif_sktexit_module(void)
1216{ 1117{
1217 sock_unregister(PF_CAIF); 1118 sock_unregister(PF_CAIF);
1218 if (debugfsdir != NULL)
1219 debugfs_remove_recursive(debugfsdir);
1220} 1119}
1221module_init(caif_sktinit_module); 1120module_init(caif_sktinit_module);
1222module_exit(caif_sktexit_module); 1121module_exit(caif_sktexit_module);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf52225692e..047cd0eec022 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
9#include <linux/stddef.h> 9#include <linux/stddef.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/pkt_sched.h>
12#include <net/caif/caif_layer.h> 13#include <net/caif/caif_layer.h>
13#include <net/caif/cfpkt.h> 14#include <net/caif/cfpkt.h>
14#include <net/caif/cfctrl.h> 15#include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
189 cfctrl->serv.dev_info.id = physlinkid; 190 cfctrl->serv.dev_info.id = physlinkid;
190 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); 191 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
191 cfpkt_addbdy(pkt, physlinkid); 192 cfpkt_addbdy(pkt, physlinkid);
193 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
192 dn->transmit(dn, pkt); 194 dn->transmit(dn, pkt);
193} 195}
194 196
@@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
281 * might arrive with the newly allocated channel ID. 283 * might arrive with the newly allocated channel ID.
282 */ 284 */
283 cfpkt_info(pkt)->dev_info->id = param->phyid; 285 cfpkt_info(pkt)->dev_info->id = param->phyid;
286 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
284 ret = 287 ret =
285 dn->transmit(dn, pkt); 288 dn->transmit(dn, pkt);
286 if (ret < 0) { 289 if (ret < 0) {
@@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
314 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 317 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
315 cfpkt_addbdy(pkt, channelid); 318 cfpkt_addbdy(pkt, channelid);
316 init_info(cfpkt_info(pkt), cfctrl); 319 init_info(cfpkt_info(pkt), cfctrl);
320 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
317 ret = 321 ret =
318 dn->transmit(dn, pkt); 322 dn->transmit(dn, pkt);
319#ifndef CAIF_NO_LOOP 323#ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 65d6ef3cf9aa..2914659eb9b2 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -41,8 +41,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
41 struct caif_payload_info *info; 41 struct caif_payload_info *info;
42 int ret; 42 int ret;
43 43
44 if (!cfsrvl_ready(service, &ret)) 44 if (!cfsrvl_ready(service, &ret)) {
45 cfpkt_destroy(pkt);
45 return ret; 46 return ret;
47 }
46 48
47 /* Add info for MUX-layer to route the packet out */ 49 /* Add info for MUX-layer to route the packet out */
48 info = cfpkt_info(pkt); 50 info = cfpkt_info(pkt);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0f5ff27aa41c..a63f4a5f5aff 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -86,12 +86,17 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
86 struct caif_payload_info *info; 86 struct caif_payload_info *info;
87 struct cfsrvl *service = container_obj(layr); 87 struct cfsrvl *service = container_obj(layr);
88 int ret; 88 int ret;
89 if (!cfsrvl_ready(service, &ret)) 89
90 if (!cfsrvl_ready(service, &ret)) {
91 cfpkt_destroy(pkt);
90 return ret; 92 return ret;
93 }
91 94
92 /* STE Modem cannot handle more than 1500 bytes datagrams */ 95 /* STE Modem cannot handle more than 1500 bytes datagrams */
93 if (cfpkt_getlen(pkt) > DGM_MTU) 96 if (cfpkt_getlen(pkt) > DGM_MTU) {
97 cfpkt_destroy(pkt);
94 return -EMSGSIZE; 98 return -EMSGSIZE;
99 }
95 100
96 cfpkt_add_head(pkt, &zero, 3); 101 cfpkt_add_head(pkt, &zero, 3);
97 packet_type = 0x08; /* B9 set - UNCLASSIFIED */ 102 packet_type = 0x08; /* B9 set - UNCLASSIFIED */
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba859b97..863dedd91bb6 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
381 memcpy(skb2->data, split, len2nd); 381 memcpy(skb2->data, split, len2nd);
382 skb2->tail += len2nd; 382 skb2->tail += len2nd;
383 skb2->len += len2nd; 383 skb2->len += len2nd;
384 skb2->priority = skb->priority;
384 return skb_to_pkt(skb2); 385 return skb_to_pkt(skb2);
385} 386}
386 387
@@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
394 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; 395 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
395} 396}
396EXPORT_SYMBOL(cfpkt_info); 397EXPORT_SYMBOL(cfpkt_info);
398
399void cfpkt_set_prio(struct cfpkt *pkt, int prio)
400{
401 pkt_to_skb(pkt)->priority = prio;
402}
403EXPORT_SYMBOL(cfpkt_set_prio);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 6dc75d4f8d94..2b563ad04597 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -184,6 +184,11 @@ out:
184 rfml->serv.dev_info.id); 184 rfml->serv.dev_info.id);
185 } 185 }
186 spin_unlock(&rfml->sync); 186 spin_unlock(&rfml->sync);
187
188 if (unlikely(err == -EAGAIN))
189 /* It is not possible to recover after drop of a fragment */
190 err = -EIO;
191
187 return err; 192 return err;
188} 193}
189 194
@@ -218,7 +223,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
218 caif_assert(layr->dn->transmit != NULL); 223 caif_assert(layr->dn->transmit != NULL);
219 224
220 if (!cfsrvl_ready(&rfml->serv, &err)) 225 if (!cfsrvl_ready(&rfml->serv, &err))
221 return err; 226 goto out;
222 227
223 err = -EPROTO; 228 err = -EPROTO;
224 if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1) 229 if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
@@ -251,8 +256,11 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
251 256
252 err = cfrfml_transmit_segment(rfml, frontpkt); 257 err = cfrfml_transmit_segment(rfml, frontpkt);
253 258
254 if (err != 0) 259 if (err != 0) {
260 frontpkt = NULL;
255 goto out; 261 goto out;
262 }
263
256 frontpkt = rearpkt; 264 frontpkt = rearpkt;
257 rearpkt = NULL; 265 rearpkt = NULL;
258 266
@@ -286,19 +294,8 @@ out:
286 if (rearpkt) 294 if (rearpkt)
287 cfpkt_destroy(rearpkt); 295 cfpkt_destroy(rearpkt);
288 296
289 if (frontpkt && frontpkt != pkt) { 297 if (frontpkt)
290
291 cfpkt_destroy(frontpkt); 298 cfpkt_destroy(frontpkt);
292 /*
293 * Socket layer will free the original packet,
294 * but this packet may already be sent and
295 * freed. So we have to return 0 in this case
296 * to avoid socket layer to re-free this packet.
297 * The return of shutdown indication will
298 * cause connection to be invalidated anyhow.
299 */
300 err = 0;
301 }
302 } 299 }
303 300
304 return err; 301 return err;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index b99f5b22689d..dd485f6128e8 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pkt_sched.h>
14#include <net/caif/caif_layer.h> 15#include <net/caif/caif_layer.h>
15#include <net/caif/cfsrvl.h> 16#include <net/caif/cfsrvl.h>
16#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
120 info->channel_id = service->layer.id; 121 info->channel_id = service->layer.id;
121 info->hdr_len = 1; 122 info->hdr_len = 1;
122 info->dev_info = &service->dev_info; 123 info->dev_info = &service->dev_info;
124 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
123 return layr->dn->transmit(layr->dn, pkt); 125 return layr->dn->transmit(layr->dn, pkt);
124 } 126 }
125 case CAIF_MODEMCMD_FLOW_OFF_REQ: 127 case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
140 info->channel_id = service->layer.id; 142 info->channel_id = service->layer.id;
141 info->hdr_len = 1; 143 info->hdr_len = 1;
142 info->dev_info = &service->dev_info; 144 info->dev_info = &service->dev_info;
145 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
143 return layr->dn->transmit(layr->dn, pkt); 146 return layr->dn->transmit(layr->dn, pkt);
144 } 147 }
145 default: 148 default:
@@ -174,15 +177,11 @@ void cfsrvl_init(struct cfsrvl *service,
174 177
175bool cfsrvl_ready(struct cfsrvl *service, int *err) 178bool cfsrvl_ready(struct cfsrvl *service, int *err)
176{ 179{
177 if (service->open && service->modem_flow_on && service->phy_flow_on)
178 return true;
179 if (!service->open) { 180 if (!service->open) {
180 *err = -ENOTCONN; 181 *err = -ENOTCONN;
181 return false; 182 return false;
182 } 183 }
183 caif_assert(!(service->modem_flow_on && service->phy_flow_on)); 184 return true;
184 *err = -EAGAIN;
185 return false;
186} 185}
187 186
188u8 cfsrvl_getphyid(struct cflayer *layer) 187u8 cfsrvl_getphyid(struct cflayer *layer)
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 53e49f3e3af3..86d2dadb4b73 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -84,8 +84,11 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
84 caif_assert(layr != NULL); 84 caif_assert(layr != NULL);
85 caif_assert(layr->dn != NULL); 85 caif_assert(layr->dn != NULL);
86 caif_assert(layr->dn->transmit != NULL); 86 caif_assert(layr->dn->transmit != NULL);
87 if (!cfsrvl_ready(service, &ret)) 87
88 if (!cfsrvl_ready(service, &ret)) {
89 cfpkt_destroy(pkt);
88 return ret; 90 return ret;
91 }
89 92
90 cfpkt_add_head(pkt, &zero, 1); 93 cfpkt_add_head(pkt, &zero, 1);
91 /* Add info for MUX-layer to route the packet out. */ 94 /* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index e3f37db40ac3..a8e2a2d758a5 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -50,8 +50,12 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
50 struct caif_payload_info *info; 50 struct caif_payload_info *info;
51 u32 videoheader = 0; 51 u32 videoheader = 0;
52 int ret; 52 int ret;
53 if (!cfsrvl_ready(service, &ret)) 53
54 if (!cfsrvl_ready(service, &ret)) {
55 cfpkt_destroy(pkt);
54 return ret; 56 return ret;
57 }
58
55 cfpkt_add_head(pkt, &videoheader, 4); 59 cfpkt_add_head(pkt, &videoheader, 4);
56 /* Add info for MUX-layer to route the packet out */ 60 /* Add info for MUX-layer to route the packet out */
57 info = cfpkt_info(pkt); 61 info = cfpkt_info(pkt);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 865690948bbc..69771c04ba8f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -28,6 +28,7 @@
28/* 5 sec. connect timeout */ 28/* 5 sec. connect timeout */
29#define CONNECT_TIMEOUT (5 * HZ) 29#define CONNECT_TIMEOUT (5 * HZ)
30#define CAIF_NET_DEFAULT_QUEUE_LEN 500 30#define CAIF_NET_DEFAULT_QUEUE_LEN 500
31#define UNDEF_CONNID 0xffffffff
31 32
32/*This list is protected by the rtnl lock. */ 33/*This list is protected by the rtnl lock. */
33static LIST_HEAD(chnl_net_list); 34static LIST_HEAD(chnl_net_list);
@@ -72,14 +73,12 @@ static void robust_list_del(struct list_head *delete_node)
72static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) 73static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
73{ 74{
74 struct sk_buff *skb; 75 struct sk_buff *skb;
75 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 76 struct chnl_net *priv;
76 int pktlen; 77 int pktlen;
77 int err = 0;
78 const u8 *ip_version; 78 const u8 *ip_version;
79 u8 buf; 79 u8 buf;
80 80
81 priv = container_of(layr, struct chnl_net, chnl); 81 priv = container_of(layr, struct chnl_net, chnl);
82
83 if (!priv) 82 if (!priv)
84 return -EINVAL; 83 return -EINVAL;
85 84
@@ -95,8 +94,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
95 94
96 /* check the version of IP */ 95 /* check the version of IP */
97 ip_version = skb_header_pointer(skb, 0, 1, &buf); 96 ip_version = skb_header_pointer(skb, 0, 1, &buf);
98 if (!ip_version) 97
99 return -EINVAL;
100 switch (*ip_version >> 4) { 98 switch (*ip_version >> 4) {
101 case 4: 99 case 4:
102 skb->protocol = htons(ETH_P_IP); 100 skb->protocol = htons(ETH_P_IP);
@@ -105,6 +103,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
105 skb->protocol = htons(ETH_P_IPV6); 103 skb->protocol = htons(ETH_P_IPV6);
106 break; 104 break;
107 default: 105 default:
106 kfree_skb(skb);
107 priv->netdev->stats.rx_errors++;
108 return -EINVAL; 108 return -EINVAL;
109 } 109 }
110 110
@@ -123,7 +123,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
123 priv->netdev->stats.rx_packets++; 123 priv->netdev->stats.rx_packets++;
124 priv->netdev->stats.rx_bytes += pktlen; 124 priv->netdev->stats.rx_bytes += pktlen;
125 125
126 return err; 126 return 0;
127} 127}
128 128
129static int delete_device(struct chnl_net *dev) 129static int delete_device(struct chnl_net *dev)
@@ -221,12 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
221 221
222 if (skb->len > priv->netdev->mtu) { 222 if (skb->len > priv->netdev->mtu) {
223 pr_warn("Size of skb exceeded MTU\n"); 223 pr_warn("Size of skb exceeded MTU\n");
224 return -ENOSPC; 224 kfree_skb(skb);
225 dev->stats.tx_errors++;
226 return NETDEV_TX_OK;
225 } 227 }
226 228
227 if (!priv->flowenabled) { 229 if (!priv->flowenabled) {
228 pr_debug("dropping packets flow off\n"); 230 pr_debug("dropping packets flow off\n");
229 return NETDEV_TX_BUSY; 231 kfree_skb(skb);
232 dev->stats.tx_dropped++;
233 return NETDEV_TX_OK;
230 } 234 }
231 235
232 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) 236 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
@@ -240,9 +244,8 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
240 /* Send the packet down the stack. */ 244 /* Send the packet down the stack. */
241 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 245 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
242 if (result) { 246 if (result) {
243 if (result == -EAGAIN) 247 dev->stats.tx_dropped++;
244 result = NETDEV_TX_BUSY; 248 return NETDEV_TX_OK;
245 return result;
246 } 249 }
247 250
248 /* Update statistics. */ 251 /* Update statistics. */
@@ -409,7 +412,7 @@ static void ipcaif_net_setup(struct net_device *dev)
409 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; 412 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
410 priv->conn_req.priority = CAIF_PRIO_LOW; 413 priv->conn_req.priority = CAIF_PRIO_LOW;
411 /* Insert illegal value */ 414 /* Insert illegal value */
412 priv->conn_req.sockaddr.u.dgm.connection_id = 0; 415 priv->conn_req.sockaddr.u.dgm.connection_id = UNDEF_CONNID;
413 priv->flowenabled = false; 416 priv->flowenabled = false;
414 417
415 init_waitqueue_head(&priv->netmgmt_wq); 418 init_waitqueue_head(&priv->netmgmt_wq);
@@ -421,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
421 struct chnl_net *priv; 424 struct chnl_net *priv;
422 u8 loop; 425 u8 loop;
423 priv = netdev_priv(dev); 426 priv = netdev_priv(dev);
424 NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, 427 if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
425 priv->conn_req.sockaddr.u.dgm.connection_id); 428 priv->conn_req.sockaddr.u.dgm.connection_id) ||
426 NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, 429 nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
427 priv->conn_req.sockaddr.u.dgm.connection_id); 430 priv->conn_req.sockaddr.u.dgm.connection_id))
431 goto nla_put_failure;
428 loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; 432 loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
429 NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); 433 if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
430 434 goto nla_put_failure;
431
432 return 0; 435 return 0;
433nla_put_failure: 436nla_put_failure:
434 return -EMSGSIZE; 437 return -EMSGSIZE;
@@ -472,9 +475,11 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
472 else 475 else
473 list_add(&caifdev->list_field, &chnl_net_list); 476 list_add(&caifdev->list_field, &chnl_net_list);
474 477
475 /* Take ifindex as connection-id if null */ 478 /* Use ifindex as connection id, and use loopback channel default. */
476 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0) 479 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == UNDEF_CONNID) {
477 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex; 480 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
481 caifdev->conn_req.protocol = CAIFPROTO_DATAGRAM_LOOP;
482 }
478 return ret; 483 return ret;
479} 484}
480 485
diff --git a/net/can/gw.c b/net/can/gw.c
index 3d79b127881e..b41acf25668f 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); 66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
67MODULE_ALIAS("can-gw"); 67MODULE_ALIAS("can-gw");
68 68
69HLIST_HEAD(cgw_list); 69static HLIST_HEAD(cgw_list);
70static struct notifier_block notifier; 70static struct notifier_block notifier;
71 71
72static struct kmem_cache *cgw_cache __read_mostly; 72static struct kmem_cache *cgw_cache __read_mostly;
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index e02da7a5c5a1..f459e93b774f 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -13,7 +13,7 @@
13 */ 13 */
14struct ceph_x_ticket_handler { 14struct ceph_x_ticket_handler {
15 struct rb_node node; 15 struct rb_node node;
16 unsigned service; 16 unsigned int service;
17 17
18 struct ceph_crypto_key session_key; 18 struct ceph_crypto_key session_key;
19 struct ceph_timespec validity; 19 struct ceph_timespec validity;
@@ -27,7 +27,7 @@ struct ceph_x_ticket_handler {
27 27
28struct ceph_x_authorizer { 28struct ceph_x_authorizer {
29 struct ceph_buffer *buf; 29 struct ceph_buffer *buf;
30 unsigned service; 30 unsigned int service;
31 u64 nonce; 31 u64 nonce;
32 char reply_buf[128]; /* big enough for encrypted blob */ 32 char reply_buf[128]; /* big enough for encrypted blob */
33}; 33};
@@ -38,7 +38,7 @@ struct ceph_x_info {
38 bool starting; 38 bool starting;
39 u64 server_challenge; 39 u64 server_challenge;
40 40
41 unsigned have_keys; 41 unsigned int have_keys;
42 struct rb_root ticket_handlers; 42 struct rb_root ticket_handlers;
43 43
44 struct ceph_x_authorizer auth_authorizer; 44 struct ceph_x_authorizer auth_authorizer;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index c815f31a1a3f..58b09efb528d 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id);
441 * create a fresh client instance 441 * create a fresh client instance
442 */ 442 */
443struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, 443struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
444 unsigned supported_features, 444 unsigned int supported_features,
445 unsigned required_features) 445 unsigned int required_features)
446{ 446{
447 struct ceph_client *client; 447 struct ceph_client *client;
448 struct ceph_entity_addr *myaddr = NULL; 448 struct ceph_entity_addr *myaddr = NULL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 0a1b53bce76d..67bb1f11e613 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -20,7 +20,7 @@
20 c = c - a; c = c - b; c = c ^ (b >> 15); \ 20 c = c - a; c = c - b; c = c ^ (b >> 15); \
21 } while (0) 21 } while (0)
22 22
23unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) 23unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
24{ 24{
25 const unsigned char *k = (const unsigned char *)str; 25 const unsigned char *k = (const unsigned char *)str;
26 __u32 a, b, c; /* the internal state */ 26 __u32 a, b, c; /* the internal state */
@@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
81/* 81/*
82 * linux dcache hash 82 * linux dcache hash
83 */ 83 */
84unsigned ceph_str_hash_linux(const char *str, unsigned length) 84unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
85{ 85{
86 unsigned long hash = 0; 86 unsigned long hash = 0;
87 unsigned char c; 87 unsigned char c;
@@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length)
94} 94}
95 95
96 96
97unsigned ceph_str_hash(int type, const char *s, unsigned len) 97unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
98{ 98{
99 switch (type) { 99 switch (type) {
100 case CEPH_STR_HASH_LINUX: 100 case CEPH_STR_HASH_LINUX:
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 11cf352201ba..d7edc24333b8 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/crush/crush.h> 21#include <linux/crush/crush.h>
22#include <linux/crush/hash.h> 22#include <linux/crush/hash.h>
23#include <linux/crush/mapper.h>
23 24
24/* 25/*
25 * Implement the core CRUSH mapping algorithm. 26 * Implement the core CRUSH mapping algorithm.
@@ -68,8 +69,8 @@ int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size
68static int bucket_perm_choose(struct crush_bucket *bucket, 69static int bucket_perm_choose(struct crush_bucket *bucket,
69 int x, int r) 70 int x, int r)
70{ 71{
71 unsigned pr = r % bucket->size; 72 unsigned int pr = r % bucket->size;
72 unsigned i, s; 73 unsigned int i, s;
73 74
74 /* start a new permutation if @x has changed */ 75 /* start a new permutation if @x has changed */
75 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { 76 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
@@ -100,13 +101,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
100 for (i = 0; i < bucket->perm_n; i++) 101 for (i = 0; i < bucket->perm_n; i++)
101 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); 102 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
102 while (bucket->perm_n <= pr) { 103 while (bucket->perm_n <= pr) {
103 unsigned p = bucket->perm_n; 104 unsigned int p = bucket->perm_n;
104 /* no point in swapping the final entry */ 105 /* no point in swapping the final entry */
105 if (p < bucket->size - 1) { 106 if (p < bucket->size - 1) {
106 i = crush_hash32_3(bucket->hash, x, bucket->id, p) % 107 i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
107 (bucket->size - p); 108 (bucket->size - p);
108 if (i) { 109 if (i) {
109 unsigned t = bucket->perm[p + i]; 110 unsigned int t = bucket->perm[p + i];
110 bucket->perm[p + i] = bucket->perm[p]; 111 bucket->perm[p + i] = bucket->perm[p];
111 bucket->perm[p] = t; 112 bucket->perm[p] = t;
112 } 113 }
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 27d4ea315d12..54b531a01121 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -94,9 +94,9 @@ static int monc_show(struct seq_file *s, void *p)
94 mutex_lock(&monc->mutex); 94 mutex_lock(&monc->mutex);
95 95
96 if (monc->have_mdsmap) 96 if (monc->have_mdsmap)
97 seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); 97 seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
98 if (monc->have_osdmap) 98 if (monc->have_osdmap)
99 seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); 99 seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
100 if (monc->want_next_osdmap) 100 if (monc->want_next_osdmap)
101 seq_printf(s, "want next osdmap\n"); 101 seq_printf(s, "want next osdmap\n");
102 102
@@ -146,7 +146,7 @@ static int osdc_show(struct seq_file *s, void *pp)
146 146
147 if (req->r_reassert_version.epoch) 147 if (req->r_reassert_version.epoch)
148 seq_printf(s, "\t%u'%llu", 148 seq_printf(s, "\t%u'%llu",
149 (unsigned)le32_to_cpu(req->r_reassert_version.epoch), 149 (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
150 le64_to_cpu(req->r_reassert_version.version)); 150 le64_to_cpu(req->r_reassert_version.version));
151 else 151 else
152 seq_printf(s, "\t"); 152 seq_printf(s, "\t");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index bdbecac2d69d..5e9f61d6d234 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -789,7 +789,7 @@ static void prepare_write_banner(struct ceph_connection *con)
789 789
790static int prepare_write_connect(struct ceph_connection *con) 790static int prepare_write_connect(struct ceph_connection *con)
791{ 791{
792 unsigned global_seq = get_global_seq(con->msgr, 0); 792 unsigned int global_seq = get_global_seq(con->msgr, 0);
793 int proto; 793 int proto;
794 int auth_proto; 794 int auth_proto;
795 struct ceph_auth_handshake *auth; 795 struct ceph_auth_handshake *auth;
@@ -917,7 +917,7 @@ static void iter_bio_next(struct bio **bio_iter, int *seg)
917static int write_partial_msg_pages(struct ceph_connection *con) 917static int write_partial_msg_pages(struct ceph_connection *con)
918{ 918{
919 struct ceph_msg *msg = con->out_msg; 919 struct ceph_msg *msg = con->out_msg;
920 unsigned data_len = le32_to_cpu(msg->hdr.data_len); 920 unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
921 size_t len; 921 size_t len;
922 bool do_datacrc = !con->msgr->nocrc; 922 bool do_datacrc = !con->msgr->nocrc;
923 int ret; 923 int ret;
@@ -1673,7 +1673,7 @@ static bool ceph_con_in_msg_alloc(struct ceph_connection *con,
1673 1673
1674static int read_partial_message_pages(struct ceph_connection *con, 1674static int read_partial_message_pages(struct ceph_connection *con,
1675 struct page **pages, 1675 struct page **pages,
1676 unsigned data_len, bool do_datacrc) 1676 unsigned int data_len, bool do_datacrc)
1677{ 1677{
1678 void *p; 1678 void *p;
1679 int ret; 1679 int ret;
@@ -1706,7 +1706,7 @@ static int read_partial_message_pages(struct ceph_connection *con,
1706#ifdef CONFIG_BLOCK 1706#ifdef CONFIG_BLOCK
1707static int read_partial_message_bio(struct ceph_connection *con, 1707static int read_partial_message_bio(struct ceph_connection *con,
1708 struct bio **bio_iter, int *bio_seg, 1708 struct bio **bio_iter, int *bio_seg,
1709 unsigned data_len, bool do_datacrc) 1709 unsigned int data_len, bool do_datacrc)
1710{ 1710{
1711 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 1711 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1712 void *p; 1712 void *p;
@@ -1749,7 +1749,7 @@ static int read_partial_message(struct ceph_connection *con)
1749 int size; 1749 int size;
1750 int end; 1750 int end;
1751 int ret; 1751 int ret;
1752 unsigned front_len, middle_len, data_len; 1752 unsigned int front_len, middle_len, data_len;
1753 bool do_datacrc = !con->msgr->nocrc; 1753 bool do_datacrc = !con->msgr->nocrc;
1754 u64 seq; 1754 u64 seq;
1755 u32 crc; 1755 u32 crc;
@@ -2473,9 +2473,9 @@ void ceph_msg_revoke_incoming(struct ceph_msg *msg)
2473 con = msg->con; 2473 con = msg->con;
2474 mutex_lock(&con->mutex); 2474 mutex_lock(&con->mutex);
2475 if (con->in_msg == msg) { 2475 if (con->in_msg == msg) {
2476 unsigned front_len = le32_to_cpu(con->in_hdr.front_len); 2476 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
2477 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); 2477 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
2478 unsigned data_len = le32_to_cpu(con->in_hdr.data_len); 2478 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
2479 2479
2480 /* skip rest of message */ 2480 /* skip rest of message */
2481 dout("%s %p msg %p revoked\n", __func__, con, msg); 2481 dout("%s %p msg %p revoked\n", __func__, con, msg);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index dc16595d6885..e9db3de20b2e 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -171,7 +171,7 @@ static bool __sub_expired(struct ceph_mon_client *monc)
171 */ 171 */
172static void __schedule_delayed(struct ceph_mon_client *monc) 172static void __schedule_delayed(struct ceph_mon_client *monc)
173{ 173{
174 unsigned delay; 174 unsigned int delay;
175 175
176 if (monc->cur_mon < 0 || __sub_expired(monc)) 176 if (monc->cur_mon < 0 || __sub_expired(monc))
177 delay = 10 * HZ; 177 delay = 10 * HZ;
@@ -187,7 +187,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
187static void __send_subscribe(struct ceph_mon_client *monc) 187static void __send_subscribe(struct ceph_mon_client *monc)
188{ 188{
189 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", 189 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
190 (unsigned)monc->sub_sent, __sub_expired(monc), 190 (unsigned int)monc->sub_sent, __sub_expired(monc),
191 monc->want_next_osdmap); 191 monc->want_next_osdmap);
192 if ((__sub_expired(monc) && !monc->sub_sent) || 192 if ((__sub_expired(monc) && !monc->sub_sent) ||
193 monc->want_next_osdmap == 1) { 193 monc->want_next_osdmap == 1) {
@@ -204,7 +204,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
204 204
205 if (monc->want_next_osdmap) { 205 if (monc->want_next_osdmap) {
206 dout("__send_subscribe to 'osdmap' %u\n", 206 dout("__send_subscribe to 'osdmap' %u\n",
207 (unsigned)monc->have_osdmap); 207 (unsigned int)monc->have_osdmap);
208 ceph_encode_string(&p, end, "osdmap", 6); 208 ceph_encode_string(&p, end, "osdmap", 6);
209 i = p; 209 i = p;
210 i->have = cpu_to_le64(monc->have_osdmap); 210 i->have = cpu_to_le64(monc->have_osdmap);
@@ -214,7 +214,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
214 } 214 }
215 if (monc->want_mdsmap) { 215 if (monc->want_mdsmap) {
216 dout("__send_subscribe to 'mdsmap' %u+\n", 216 dout("__send_subscribe to 'mdsmap' %u+\n",
217 (unsigned)monc->have_mdsmap); 217 (unsigned int)monc->have_mdsmap);
218 ceph_encode_string(&p, end, "mdsmap", 6); 218 ceph_encode_string(&p, end, "mdsmap", 6);
219 i = p; 219 i = p;
220 i->have = cpu_to_le64(monc->have_mdsmap); 220 i->have = cpu_to_le64(monc->have_mdsmap);
@@ -239,7 +239,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
239static void handle_subscribe_ack(struct ceph_mon_client *monc, 239static void handle_subscribe_ack(struct ceph_mon_client *monc,
240 struct ceph_msg *msg) 240 struct ceph_msg *msg)
241{ 241{
242 unsigned seconds; 242 unsigned int seconds;
243 struct ceph_mon_subscribe_ack *h = msg->front.iov_base; 243 struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
244 244
245 if (msg->front.iov_len < sizeof(*h)) 245 if (msg->front.iov_len < sizeof(*h))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c178c770acb4..db2da54f7336 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1216 } 1216 }
1217 1217
1218 if (!req->r_got_reply) { 1218 if (!req->r_got_reply) {
1219 unsigned bytes; 1219 unsigned int bytes;
1220 1220
1221 req->r_result = le32_to_cpu(rhead->result); 1221 req->r_result = le32_to_cpu(rhead->result);
1222 bytes = le32_to_cpu(msg->hdr.data_len); 1222 bytes = le32_to_cpu(msg->hdr.data_len);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d3de09f519b2..9600674c2c39 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -38,7 +38,7 @@ done:
38 38
39/* maps */ 39/* maps */
40 40
41static int calc_bits_of(unsigned t) 41static int calc_bits_of(unsigned int t)
42{ 42{
43 int b = 0; 43 int b = 0;
44 while (t) { 44 while (t) {
@@ -154,7 +154,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
154 magic = ceph_decode_32(p); 154 magic = ceph_decode_32(p);
155 if (magic != CRUSH_MAGIC) { 155 if (magic != CRUSH_MAGIC) {
156 pr_err("crush_decode magic %x != current %x\n", 156 pr_err("crush_decode magic %x != current %x\n",
157 (unsigned)magic, (unsigned)CRUSH_MAGIC); 157 (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
158 goto bad; 158 goto bad;
159 } 159 }
160 c->max_buckets = ceph_decode_32(p); 160 c->max_buckets = ceph_decode_32(p);
@@ -453,7 +453,7 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
453 453
454static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 454static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
455{ 455{
456 unsigned n, m; 456 unsigned int n, m;
457 457
458 ceph_decode_copy(p, &pi->v, sizeof(pi->v)); 458 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
459 calc_pg_masks(pi); 459 calc_pg_masks(pi);
@@ -975,7 +975,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
975 objsetno = stripeno / su_per_object; 975 objsetno = stripeno / su_per_object;
976 976
977 *ono = objsetno * sc + stripepos; 977 *ono = objsetno * sc + stripepos;
978 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); 978 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
979 979
980 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 980 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
981 t = off; 981 t = off;
@@ -1003,11 +1003,11 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
1003 struct ceph_file_layout *fl, 1003 struct ceph_file_layout *fl,
1004 struct ceph_osdmap *osdmap) 1004 struct ceph_osdmap *osdmap)
1005{ 1005{
1006 unsigned num, num_mask; 1006 unsigned int num, num_mask;
1007 struct ceph_pg pgid; 1007 struct ceph_pg pgid;
1008 int poolid = le32_to_cpu(fl->fl_pg_pool); 1008 int poolid = le32_to_cpu(fl->fl_pg_pool);
1009 struct ceph_pg_pool_info *pool; 1009 struct ceph_pg_pool_info *pool;
1010 unsigned ps; 1010 unsigned int ps;
1011 1011
1012 BUG_ON(!osdmap); 1012 BUG_ON(!osdmap);
1013 1013
@@ -1039,7 +1039,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1039 struct ceph_pg_mapping *pg; 1039 struct ceph_pg_mapping *pg;
1040 struct ceph_pg_pool_info *pool; 1040 struct ceph_pg_pool_info *pool;
1041 int ruleno; 1041 int ruleno;
1042 unsigned poolid, ps, pps, t, r; 1042 unsigned int poolid, ps, pps, t, r;
1043 1043
1044 poolid = le32_to_cpu(pgid.pool); 1044 poolid = le32_to_cpu(pgid.pool);
1045 ps = le16_to_cpu(pgid.ps); 1045 ps = le16_to_cpu(pgid.ps);
diff --git a/net/compat.c b/net/compat.c
index 6def90e0a112..1b96281892de 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -79,7 +79,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79 79
80/* I've named the args so it is easy to tell whose space the pointers are in. */ 80/* I've named the args so it is easy to tell whose space the pointers are in. */
81int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, 81int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
82 struct sockaddr *kern_address, int mode) 82 struct sockaddr_storage *kern_address, int mode)
83{ 83{
84 int tot_len; 84 int tot_len;
85 85
@@ -219,8 +219,6 @@ Efault:
219 219
220int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data) 220int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
221{ 221{
222 struct compat_timeval ctv;
223 struct compat_timespec cts[3];
224 struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; 222 struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
225 struct compat_cmsghdr cmhdr; 223 struct compat_cmsghdr cmhdr;
226 int cmlen; 224 int cmlen;
@@ -230,24 +228,28 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
230 return 0; /* XXX: return error? check spec. */ 228 return 0; /* XXX: return error? check spec. */
231 } 229 }
232 230
233 if (level == SOL_SOCKET && type == SCM_TIMESTAMP) { 231 if (!COMPAT_USE_64BIT_TIME) {
234 struct timeval *tv = (struct timeval *)data; 232 struct compat_timeval ctv;
235 ctv.tv_sec = tv->tv_sec; 233 struct compat_timespec cts[3];
236 ctv.tv_usec = tv->tv_usec; 234 if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
237 data = &ctv; 235 struct timeval *tv = (struct timeval *)data;
238 len = sizeof(ctv); 236 ctv.tv_sec = tv->tv_sec;
239 } 237 ctv.tv_usec = tv->tv_usec;
240 if (level == SOL_SOCKET && 238 data = &ctv;
241 (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) { 239 len = sizeof(ctv);
242 int count = type == SCM_TIMESTAMPNS ? 1 : 3; 240 }
243 int i; 241 if (level == SOL_SOCKET &&
244 struct timespec *ts = (struct timespec *)data; 242 (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
245 for (i = 0; i < count; i++) { 243 int count = type == SCM_TIMESTAMPNS ? 1 : 3;
246 cts[i].tv_sec = ts[i].tv_sec; 244 int i;
247 cts[i].tv_nsec = ts[i].tv_nsec; 245 struct timespec *ts = (struct timespec *)data;
246 for (i = 0; i < count; i++) {
247 cts[i].tv_sec = ts[i].tv_sec;
248 cts[i].tv_nsec = ts[i].tv_nsec;
249 }
250 data = &cts;
251 len = sizeof(cts[0]) * count;
248 } 252 }
249 data = &cts;
250 len = sizeof(cts[0]) * count;
251 } 253 }
252 254
253 cmlen = CMSG_COMPAT_LEN(len); 255 cmlen = CMSG_COMPAT_LEN(len);
@@ -326,14 +328,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
326 __scm_destroy(scm); 328 __scm_destroy(scm);
327} 329}
328 330
329/*
330 * A struct sock_filter is architecture independent.
331 */
332struct compat_sock_fprog {
333 u16 len;
334 compat_uptr_t filter; /* struct sock_filter * */
335};
336
337static int do_set_attach_filter(struct socket *sock, int level, int optname, 331static int do_set_attach_filter(struct socket *sock, int level, int optname,
338 char __user *optval, unsigned int optlen) 332 char __user *optval, unsigned int optlen)
339{ 333{
@@ -454,11 +448,15 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
454 448
455int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 449int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
456{ 450{
457 struct compat_timeval __user *ctv = 451 struct compat_timeval __user *ctv;
458 (struct compat_timeval __user *) userstamp; 452 int err;
459 int err = -ENOENT;
460 struct timeval tv; 453 struct timeval tv;
461 454
455 if (COMPAT_USE_64BIT_TIME)
456 return sock_get_timestamp(sk, userstamp);
457
458 ctv = (struct compat_timeval __user *) userstamp;
459 err = -ENOENT;
462 if (!sock_flag(sk, SOCK_TIMESTAMP)) 460 if (!sock_flag(sk, SOCK_TIMESTAMP))
463 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 461 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
464 tv = ktime_to_timeval(sk->sk_stamp); 462 tv = ktime_to_timeval(sk->sk_stamp);
@@ -478,11 +476,15 @@ EXPORT_SYMBOL(compat_sock_get_timestamp);
478 476
479int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) 477int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
480{ 478{
481 struct compat_timespec __user *ctv = 479 struct compat_timespec __user *ctv;
482 (struct compat_timespec __user *) userstamp; 480 int err;
483 int err = -ENOENT;
484 struct timespec ts; 481 struct timespec ts;
485 482
483 if (COMPAT_USE_64BIT_TIME)
484 return sock_get_timestampns (sk, userstamp);
485
486 ctv = (struct compat_timespec __user *) userstamp;
487 err = -ENOENT;
486 if (!sock_flag(sk, SOCK_TIMESTAMP)) 488 if (!sock_flag(sk, SOCK_TIMESTAMP))
487 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 489 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
488 ts = ktime_to_timespec(sk->sk_stamp); 490 ts = ktime_to_timespec(sk->sk_stamp);
@@ -731,13 +733,13 @@ static unsigned char nas[21] = {
731}; 733};
732#undef AL 734#undef AL
733 735
734asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) 736asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
735{ 737{
736 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 738 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
737} 739}
738 740
739asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, 741asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
740 unsigned vlen, unsigned int flags) 742 unsigned int vlen, unsigned int flags)
741{ 743{
742 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 744 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
743 flags | MSG_CMSG_COMPAT); 745 flags | MSG_CMSG_COMPAT);
@@ -748,25 +750,30 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
748 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 750 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
749} 751}
750 752
751asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) 753asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
752{ 754{
753 return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT); 755 return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
754} 756}
755 757
756asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, 758asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
757 unsigned flags, struct sockaddr __user *addr, 759 unsigned int flags, struct sockaddr __user *addr,
758 int __user *addrlen) 760 int __user *addrlen)
759{ 761{
760 return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); 762 return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
761} 763}
762 764
763asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, 765asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
764 unsigned vlen, unsigned int flags, 766 unsigned int vlen, unsigned int flags,
765 struct compat_timespec __user *timeout) 767 struct compat_timespec __user *timeout)
766{ 768{
767 int datagrams; 769 int datagrams;
768 struct timespec ktspec; 770 struct timespec ktspec;
769 771
772 if (COMPAT_USE_64BIT_TIME)
773 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
774 flags | MSG_CMSG_COMPAT,
775 (struct timespec *) timeout);
776
770 if (timeout == NULL) 777 if (timeout == NULL)
771 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 778 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
772 flags | MSG_CMSG_COMPAT, NULL); 779 flags | MSG_CMSG_COMPAT, NULL);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 68bbf9f65cb0..ae6acf6a3dea 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -37,7 +37,6 @@
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/system.h>
41#include <linux/mm.h> 40#include <linux/mm.h>
42#include <linux/interrupt.h> 41#include <linux/interrupt.h>
43#include <linux/errno.h> 42#include <linux/errno.h>
@@ -66,7 +65,7 @@ static inline int connection_based(struct sock *sk)
66 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; 65 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
67} 66}
68 67
69static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, 68static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
70 void *key) 69 void *key)
71{ 70{
72 unsigned long bits = (unsigned long)key; 71 unsigned long bits = (unsigned long)key;
@@ -132,6 +131,8 @@ out_noerr:
132 * __skb_recv_datagram - Receive a datagram skbuff 131 * __skb_recv_datagram - Receive a datagram skbuff
133 * @sk: socket 132 * @sk: socket
134 * @flags: MSG_ flags 133 * @flags: MSG_ flags
134 * @off: an offset in bytes to peek skb from. Returns an offset
135 * within an skb where data actually starts
135 * @peeked: returns non-zero if this packet has been seen before 136 * @peeked: returns non-zero if this packet has been seen before
136 * @err: error code returned 137 * @err: error code returned
137 * 138 *
@@ -157,8 +158,8 @@ out_noerr:
157 * quite explicitly by POSIX 1003.1g, don't change them without having 158 * quite explicitly by POSIX 1003.1g, don't change them without having
158 * the standard around please. 159 * the standard around please.
159 */ 160 */
160struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 161struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
161 int *peeked, int *err) 162 int *peeked, int *off, int *err)
162{ 163{
163 struct sk_buff *skb; 164 struct sk_buff *skb;
164 long timeo; 165 long timeo;
@@ -180,21 +181,25 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
180 * However, this function was correct in any case. 8) 181 * However, this function was correct in any case. 8)
181 */ 182 */
182 unsigned long cpu_flags; 183 unsigned long cpu_flags;
184 struct sk_buff_head *queue = &sk->sk_receive_queue;
183 185
184 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 186 spin_lock_irqsave(&queue->lock, cpu_flags);
185 skb = skb_peek(&sk->sk_receive_queue); 187 skb_queue_walk(queue, skb) {
186 if (skb) {
187 *peeked = skb->peeked; 188 *peeked = skb->peeked;
188 if (flags & MSG_PEEK) { 189 if (flags & MSG_PEEK) {
190 if (*off >= skb->len) {
191 *off -= skb->len;
192 continue;
193 }
189 skb->peeked = 1; 194 skb->peeked = 1;
190 atomic_inc(&skb->users); 195 atomic_inc(&skb->users);
191 } else 196 } else
192 __skb_unlink(skb, &sk->sk_receive_queue); 197 __skb_unlink(skb, queue);
193 }
194 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
195 198
196 if (skb) 199 spin_unlock_irqrestore(&queue->lock, cpu_flags);
197 return skb; 200 return skb;
201 }
202 spin_unlock_irqrestore(&queue->lock, cpu_flags);
198 203
199 /* User doesn't want to wait */ 204 /* User doesn't want to wait */
200 error = -EAGAIN; 205 error = -EAGAIN;
@@ -211,13 +216,13 @@ no_packet:
211} 216}
212EXPORT_SYMBOL(__skb_recv_datagram); 217EXPORT_SYMBOL(__skb_recv_datagram);
213 218
214struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 219struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
215 int noblock, int *err) 220 int noblock, int *err)
216{ 221{
217 int peeked; 222 int peeked, off = 0;
218 223
219 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 224 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
220 &peeked, err); 225 &peeked, &off, err);
221} 226}
222EXPORT_SYMBOL(skb_recv_datagram); 227EXPORT_SYMBOL(skb_recv_datagram);
223 228
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca32f6b3105..cd0981977f5c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -73,7 +73,6 @@
73 */ 73 */
74 74
75#include <asm/uaccess.h> 75#include <asm/uaccess.h>
76#include <asm/system.h>
77#include <linux/bitops.h> 76#include <linux/bitops.h>
78#include <linux/capability.h> 77#include <linux/capability.h>
79#include <linux/cpu.h> 78#include <linux/cpu.h>
@@ -134,7 +133,7 @@
134#include <linux/inetdevice.h> 133#include <linux/inetdevice.h>
135#include <linux/cpu_rmap.h> 134#include <linux/cpu_rmap.h>
136#include <linux/net_tstamp.h> 135#include <linux/net_tstamp.h>
137#include <linux/jump_label.h> 136#include <linux/static_key.h>
138#include <net/flow_keys.h> 137#include <net/flow_keys.h>
139 138
140#include "net-sysfs.h" 139#include "net-sysfs.h"
@@ -209,7 +208,8 @@ static inline void dev_base_seq_inc(struct net *net)
209 208
210static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 209static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
211{ 210{
212 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); 211 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212
213 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 213 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
214} 214}
215 215
@@ -300,10 +300,9 @@ static const unsigned short netdev_lock_type[] =
300 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 300 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 301 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 302 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, 303 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
304 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, 304 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
305 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, 305 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
306 ARPHRD_VOID, ARPHRD_NONE};
307 306
308static const char *const netdev_lock_name[] = 307static const char *const netdev_lock_name[] =
309 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 308 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -318,10 +317,9 @@ static const char *const netdev_lock_name[] =
318 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 317 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
319 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 318 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
320 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 319 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
321 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 320 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
322 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 321 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
323 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", 322 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
324 "_xmit_VOID", "_xmit_NONE"};
325 323
326static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 324static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
327static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 325static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -446,7 +444,7 @@ void __dev_remove_pack(struct packet_type *pt)
446 } 444 }
447 } 445 }
448 446
449 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); 447 pr_warn("dev_remove_pack: %p not found\n", pt);
450out: 448out:
451 spin_unlock(&ptype_lock); 449 spin_unlock(&ptype_lock);
452} 450}
@@ -848,21 +846,21 @@ EXPORT_SYMBOL(dev_get_by_flags_rcu);
848 * to allow sysfs to work. We also disallow any kind of 846 * to allow sysfs to work. We also disallow any kind of
849 * whitespace. 847 * whitespace.
850 */ 848 */
851int dev_valid_name(const char *name) 849bool dev_valid_name(const char *name)
852{ 850{
853 if (*name == '\0') 851 if (*name == '\0')
854 return 0; 852 return false;
855 if (strlen(name) >= IFNAMSIZ) 853 if (strlen(name) >= IFNAMSIZ)
856 return 0; 854 return false;
857 if (!strcmp(name, ".") || !strcmp(name, "..")) 855 if (!strcmp(name, ".") || !strcmp(name, ".."))
858 return 0; 856 return false;
859 857
860 while (*name) { 858 while (*name) {
861 if (*name == '/' || isspace(*name)) 859 if (*name == '/' || isspace(*name))
862 return 0; 860 return false;
863 name++; 861 name++;
864 } 862 }
865 return 1; 863 return true;
866} 864}
867EXPORT_SYMBOL(dev_valid_name); 865EXPORT_SYMBOL(dev_valid_name);
868 866
@@ -1039,8 +1037,7 @@ rollback:
1039 memcpy(dev->name, oldname, IFNAMSIZ); 1037 memcpy(dev->name, oldname, IFNAMSIZ);
1040 goto rollback; 1038 goto rollback;
1041 } else { 1039 } else {
1042 printk(KERN_ERR 1040 pr_err("%s: name change rollback failed: %d\n",
1043 "%s: name change rollback failed: %d.\n",
1044 dev->name, ret); 1041 dev->name, ret);
1045 } 1042 }
1046 } 1043 }
@@ -1139,9 +1136,8 @@ void dev_load(struct net *net, const char *name)
1139 no_module = request_module("netdev-%s", name); 1136 no_module = request_module("netdev-%s", name);
1140 if (no_module && capable(CAP_SYS_MODULE)) { 1137 if (no_module && capable(CAP_SYS_MODULE)) {
1141 if (!request_module("%s", name)) 1138 if (!request_module("%s", name))
1142 pr_err("Loading kernel module for a network device " 1139 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1143"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " 1140 name);
1144"instead\n", name);
1145 } 1141 }
1146} 1142}
1147EXPORT_SYMBOL(dev_load); 1143EXPORT_SYMBOL(dev_load);
@@ -1412,14 +1408,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
1412 * register_netdevice_notifier(). The notifier is unlinked into the 1408 * register_netdevice_notifier(). The notifier is unlinked into the
1413 * kernel structures and may then be reused. A negative errno code 1409 * kernel structures and may then be reused. A negative errno code
1414 * is returned on a failure. 1410 * is returned on a failure.
1411 *
1412 * After unregistering unregister and down device events are synthesized
1413 * for all devices on the device list to the removed notifier to remove
1414 * the need for special case cleanup code.
1415 */ 1415 */
1416 1416
1417int unregister_netdevice_notifier(struct notifier_block *nb) 1417int unregister_netdevice_notifier(struct notifier_block *nb)
1418{ 1418{
1419 struct net_device *dev;
1420 struct net *net;
1419 int err; 1421 int err;
1420 1422
1421 rtnl_lock(); 1423 rtnl_lock();
1422 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1424 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1425 if (err)
1426 goto unlock;
1427
1428 for_each_net(net) {
1429 for_each_netdev(net, dev) {
1430 if (dev->flags & IFF_UP) {
1431 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1432 nb->notifier_call(nb, NETDEV_DOWN, dev);
1433 }
1434 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1435 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1436 }
1437 }
1438unlock:
1423 rtnl_unlock(); 1439 rtnl_unlock();
1424 return err; 1440 return err;
1425} 1441}
@@ -1441,11 +1457,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1441} 1457}
1442EXPORT_SYMBOL(call_netdevice_notifiers); 1458EXPORT_SYMBOL(call_netdevice_notifiers);
1443 1459
1444static struct jump_label_key netstamp_needed __read_mostly; 1460static struct static_key netstamp_needed __read_mostly;
1445#ifdef HAVE_JUMP_LABEL 1461#ifdef HAVE_JUMP_LABEL
1446/* We are not allowed to call jump_label_dec() from irq context 1462/* We are not allowed to call static_key_slow_dec() from irq context
1447 * If net_disable_timestamp() is called from irq context, defer the 1463 * If net_disable_timestamp() is called from irq context, defer the
1448 * jump_label_dec() calls. 1464 * static_key_slow_dec() calls.
1449 */ 1465 */
1450static atomic_t netstamp_needed_deferred; 1466static atomic_t netstamp_needed_deferred;
1451#endif 1467#endif
@@ -1457,12 +1473,12 @@ void net_enable_timestamp(void)
1457 1473
1458 if (deferred) { 1474 if (deferred) {
1459 while (--deferred) 1475 while (--deferred)
1460 jump_label_dec(&netstamp_needed); 1476 static_key_slow_dec(&netstamp_needed);
1461 return; 1477 return;
1462 } 1478 }
1463#endif 1479#endif
1464 WARN_ON(in_interrupt()); 1480 WARN_ON(in_interrupt());
1465 jump_label_inc(&netstamp_needed); 1481 static_key_slow_inc(&netstamp_needed);
1466} 1482}
1467EXPORT_SYMBOL(net_enable_timestamp); 1483EXPORT_SYMBOL(net_enable_timestamp);
1468 1484
@@ -1474,19 +1490,19 @@ void net_disable_timestamp(void)
1474 return; 1490 return;
1475 } 1491 }
1476#endif 1492#endif
1477 jump_label_dec(&netstamp_needed); 1493 static_key_slow_dec(&netstamp_needed);
1478} 1494}
1479EXPORT_SYMBOL(net_disable_timestamp); 1495EXPORT_SYMBOL(net_disable_timestamp);
1480 1496
1481static inline void net_timestamp_set(struct sk_buff *skb) 1497static inline void net_timestamp_set(struct sk_buff *skb)
1482{ 1498{
1483 skb->tstamp.tv64 = 0; 1499 skb->tstamp.tv64 = 0;
1484 if (static_branch(&netstamp_needed)) 1500 if (static_key_false(&netstamp_needed))
1485 __net_timestamp(skb); 1501 __net_timestamp(skb);
1486} 1502}
1487 1503
1488#define net_timestamp_check(COND, SKB) \ 1504#define net_timestamp_check(COND, SKB) \
1489 if (static_branch(&netstamp_needed)) { \ 1505 if (static_key_false(&netstamp_needed)) { \
1490 if ((COND) && !(SKB)->tstamp.tv64) \ 1506 if ((COND) && !(SKB)->tstamp.tv64) \
1491 __net_timestamp(SKB); \ 1507 __net_timestamp(SKB); \
1492 } \ 1508 } \
@@ -1599,10 +1615,15 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1599 kfree_skb(skb); 1615 kfree_skb(skb);
1600 return NET_RX_DROP; 1616 return NET_RX_DROP;
1601 } 1617 }
1602 skb_set_dev(skb, dev); 1618 skb->skb_iif = 0;
1619 skb->dev = dev;
1620 skb_dst_drop(skb);
1603 skb->tstamp.tv64 = 0; 1621 skb->tstamp.tv64 = 0;
1604 skb->pkt_type = PACKET_HOST; 1622 skb->pkt_type = PACKET_HOST;
1605 skb->protocol = eth_type_trans(skb, dev); 1623 skb->protocol = eth_type_trans(skb, dev);
1624 skb->mark = 0;
1625 secpath_reset(skb);
1626 nf_reset(skb);
1606 return netif_rx(skb); 1627 return netif_rx(skb);
1607} 1628}
1608EXPORT_SYMBOL_GPL(dev_forward_skb); 1629EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1654,11 +1675,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1654 1675
1655 if (skb_network_header(skb2) < skb2->data || 1676 if (skb_network_header(skb2) < skb2->data ||
1656 skb2->network_header > skb2->tail) { 1677 skb2->network_header > skb2->tail) {
1657 if (net_ratelimit()) 1678 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1658 printk(KERN_CRIT "protocol %04x is " 1679 ntohs(skb2->protocol),
1659 "buggy, dev %s\n", 1680 dev->name);
1660 ntohs(skb2->protocol),
1661 dev->name);
1662 skb_reset_network_header(skb2); 1681 skb_reset_network_header(skb2);
1663 } 1682 }
1664 1683
@@ -1691,9 +1710,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1691 1710
1692 /* If TC0 is invalidated disable TC mapping */ 1711 /* If TC0 is invalidated disable TC mapping */
1693 if (tc->offset + tc->count > txq) { 1712 if (tc->offset + tc->count > txq) {
1694 pr_warning("Number of in use tx queues changed " 1713 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1695 "invalidating tc mappings. Priority "
1696 "traffic classification disabled!\n");
1697 dev->num_tc = 0; 1714 dev->num_tc = 0;
1698 return; 1715 return;
1699 } 1716 }
@@ -1704,11 +1721,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1704 1721
1705 tc = &dev->tc_to_txq[q]; 1722 tc = &dev->tc_to_txq[q];
1706 if (tc->offset + tc->count > txq) { 1723 if (tc->offset + tc->count > txq) {
1707 pr_warning("Number of in use tx queues " 1724 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1708 "changed. Priority %i to tc " 1725 i, q);
1709 "mapping %i is no longer valid "
1710 "setting map to 0\n",
1711 i, q);
1712 netdev_set_prio_tc_map(dev, i, 0); 1726 netdev_set_prio_tc_map(dev, i, 0);
1713 } 1727 }
1714 } 1728 }
@@ -1857,36 +1871,6 @@ void netif_device_attach(struct net_device *dev)
1857} 1871}
1858EXPORT_SYMBOL(netif_device_attach); 1872EXPORT_SYMBOL(netif_device_attach);
1859 1873
1860/**
1861 * skb_dev_set -- assign a new device to a buffer
1862 * @skb: buffer for the new device
1863 * @dev: network device
1864 *
1865 * If an skb is owned by a device already, we have to reset
1866 * all data private to the namespace a device belongs to
1867 * before assigning it a new device.
1868 */
1869#ifdef CONFIG_NET_NS
1870void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1871{
1872 skb_dst_drop(skb);
1873 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1874 secpath_reset(skb);
1875 nf_reset(skb);
1876 skb_init_secmark(skb);
1877 skb->mark = 0;
1878 skb->priority = 0;
1879 skb->nf_trace = 0;
1880 skb->ipvs_property = 0;
1881#ifdef CONFIG_NET_SCHED
1882 skb->tc_index = 0;
1883#endif
1884 }
1885 skb->dev = dev;
1886}
1887EXPORT_SYMBOL(skb_set_dev);
1888#endif /* CONFIG_NET_NS */
1889
1890static void skb_warn_bad_offload(const struct sk_buff *skb) 1874static void skb_warn_bad_offload(const struct sk_buff *skb)
1891{ 1875{
1892 static const netdev_features_t null_features = 0; 1876 static const netdev_features_t null_features = 0;
@@ -2014,8 +1998,7 @@ EXPORT_SYMBOL(skb_gso_segment);
2014void netdev_rx_csum_fault(struct net_device *dev) 1998void netdev_rx_csum_fault(struct net_device *dev)
2015{ 1999{
2016 if (net_ratelimit()) { 2000 if (net_ratelimit()) {
2017 printk(KERN_ERR "%s: hw csum failure.\n", 2001 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2018 dev ? dev->name : "<unknown>");
2019 dump_stack(); 2002 dump_stack();
2020 } 2003 }
2021} 2004}
@@ -2331,11 +2314,9 @@ EXPORT_SYMBOL(__skb_tx_hash);
2331static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 2314static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2332{ 2315{
2333 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2316 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2334 if (net_ratelimit()) { 2317 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2335 pr_warning("%s selects TX queue %d, but " 2318 dev->name, queue_index,
2336 "real number of TX queues is %d\n", 2319 dev->real_num_tx_queues);
2337 dev->name, queue_index, dev->real_num_tx_queues);
2338 }
2339 return 0; 2320 return 0;
2340 } 2321 }
2341 return queue_index; 2322 return queue_index;
@@ -2577,17 +2558,15 @@ int dev_queue_xmit(struct sk_buff *skb)
2577 } 2558 }
2578 } 2559 }
2579 HARD_TX_UNLOCK(dev, txq); 2560 HARD_TX_UNLOCK(dev, txq);
2580 if (net_ratelimit()) 2561 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2581 printk(KERN_CRIT "Virtual device %s asks to " 2562 dev->name);
2582 "queue packet!\n", dev->name);
2583 } else { 2563 } else {
2584 /* Recursion is detected! It is possible, 2564 /* Recursion is detected! It is possible,
2585 * unfortunately 2565 * unfortunately
2586 */ 2566 */
2587recursion_alert: 2567recursion_alert:
2588 if (net_ratelimit()) 2568 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2589 printk(KERN_CRIT "Dead loop on virtual device " 2569 dev->name);
2590 "%s, fix it urgently!\n", dev->name);
2591 } 2570 }
2592 } 2571 }
2593 2572
@@ -2660,7 +2639,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
2660struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2639struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2661EXPORT_SYMBOL(rps_sock_flow_table); 2640EXPORT_SYMBOL(rps_sock_flow_table);
2662 2641
2663struct jump_label_key rps_needed __read_mostly; 2642struct static_key rps_needed __read_mostly;
2664 2643
2665static struct rps_dev_flow * 2644static struct rps_dev_flow *
2666set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2645set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2924,7 @@ int netif_rx(struct sk_buff *skb)
2945 2924
2946 trace_netif_rx(skb); 2925 trace_netif_rx(skb);
2947#ifdef CONFIG_RPS 2926#ifdef CONFIG_RPS
2948 if (static_branch(&rps_needed)) { 2927 if (static_key_false(&rps_needed)) {
2949 struct rps_dev_flow voidflow, *rflow = &voidflow; 2928 struct rps_dev_flow voidflow, *rflow = &voidflow;
2950 int cpu; 2929 int cpu;
2951 2930
@@ -3068,9 +3047,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3068 struct Qdisc *q; 3047 struct Qdisc *q;
3069 3048
3070 if (unlikely(MAX_RED_LOOP < ttl++)) { 3049 if (unlikely(MAX_RED_LOOP < ttl++)) {
3071 if (net_ratelimit()) 3050 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3072 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", 3051 skb->skb_iif, dev->ifindex);
3073 skb->skb_iif, dev->ifindex);
3074 return TC_ACT_SHOT; 3052 return TC_ACT_SHOT;
3075 } 3053 }
3076 3054
@@ -3309,7 +3287,7 @@ int netif_receive_skb(struct sk_buff *skb)
3309 return NET_RX_SUCCESS; 3287 return NET_RX_SUCCESS;
3310 3288
3311#ifdef CONFIG_RPS 3289#ifdef CONFIG_RPS
3312 if (static_branch(&rps_needed)) { 3290 if (static_key_false(&rps_needed)) {
3313 struct rps_dev_flow voidflow, *rflow = &voidflow; 3291 struct rps_dev_flow voidflow, *rflow = &voidflow;
3314 int cpu, ret; 3292 int cpu, ret;
3315 3293
@@ -3530,10 +3508,16 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3530 break; 3508 break;
3531 3509
3532 case GRO_DROP: 3510 case GRO_DROP:
3533 case GRO_MERGED_FREE:
3534 kfree_skb(skb); 3511 kfree_skb(skb);
3535 break; 3512 break;
3536 3513
3514 case GRO_MERGED_FREE:
3515 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3516 kmem_cache_free(skbuff_head_cache, skb);
3517 else
3518 __kfree_skb(skb);
3519 break;
3520
3537 case GRO_HELD: 3521 case GRO_HELD:
3538 case GRO_MERGED: 3522 case GRO_MERGED:
3539 break; 3523 break;
@@ -3569,7 +3553,8 @@ EXPORT_SYMBOL(napi_gro_receive);
3569static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3553static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3570{ 3554{
3571 __skb_pull(skb, skb_headlen(skb)); 3555 __skb_pull(skb, skb_headlen(skb));
3572 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); 3556 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3557 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3573 skb->vlan_tci = 0; 3558 skb->vlan_tci = 0;
3574 skb->dev = napi->dev; 3559 skb->dev = napi->dev;
3575 skb->skb_iif = 0; 3560 skb->skb_iif = 0;
@@ -3617,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3617} 3602}
3618EXPORT_SYMBOL(napi_frags_finish); 3603EXPORT_SYMBOL(napi_frags_finish);
3619 3604
3620struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3605static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3621{ 3606{
3622 struct sk_buff *skb = napi->skb; 3607 struct sk_buff *skb = napi->skb;
3623 struct ethhdr *eth; 3608 struct ethhdr *eth;
@@ -3652,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3652out: 3637out:
3653 return skb; 3638 return skb;
3654} 3639}
3655EXPORT_SYMBOL(napi_frags_skb);
3656 3640
3657gro_result_t napi_gro_frags(struct napi_struct *napi) 3641gro_result_t napi_gro_frags(struct napi_struct *napi)
3658{ 3642{
@@ -4036,54 +4020,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
4036 4020
4037#ifdef CONFIG_PROC_FS 4021#ifdef CONFIG_PROC_FS
4038 4022
4039#define BUCKET_SPACE (32 - NETDEV_HASHBITS) 4023#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4040
4041struct dev_iter_state {
4042 struct seq_net_private p;
4043 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4044};
4045 4024
4046#define get_bucket(x) ((x) >> BUCKET_SPACE) 4025#define get_bucket(x) ((x) >> BUCKET_SPACE)
4047#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) 4026#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4048#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) 4027#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4049 4028
4050static inline struct net_device *dev_from_same_bucket(struct seq_file *seq) 4029static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4051{ 4030{
4052 struct dev_iter_state *state = seq->private;
4053 struct net *net = seq_file_net(seq); 4031 struct net *net = seq_file_net(seq);
4054 struct net_device *dev; 4032 struct net_device *dev;
4055 struct hlist_node *p; 4033 struct hlist_node *p;
4056 struct hlist_head *h; 4034 struct hlist_head *h;
4057 unsigned int count, bucket, offset; 4035 unsigned int count = 0, offset = get_offset(*pos);
4058 4036
4059 bucket = get_bucket(state->pos); 4037 h = &net->dev_name_head[get_bucket(*pos)];
4060 offset = get_offset(state->pos);
4061 h = &net->dev_name_head[bucket];
4062 count = 0;
4063 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4038 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4064 if (count++ == offset) { 4039 if (++count == offset)
4065 state->pos = set_bucket_offset(bucket, count);
4066 return dev; 4040 return dev;
4067 }
4068 } 4041 }
4069 4042
4070 return NULL; 4043 return NULL;
4071} 4044}
4072 4045
4073static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) 4046static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4074{ 4047{
4075 struct dev_iter_state *state = seq->private;
4076 struct net_device *dev; 4048 struct net_device *dev;
4077 unsigned int bucket; 4049 unsigned int bucket;
4078 4050
4079 bucket = get_bucket(state->pos);
4080 do { 4051 do {
4081 dev = dev_from_same_bucket(seq); 4052 dev = dev_from_same_bucket(seq, pos);
4082 if (dev) 4053 if (dev)
4083 return dev; 4054 return dev;
4084 4055
4085 bucket++; 4056 bucket = get_bucket(*pos) + 1;
4086 state->pos = set_bucket_offset(bucket, 0); 4057 *pos = set_bucket_offset(bucket, 1);
4087 } while (bucket < NETDEV_HASHENTRIES); 4058 } while (bucket < NETDEV_HASHENTRIES);
4088 4059
4089 return NULL; 4060 return NULL;
@@ -4096,33 +4067,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4096void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4067void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4097 __acquires(RCU) 4068 __acquires(RCU)
4098{ 4069{
4099 struct dev_iter_state *state = seq->private;
4100
4101 rcu_read_lock(); 4070 rcu_read_lock();
4102 if (!*pos) 4071 if (!*pos)
4103 return SEQ_START_TOKEN; 4072 return SEQ_START_TOKEN;
4104 4073
4105 /* check for end of the hash */ 4074 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4106 if (state->pos == 0 && *pos > 1)
4107 return NULL; 4075 return NULL;
4108 4076
4109 return dev_from_new_bucket(seq); 4077 return dev_from_bucket(seq, pos);
4110} 4078}
4111 4079
4112void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4080void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4113{ 4081{
4114 struct net_device *dev;
4115
4116 ++*pos; 4082 ++*pos;
4117 4083 return dev_from_bucket(seq, pos);
4118 if (v == SEQ_START_TOKEN)
4119 return dev_from_new_bucket(seq);
4120
4121 dev = dev_from_same_bucket(seq);
4122 if (dev)
4123 return dev;
4124
4125 return dev_from_new_bucket(seq);
4126} 4084}
4127 4085
4128void dev_seq_stop(struct seq_file *seq, void *v) 4086void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4221,13 +4179,7 @@ static const struct seq_operations dev_seq_ops = {
4221static int dev_seq_open(struct inode *inode, struct file *file) 4179static int dev_seq_open(struct inode *inode, struct file *file)
4222{ 4180{
4223 return seq_open_net(inode, file, &dev_seq_ops, 4181 return seq_open_net(inode, file, &dev_seq_ops,
4224 sizeof(struct dev_iter_state)); 4182 sizeof(struct seq_net_private));
4225}
4226
4227int dev_seq_open_ops(struct inode *inode, struct file *file,
4228 const struct seq_operations *ops)
4229{
4230 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
4231} 4183}
4232 4184
4233static const struct file_operations dev_seq_fops = { 4185static const struct file_operations dev_seq_fops = {
@@ -4497,16 +4449,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
4497 dev->flags &= ~IFF_PROMISC; 4449 dev->flags &= ~IFF_PROMISC;
4498 else { 4450 else {
4499 dev->promiscuity -= inc; 4451 dev->promiscuity -= inc;
4500 printk(KERN_WARNING "%s: promiscuity touches roof, " 4452 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4501 "set promiscuity failed, promiscuity feature " 4453 dev->name);
4502 "of device might be broken.\n", dev->name);
4503 return -EOVERFLOW; 4454 return -EOVERFLOW;
4504 } 4455 }
4505 } 4456 }
4506 if (dev->flags != old_flags) { 4457 if (dev->flags != old_flags) {
4507 printk(KERN_INFO "device %s %s promiscuous mode\n", 4458 pr_info("device %s %s promiscuous mode\n",
4508 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 4459 dev->name,
4509 "left"); 4460 dev->flags & IFF_PROMISC ? "entered" : "left");
4510 if (audit_enabled) { 4461 if (audit_enabled) {
4511 current_uid_gid(&uid, &gid); 4462 current_uid_gid(&uid, &gid);
4512 audit_log(current->audit_context, GFP_ATOMIC, 4463 audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4530,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
4579 dev->flags &= ~IFF_ALLMULTI; 4530 dev->flags &= ~IFF_ALLMULTI;
4580 else { 4531 else {
4581 dev->allmulti -= inc; 4532 dev->allmulti -= inc;
4582 printk(KERN_WARNING "%s: allmulti touches roof, " 4533 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4583 "set allmulti failed, allmulti feature of " 4534 dev->name);
4584 "device might be broken.\n", dev->name);
4585 return -EOVERFLOW; 4535 return -EOVERFLOW;
4586 } 4536 }
4587 } 4537 }
@@ -4640,9 +4590,9 @@ void dev_set_rx_mode(struct net_device *dev)
4640 * 4590 *
4641 * Get the combination of flag bits exported through APIs to userspace. 4591 * Get the combination of flag bits exported through APIs to userspace.
4642 */ 4592 */
4643unsigned dev_get_flags(const struct net_device *dev) 4593unsigned int dev_get_flags(const struct net_device *dev)
4644{ 4594{
4645 unsigned flags; 4595 unsigned int flags;
4646 4596
4647 flags = (dev->flags & ~(IFF_PROMISC | 4597 flags = (dev->flags & ~(IFF_PROMISC |
4648 IFF_ALLMULTI | 4598 IFF_ALLMULTI |
@@ -5238,8 +5188,8 @@ static void rollback_registered_many(struct list_head *head)
5238 * devices and proceed with the remaining. 5188 * devices and proceed with the remaining.
5239 */ 5189 */
5240 if (dev->reg_state == NETREG_UNINITIALIZED) { 5190 if (dev->reg_state == NETREG_UNINITIALIZED) {
5241 pr_debug("unregister_netdevice: device %s/%p never " 5191 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5242 "was registered\n", dev->name, dev); 5192 dev->name, dev);
5243 5193
5244 WARN_ON(1); 5194 WARN_ON(1);
5245 list_del(&dev->unreg_list); 5195 list_del(&dev->unreg_list);
@@ -5471,7 +5421,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5471 5421
5472 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5422 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5473 if (!rx) { 5423 if (!rx) {
5474 pr_err("netdev: Unable to allocate %u rx queues.\n", count); 5424 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5475 return -ENOMEM; 5425 return -ENOMEM;
5476 } 5426 }
5477 dev->_rx = rx; 5427 dev->_rx = rx;
@@ -5505,8 +5455,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5505 5455
5506 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5456 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5507 if (!tx) { 5457 if (!tx) {
5508 pr_err("netdev: Unable to allocate %u tx queues.\n", 5458 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5509 count);
5510 return -ENOMEM; 5459 return -ENOMEM;
5511 } 5460 }
5512 dev->_tx = tx; 5461 dev->_tx = tx;
@@ -5765,10 +5714,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5765 refcnt = netdev_refcnt_read(dev); 5714 refcnt = netdev_refcnt_read(dev);
5766 5715
5767 if (time_after(jiffies, warning_time + 10 * HZ)) { 5716 if (time_after(jiffies, warning_time + 10 * HZ)) {
5768 printk(KERN_EMERG "unregister_netdevice: " 5717 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5769 "waiting for %s to become free. Usage " 5718 dev->name, refcnt);
5770 "count = %d\n",
5771 dev->name, refcnt);
5772 warning_time = jiffies; 5719 warning_time = jiffies;
5773 } 5720 }
5774 } 5721 }
@@ -5819,7 +5766,7 @@ void netdev_run_todo(void)
5819 list_del(&dev->todo_list); 5766 list_del(&dev->todo_list);
5820 5767
5821 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5768 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5822 printk(KERN_ERR "network todo '%s' but state %d\n", 5769 pr_err("network todo '%s' but state %d\n",
5823 dev->name, dev->reg_state); 5770 dev->name, dev->reg_state);
5824 dump_stack(); 5771 dump_stack();
5825 continue; 5772 continue;
@@ -5848,12 +5795,12 @@ void netdev_run_todo(void)
5848/* Convert net_device_stats to rtnl_link_stats64. They have the same 5795/* Convert net_device_stats to rtnl_link_stats64. They have the same
5849 * fields in the same order, with only the type differing. 5796 * fields in the same order, with only the type differing.
5850 */ 5797 */
5851static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5798void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5852 const struct net_device_stats *netdev_stats) 5799 const struct net_device_stats *netdev_stats)
5853{ 5800{
5854#if BITS_PER_LONG == 64 5801#if BITS_PER_LONG == 64
5855 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5802 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5856 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5803 memcpy(stats64, netdev_stats, sizeof(*stats64));
5857#else 5804#else
5858 size_t i, n = sizeof(*stats64) / sizeof(u64); 5805 size_t i, n = sizeof(*stats64) / sizeof(u64);
5859 const unsigned long *src = (const unsigned long *)netdev_stats; 5806 const unsigned long *src = (const unsigned long *)netdev_stats;
@@ -5865,6 +5812,7 @@ static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5865 dst[i] = src[i]; 5812 dst[i] = src[i];
5866#endif 5813#endif
5867} 5814}
5815EXPORT_SYMBOL(netdev_stats_to_stats64);
5868 5816
5869/** 5817/**
5870 * dev_get_stats - get network device statistics 5818 * dev_get_stats - get network device statistics
@@ -5935,15 +5883,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5935 BUG_ON(strlen(name) >= sizeof(dev->name)); 5883 BUG_ON(strlen(name) >= sizeof(dev->name));
5936 5884
5937 if (txqs < 1) { 5885 if (txqs < 1) {
5938 pr_err("alloc_netdev: Unable to allocate device " 5886 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5939 "with zero queues.\n");
5940 return NULL; 5887 return NULL;
5941 } 5888 }
5942 5889
5943#ifdef CONFIG_RPS 5890#ifdef CONFIG_RPS
5944 if (rxqs < 1) { 5891 if (rxqs < 1) {
5945 pr_err("alloc_netdev: Unable to allocate device " 5892 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5946 "with zero RX queues.\n");
5947 return NULL; 5893 return NULL;
5948 } 5894 }
5949#endif 5895#endif
@@ -5959,7 +5905,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5959 5905
5960 p = kzalloc(alloc_size, GFP_KERNEL); 5906 p = kzalloc(alloc_size, GFP_KERNEL);
5961 if (!p) { 5907 if (!p) {
5962 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); 5908 pr_err("alloc_netdev: Unable to allocate device\n");
5963 return NULL; 5909 return NULL;
5964 } 5910 }
5965 5911
@@ -6492,8 +6438,8 @@ static void __net_exit default_device_exit(struct net *net)
6492 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 6438 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6493 err = dev_change_net_namespace(dev, &init_net, fb_name); 6439 err = dev_change_net_namespace(dev, &init_net, fb_name);
6494 if (err) { 6440 if (err) {
6495 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", 6441 pr_emerg("%s: failed to move %s to init_net: %d\n",
6496 __func__, dev->name, err); 6442 __func__, dev->name, err);
6497 BUG(); 6443 BUG();
6498 } 6444 }
6499 } 6445 }
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 29c07fef9228..c4cc2bc49f06 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -21,12 +21,35 @@
21 * General list handling functions 21 * General list handling functions
22 */ 22 */
23 23
24static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
25 unsigned char *addr, int addr_len,
26 unsigned char addr_type, bool global)
27{
28 struct netdev_hw_addr *ha;
29 int alloc_size;
30
31 alloc_size = sizeof(*ha);
32 if (alloc_size < L1_CACHE_BYTES)
33 alloc_size = L1_CACHE_BYTES;
34 ha = kmalloc(alloc_size, GFP_ATOMIC);
35 if (!ha)
36 return -ENOMEM;
37 memcpy(ha->addr, addr, addr_len);
38 ha->type = addr_type;
39 ha->refcount = 1;
40 ha->global_use = global;
41 ha->synced = false;
42 list_add_tail_rcu(&ha->list, &list->list);
43 list->count++;
44
45 return 0;
46}
47
24static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, 48static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
25 unsigned char *addr, int addr_len, 49 unsigned char *addr, int addr_len,
26 unsigned char addr_type, bool global) 50 unsigned char addr_type, bool global)
27{ 51{
28 struct netdev_hw_addr *ha; 52 struct netdev_hw_addr *ha;
29 int alloc_size;
30 53
31 if (addr_len > MAX_ADDR_LEN) 54 if (addr_len > MAX_ADDR_LEN)
32 return -EINVAL; 55 return -EINVAL;
@@ -46,21 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
46 } 69 }
47 } 70 }
48 71
49 72 return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
50 alloc_size = sizeof(*ha);
51 if (alloc_size < L1_CACHE_BYTES)
52 alloc_size = L1_CACHE_BYTES;
53 ha = kmalloc(alloc_size, GFP_ATOMIC);
54 if (!ha)
55 return -ENOMEM;
56 memcpy(ha->addr, addr, addr_len);
57 ha->type = addr_type;
58 ha->refcount = 1;
59 ha->global_use = global;
60 ha->synced = false;
61 list_add_tail_rcu(&ha->list, &list->list);
62 list->count++;
63 return 0;
64} 73}
65 74
66static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, 75static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
@@ -377,6 +386,34 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
377 */ 386 */
378 387
379/** 388/**
389 * dev_uc_add_excl - Add a global secondary unicast address
390 * @dev: device
391 * @addr: address to add
392 */
393int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
394{
395 struct netdev_hw_addr *ha;
396 int err;
397
398 netif_addr_lock_bh(dev);
399 list_for_each_entry(ha, &dev->uc.list, list) {
400 if (!memcmp(ha->addr, addr, dev->addr_len) &&
401 ha->type == NETDEV_HW_ADDR_T_UNICAST) {
402 err = -EEXIST;
403 goto out;
404 }
405 }
406 err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
407 NETDEV_HW_ADDR_T_UNICAST, true);
408 if (!err)
409 __dev_set_rx_mode(dev);
410out:
411 netif_addr_unlock_bh(dev);
412 return err;
413}
414EXPORT_SYMBOL(dev_uc_add_excl);
415
416/**
380 * dev_uc_add - Add a secondary unicast address 417 * dev_uc_add - Add a secondary unicast address
381 * @dev: device 418 * @dev: device
382 * @addr: address to add 419 * @addr: address to add
@@ -501,6 +538,34 @@ EXPORT_SYMBOL(dev_uc_init);
501 * Multicast list handling functions 538 * Multicast list handling functions
502 */ 539 */
503 540
541/**
542 * dev_mc_add_excl - Add a global secondary multicast address
543 * @dev: device
544 * @addr: address to add
545 */
546int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
547{
548 struct netdev_hw_addr *ha;
549 int err;
550
551 netif_addr_lock_bh(dev);
552 list_for_each_entry(ha, &dev->mc.list, list) {
553 if (!memcmp(ha->addr, addr, dev->addr_len) &&
554 ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
555 err = -EEXIST;
556 goto out;
557 }
558 }
559 err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
560 NETDEV_HW_ADDR_T_MULTICAST, true);
561 if (!err)
562 __dev_set_rx_mode(dev);
563out:
564 netif_addr_unlock_bh(dev);
565 return err;
566}
567EXPORT_SYMBOL(dev_mc_add_excl);
568
504static int __dev_mc_add(struct net_device *dev, unsigned char *addr, 569static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
505 bool global) 570 bool global)
506{ 571{
@@ -696,7 +761,8 @@ static const struct seq_operations dev_mc_seq_ops = {
696 761
697static int dev_mc_seq_open(struct inode *inode, struct file *file) 762static int dev_mc_seq_open(struct inode *inode, struct file *file)
698{ 763{
699 return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); 764 return seq_open_net(inode, file, &dev_mc_seq_ops,
765 sizeof(struct seq_net_private));
700} 766}
701 767
702static const struct file_operations dev_mc_seq_fops = { 768static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..ea5fb9fcc3f5 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com> 4 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
7#include <linux/netdevice.h> 9#include <linux/netdevice.h>
8#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
9#include <linux/string.h> 11#include <linux/string.h>
@@ -22,6 +24,7 @@
22#include <linux/timer.h> 24#include <linux/timer.h>
23#include <linux/bitops.h> 25#include <linux/bitops.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
25#include <net/genetlink.h> 28#include <net/genetlink.h>
26#include <net/netevent.h> 29#include <net/netevent.h>
27 30
@@ -42,13 +45,14 @@ static void send_dm_alert(struct work_struct *unused);
42 * netlink alerts 45 * netlink alerts
43 */ 46 */
44static int trace_state = TRACE_OFF; 47static int trace_state = TRACE_OFF;
45static DEFINE_SPINLOCK(trace_state_lock); 48static DEFINE_MUTEX(trace_state_mutex);
46 49
47struct per_cpu_dm_data { 50struct per_cpu_dm_data {
48 struct work_struct dm_alert_work; 51 struct work_struct dm_alert_work;
49 struct sk_buff *skb; 52 struct sk_buff __rcu *skb;
50 atomic_t dm_hit_count; 53 atomic_t dm_hit_count;
51 struct timer_list send_timer; 54 struct timer_list send_timer;
55 int cpu;
52}; 56};
53 57
54struct dm_hw_stat_delta { 58struct dm_hw_stat_delta {
@@ -79,29 +83,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
79 size_t al; 83 size_t al;
80 struct net_dm_alert_msg *msg; 84 struct net_dm_alert_msg *msg;
81 struct nlattr *nla; 85 struct nlattr *nla;
86 struct sk_buff *skb;
87 struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
82 88
83 al = sizeof(struct net_dm_alert_msg); 89 al = sizeof(struct net_dm_alert_msg);
84 al += dm_hit_limit * sizeof(struct net_dm_drop_point); 90 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
85 al += sizeof(struct nlattr); 91 al += sizeof(struct nlattr);
86 92
87 data->skb = genlmsg_new(al, GFP_KERNEL); 93 skb = genlmsg_new(al, GFP_KERNEL);
88 genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, 94
89 0, NET_DM_CMD_ALERT); 95 if (skb) {
90 nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); 96 genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
91 msg = nla_data(nla); 97 0, NET_DM_CMD_ALERT);
92 memset(msg, 0, al); 98 nla = nla_reserve(skb, NLA_UNSPEC,
93 atomic_set(&data->dm_hit_count, dm_hit_limit); 99 sizeof(struct net_dm_alert_msg));
100 msg = nla_data(nla);
101 memset(msg, 0, al);
102 } else
103 schedule_work_on(data->cpu, &data->dm_alert_work);
104
105 /*
106 * Don't need to lock this, since we are guaranteed to only
107 * run this on a single cpu at a time.
108 * Note also that we only update data->skb if the old and new skb
109 * pointers don't match. This ensures that we don't continually call
110 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
111 */
112 if (skb != oskb) {
113 rcu_assign_pointer(data->skb, skb);
114
115 synchronize_rcu();
116
117 atomic_set(&data->dm_hit_count, dm_hit_limit);
118 }
119
94} 120}
95 121
96static void send_dm_alert(struct work_struct *unused) 122static void send_dm_alert(struct work_struct *unused)
97{ 123{
98 struct sk_buff *skb; 124 struct sk_buff *skb;
99 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 125 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
126
127 WARN_ON_ONCE(data->cpu != smp_processor_id());
100 128
101 /* 129 /*
102 * Grab the skb we're about to send 130 * Grab the skb we're about to send
103 */ 131 */
104 skb = data->skb; 132 skb = rcu_dereference_protected(data->skb, 1);
105 133
106 /* 134 /*
107 * Replace it with a new one 135 * Replace it with a new one
@@ -111,8 +139,10 @@ static void send_dm_alert(struct work_struct *unused)
111 /* 139 /*
112 * Ship it! 140 * Ship it!
113 */ 141 */
114 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 142 if (skb)
143 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
115 144
145 put_cpu_var(dm_cpu_data);
116} 146}
117 147
118/* 148/*
@@ -123,9 +153,11 @@ static void send_dm_alert(struct work_struct *unused)
123 */ 153 */
124static void sched_send_work(unsigned long unused) 154static void sched_send_work(unsigned long unused)
125{ 155{
126 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 156 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
157
158 schedule_work_on(smp_processor_id(), &data->dm_alert_work);
127 159
128 schedule_work(&data->dm_alert_work); 160 put_cpu_var(dm_cpu_data);
129} 161}
130 162
131static void trace_drop_common(struct sk_buff *skb, void *location) 163static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,9 +166,16 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
134 struct nlmsghdr *nlh; 166 struct nlmsghdr *nlh;
135 struct nlattr *nla; 167 struct nlattr *nla;
136 int i; 168 int i;
137 struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); 169 struct sk_buff *dskb;
170 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
138 171
139 172
173 rcu_read_lock();
174 dskb = rcu_dereference(data->skb);
175
176 if (!dskb)
177 goto out;
178
140 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { 179 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
141 /* 180 /*
142 * we're already at zero, discard this hit 181 * we're already at zero, discard this hit
@@ -144,12 +183,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
144 goto out; 183 goto out;
145 } 184 }
146 185
147 nlh = (struct nlmsghdr *)data->skb->data; 186 nlh = (struct nlmsghdr *)dskb->data;
148 nla = genlmsg_data(nlmsg_data(nlh)); 187 nla = genlmsg_data(nlmsg_data(nlh));
149 msg = nla_data(nla); 188 msg = nla_data(nla);
150 for (i = 0; i < msg->entries; i++) { 189 for (i = 0; i < msg->entries; i++) {
151 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 190 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
152 msg->points[i].count++; 191 msg->points[i].count++;
192 atomic_inc(&data->dm_hit_count);
153 goto out; 193 goto out;
154 } 194 }
155 } 195 }
@@ -157,7 +197,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
157 /* 197 /*
158 * We need to create a new entry 198 * We need to create a new entry
159 */ 199 */
160 __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); 200 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
161 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); 201 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
162 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); 202 memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
163 msg->points[msg->entries].count = 1; 203 msg->points[msg->entries].count = 1;
@@ -169,6 +209,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
169 } 209 }
170 210
171out: 211out:
212 rcu_read_unlock();
213 put_cpu_var(dm_cpu_data);
172 return; 214 return;
173} 215}
174 216
@@ -213,7 +255,7 @@ static int set_all_monitor_traces(int state)
213 struct dm_hw_stat_delta *new_stat = NULL; 255 struct dm_hw_stat_delta *new_stat = NULL;
214 struct dm_hw_stat_delta *temp; 256 struct dm_hw_stat_delta *temp;
215 257
216 spin_lock(&trace_state_lock); 258 mutex_lock(&trace_state_mutex);
217 259
218 if (state == trace_state) { 260 if (state == trace_state) {
219 rc = -EAGAIN; 261 rc = -EAGAIN;
@@ -222,9 +264,15 @@ static int set_all_monitor_traces(int state)
222 264
223 switch (state) { 265 switch (state) {
224 case TRACE_ON: 266 case TRACE_ON:
267 if (!try_module_get(THIS_MODULE)) {
268 rc = -ENODEV;
269 break;
270 }
271
225 rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); 272 rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
226 rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); 273 rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
227 break; 274 break;
275
228 case TRACE_OFF: 276 case TRACE_OFF:
229 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); 277 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
230 rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); 278 rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
@@ -240,6 +288,9 @@ static int set_all_monitor_traces(int state)
240 kfree_rcu(new_stat, rcu); 288 kfree_rcu(new_stat, rcu);
241 } 289 }
242 } 290 }
291
292 module_put(THIS_MODULE);
293
243 break; 294 break;
244 default: 295 default:
245 rc = 1; 296 rc = 1;
@@ -252,7 +303,7 @@ static int set_all_monitor_traces(int state)
252 rc = -EINPROGRESS; 303 rc = -EINPROGRESS;
253 304
254out_unlock: 305out_unlock:
255 spin_unlock(&trace_state_lock); 306 mutex_unlock(&trace_state_mutex);
256 307
257 return rc; 308 return rc;
258} 309}
@@ -295,12 +346,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
295 346
296 new_stat->dev = dev; 347 new_stat->dev = dev;
297 new_stat->last_rx = jiffies; 348 new_stat->last_rx = jiffies;
298 spin_lock(&trace_state_lock); 349 mutex_lock(&trace_state_mutex);
299 list_add_rcu(&new_stat->list, &hw_stats_list); 350 list_add_rcu(&new_stat->list, &hw_stats_list);
300 spin_unlock(&trace_state_lock); 351 mutex_unlock(&trace_state_mutex);
301 break; 352 break;
302 case NETDEV_UNREGISTER: 353 case NETDEV_UNREGISTER:
303 spin_lock(&trace_state_lock); 354 mutex_lock(&trace_state_mutex);
304 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { 355 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
305 if (new_stat->dev == dev) { 356 if (new_stat->dev == dev) {
306 new_stat->dev = NULL; 357 new_stat->dev = NULL;
@@ -311,7 +362,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
311 } 362 }
312 } 363 }
313 } 364 }
314 spin_unlock(&trace_state_lock); 365 mutex_unlock(&trace_state_mutex);
315 break; 366 break;
316 } 367 }
317out: 368out:
@@ -342,10 +393,10 @@ static int __init init_net_drop_monitor(void)
342 struct per_cpu_dm_data *data; 393 struct per_cpu_dm_data *data;
343 int cpu, rc; 394 int cpu, rc;
344 395
345 printk(KERN_INFO "Initializing network drop monitor service\n"); 396 pr_info("Initializing network drop monitor service\n");
346 397
347 if (sizeof(void *) > 8) { 398 if (sizeof(void *) > 8) {
348 printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n"); 399 pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
349 return -ENOSPC; 400 return -ENOSPC;
350 } 401 }
351 402
@@ -353,27 +404,29 @@ static int __init init_net_drop_monitor(void)
353 dropmon_ops, 404 dropmon_ops,
354 ARRAY_SIZE(dropmon_ops)); 405 ARRAY_SIZE(dropmon_ops));
355 if (rc) { 406 if (rc) {
356 printk(KERN_ERR "Could not create drop monitor netlink family\n"); 407 pr_err("Could not create drop monitor netlink family\n");
357 return rc; 408 return rc;
358 } 409 }
359 410
360 rc = register_netdevice_notifier(&dropmon_net_notifier); 411 rc = register_netdevice_notifier(&dropmon_net_notifier);
361 if (rc < 0) { 412 if (rc < 0) {
362 printk(KERN_CRIT "Failed to register netdevice notifier\n"); 413 pr_crit("Failed to register netdevice notifier\n");
363 goto out_unreg; 414 goto out_unreg;
364 } 415 }
365 416
366 rc = 0; 417 rc = 0;
367 418
368 for_each_present_cpu(cpu) { 419 for_each_possible_cpu(cpu) {
369 data = &per_cpu(dm_cpu_data, cpu); 420 data = &per_cpu(dm_cpu_data, cpu);
370 reset_per_cpu_data(data); 421 data->cpu = cpu;
371 INIT_WORK(&data->dm_alert_work, send_dm_alert); 422 INIT_WORK(&data->dm_alert_work, send_dm_alert);
372 init_timer(&data->send_timer); 423 init_timer(&data->send_timer);
373 data->send_timer.data = cpu; 424 data->send_timer.data = cpu;
374 data->send_timer.function = sched_send_work; 425 data->send_timer.function = sched_send_work;
426 reset_per_cpu_data(data);
375 } 427 }
376 428
429
377 goto out; 430 goto out;
378 431
379out_unreg: 432out_unreg:
@@ -382,4 +435,37 @@ out:
382 return rc; 435 return rc;
383} 436}
384 437
385late_initcall(init_net_drop_monitor); 438static void exit_net_drop_monitor(void)
439{
440 struct per_cpu_dm_data *data;
441 int cpu;
442
443 BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
444
445 /*
446 * Because of the module_get/put we do in the trace state change path
447 * we are guarnateed not to have any current users when we get here
448 * all we need to do is make sure that we don't have any running timers
449 * or pending schedule calls
450 */
451
452 for_each_possible_cpu(cpu) {
453 data = &per_cpu(dm_cpu_data, cpu);
454 del_timer_sync(&data->send_timer);
455 cancel_work_sync(&data->dm_alert_work);
456 /*
457 * At this point, we should have exclusive access
458 * to this struct and can free the skb inside it
459 */
460 kfree_skb(data->skb);
461 }
462
463 BUG_ON(genl_unregister_family(&net_drop_monitor_family));
464}
465
466module_init(init_net_drop_monitor);
467module_exit(exit_net_drop_monitor);
468
469MODULE_LICENSE("GPL v2");
470MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
471MODULE_ALIAS_GENL_FAMILY("NET_DM");
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3f79db1b612a..9c2afb480270 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/net_tstamp.h>
21#include <linux/phy.h>
20#include <linux/bitops.h> 22#include <linux/bitops.h>
21#include <linux/uaccess.h> 23#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
@@ -36,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
36} 38}
37EXPORT_SYMBOL(ethtool_op_get_link); 39EXPORT_SYMBOL(ethtool_op_get_link);
38 40
41int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
42{
43 info->so_timestamping =
44 SOF_TIMESTAMPING_TX_SOFTWARE |
45 SOF_TIMESTAMPING_RX_SOFTWARE |
46 SOF_TIMESTAMPING_SOFTWARE;
47 info->phc_index = -1;
48 return 0;
49}
50EXPORT_SYMBOL(ethtool_op_get_ts_info);
51
39/* Handlers for each ethtool command */ 52/* Handlers for each ethtool command */
40 53
41#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) 54#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -73,6 +86,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
73 [NETIF_F_RXCSUM_BIT] = "rx-checksum", 86 [NETIF_F_RXCSUM_BIT] = "rx-checksum",
74 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", 87 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
75 [NETIF_F_LOOPBACK_BIT] = "loopback", 88 [NETIF_F_LOOPBACK_BIT] = "loopback",
89 [NETIF_F_RXFCS_BIT] = "rx-fcs",
90 [NETIF_F_RXALL_BIT] = "rx-all",
76}; 91};
77 92
78static int ethtool_get_features(struct net_device *dev, void __user *useraddr) 93static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
@@ -736,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
736 return 0; 751 return 0;
737} 752}
738 753
739static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) 754static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
755 int (*getter)(struct net_device *,
756 struct ethtool_eeprom *, u8 *),
757 u32 total_len)
740{ 758{
741 struct ethtool_eeprom eeprom; 759 struct ethtool_eeprom eeprom;
742 const struct ethtool_ops *ops = dev->ethtool_ops;
743 void __user *userbuf = useraddr + sizeof(eeprom); 760 void __user *userbuf = useraddr + sizeof(eeprom);
744 u32 bytes_remaining; 761 u32 bytes_remaining;
745 u8 *data; 762 u8 *data;
746 int ret = 0; 763 int ret = 0;
747 764
748 if (!ops->get_eeprom || !ops->get_eeprom_len)
749 return -EOPNOTSUPP;
750
751 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 765 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
752 return -EFAULT; 766 return -EFAULT;
753 767
@@ -756,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
756 return -EINVAL; 770 return -EINVAL;
757 771
758 /* Check for exceeding total eeprom len */ 772 /* Check for exceeding total eeprom len */
759 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) 773 if (eeprom.offset + eeprom.len > total_len)
760 return -EINVAL; 774 return -EINVAL;
761 775
762 data = kmalloc(PAGE_SIZE, GFP_USER); 776 data = kmalloc(PAGE_SIZE, GFP_USER);
@@ -767,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
767 while (bytes_remaining > 0) { 781 while (bytes_remaining > 0) {
768 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); 782 eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
769 783
770 ret = ops->get_eeprom(dev, &eeprom, data); 784 ret = getter(dev, &eeprom, data);
771 if (ret) 785 if (ret)
772 break; 786 break;
773 if (copy_to_user(userbuf, data, eeprom.len)) { 787 if (copy_to_user(userbuf, data, eeprom.len)) {
@@ -788,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
788 return ret; 802 return ret;
789} 803}
790 804
805static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
806{
807 const struct ethtool_ops *ops = dev->ethtool_ops;
808
809 if (!ops->get_eeprom || !ops->get_eeprom_len)
810 return -EOPNOTSUPP;
811
812 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
813 ops->get_eeprom_len(dev));
814}
815
791static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) 816static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
792{ 817{
793 struct ethtool_eeprom eeprom; 818 struct ethtool_eeprom eeprom;
@@ -1276,6 +1301,81 @@ out:
1276 return ret; 1301 return ret;
1277} 1302}
1278 1303
1304static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
1305{
1306 int err = 0;
1307 struct ethtool_ts_info info;
1308 const struct ethtool_ops *ops = dev->ethtool_ops;
1309 struct phy_device *phydev = dev->phydev;
1310
1311 memset(&info, 0, sizeof(info));
1312 info.cmd = ETHTOOL_GET_TS_INFO;
1313
1314 if (phydev && phydev->drv && phydev->drv->ts_info) {
1315
1316 err = phydev->drv->ts_info(phydev, &info);
1317
1318 } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
1319
1320 err = ops->get_ts_info(dev, &info);
1321
1322 } else {
1323 info.so_timestamping =
1324 SOF_TIMESTAMPING_RX_SOFTWARE |
1325 SOF_TIMESTAMPING_SOFTWARE;
1326 info.phc_index = -1;
1327 }
1328
1329 if (err)
1330 return err;
1331
1332 if (copy_to_user(useraddr, &info, sizeof(info)))
1333 err = -EFAULT;
1334
1335 return err;
1336}
1337
1338static int ethtool_get_module_info(struct net_device *dev,
1339 void __user *useraddr)
1340{
1341 int ret;
1342 struct ethtool_modinfo modinfo;
1343 const struct ethtool_ops *ops = dev->ethtool_ops;
1344
1345 if (!ops->get_module_info)
1346 return -EOPNOTSUPP;
1347
1348 if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
1349 return -EFAULT;
1350
1351 ret = ops->get_module_info(dev, &modinfo);
1352 if (ret)
1353 return ret;
1354
1355 if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
1356 return -EFAULT;
1357
1358 return 0;
1359}
1360
1361static int ethtool_get_module_eeprom(struct net_device *dev,
1362 void __user *useraddr)
1363{
1364 int ret;
1365 struct ethtool_modinfo modinfo;
1366 const struct ethtool_ops *ops = dev->ethtool_ops;
1367
1368 if (!ops->get_module_info || !ops->get_module_eeprom)
1369 return -EOPNOTSUPP;
1370
1371 ret = ops->get_module_info(dev, &modinfo);
1372 if (ret)
1373 return ret;
1374
1375 return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
1376 modinfo.eeprom_len);
1377}
1378
1279/* The main entry point in this file. Called from net/core/dev.c */ 1379/* The main entry point in this file. Called from net/core/dev.c */
1280 1380
1281int dev_ethtool(struct net *net, struct ifreq *ifr) 1381int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1293,11 +1393,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1293 return -EFAULT; 1393 return -EFAULT;
1294 1394
1295 if (!dev->ethtool_ops) { 1395 if (!dev->ethtool_ops) {
1296 /* ETHTOOL_GDRVINFO does not require any driver support. 1396 /* A few commands do not require any driver support,
1297 * It is also unprivileged and does not change anything, 1397 * are unprivileged, and do not change anything, so we
1298 * so we can take a shortcut to it. */ 1398 * can take a shortcut to them. */
1299 if (ethcmd == ETHTOOL_GDRVINFO) 1399 if (ethcmd == ETHTOOL_GDRVINFO)
1300 return ethtool_get_drvinfo(dev, useraddr); 1400 return ethtool_get_drvinfo(dev, useraddr);
1401 else if (ethcmd == ETHTOOL_GET_TS_INFO)
1402 return ethtool_get_ts_info(dev, useraddr);
1301 else 1403 else
1302 return -EOPNOTSUPP; 1404 return -EOPNOTSUPP;
1303 } 1405 }
@@ -1328,6 +1430,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1328 case ETHTOOL_GRXCLSRULE: 1430 case ETHTOOL_GRXCLSRULE:
1329 case ETHTOOL_GRXCLSRLALL: 1431 case ETHTOOL_GRXCLSRLALL:
1330 case ETHTOOL_GFEATURES: 1432 case ETHTOOL_GFEATURES:
1433 case ETHTOOL_GET_TS_INFO:
1331 break; 1434 break;
1332 default: 1435 default:
1333 if (!capable(CAP_NET_ADMIN)) 1436 if (!capable(CAP_NET_ADMIN))
@@ -1494,6 +1597,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1494 case ETHTOOL_GET_DUMP_DATA: 1597 case ETHTOOL_GET_DUMP_DATA:
1495 rc = ethtool_get_dump_data(dev, useraddr); 1598 rc = ethtool_get_dump_data(dev, useraddr);
1496 break; 1599 break;
1600 case ETHTOOL_GET_TS_INFO:
1601 rc = ethtool_get_ts_info(dev, useraddr);
1602 break;
1603 case ETHTOOL_GMODULEINFO:
1604 rc = ethtool_get_module_info(dev, useraddr);
1605 break;
1606 case ETHTOOL_GMODULEEEPROM:
1607 rc = ethtool_get_module_eeprom(dev, useraddr);
1608 break;
1497 default: 1609 default:
1498 rc = -EOPNOTSUPP; 1610 rc = -EOPNOTSUPP;
1499 } 1611 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c02e63c908da..72cceb79d0d4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
542 frh = nlmsg_data(nlh); 542 frh = nlmsg_data(nlh);
543 frh->family = ops->family; 543 frh->family = ops->family;
544 frh->table = rule->table; 544 frh->table = rule->table;
545 NLA_PUT_U32(skb, FRA_TABLE, rule->table); 545 if (nla_put_u32(skb, FRA_TABLE, rule->table))
546 goto nla_put_failure;
546 frh->res1 = 0; 547 frh->res1 = 0;
547 frh->res2 = 0; 548 frh->res2 = 0;
548 frh->action = rule->action; 549 frh->action = rule->action;
@@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
553 frh->flags |= FIB_RULE_UNRESOLVED; 554 frh->flags |= FIB_RULE_UNRESOLVED;
554 555
555 if (rule->iifname[0]) { 556 if (rule->iifname[0]) {
556 NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname); 557 if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
557 558 goto nla_put_failure;
558 if (rule->iifindex == -1) 559 if (rule->iifindex == -1)
559 frh->flags |= FIB_RULE_IIF_DETACHED; 560 frh->flags |= FIB_RULE_IIF_DETACHED;
560 } 561 }
561 562
562 if (rule->oifname[0]) { 563 if (rule->oifname[0]) {
563 NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname); 564 if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
564 565 goto nla_put_failure;
565 if (rule->oifindex == -1) 566 if (rule->oifindex == -1)
566 frh->flags |= FIB_RULE_OIF_DETACHED; 567 frh->flags |= FIB_RULE_OIF_DETACHED;
567 } 568 }
568 569
569 if (rule->pref) 570 if ((rule->pref &&
570 NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); 571 nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
571 572 (rule->mark &&
572 if (rule->mark) 573 nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
573 NLA_PUT_U32(skb, FRA_FWMARK, rule->mark); 574 ((rule->mark_mask || rule->mark) &&
574 575 nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
575 if (rule->mark_mask || rule->mark) 576 (rule->target &&
576 NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); 577 nla_put_u32(skb, FRA_GOTO, rule->target)))
577 578 goto nla_put_failure;
578 if (rule->target)
579 NLA_PUT_U32(skb, FRA_GOTO, rule->target);
580
581 if (ops->fill(rule, skb, frh) < 0) 579 if (ops->fill(rule, skb, frh) < 0)
582 goto nla_put_failure; 580 goto nla_put_failure;
583 581
diff --git a/net/core/filter.c b/net/core/filter.c
index 5dea45279215..a3eddb515d1b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -33,15 +33,18 @@
33#include <net/sock.h> 33#include <net/sock.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <asm/system.h>
37#include <asm/uaccess.h> 36#include <asm/uaccess.h>
38#include <asm/unaligned.h> 37#include <asm/unaligned.h>
39#include <linux/filter.h> 38#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 39#include <linux/reciprocal_div.h>
41#include <linux/ratelimit.h> 40#include <linux/ratelimit.h>
41#include <linux/seccomp.h>
42 42
43/* No hurry in this branch */ 43/* No hurry in this branch
44static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 44 *
45 * Exported for the bpf jit load helper.
46 */
47void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
45{ 48{
46 u8 *ptr = NULL; 49 u8 *ptr = NULL;
47 50
@@ -60,7 +63,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
60{ 63{
61 if (k >= 0) 64 if (k >= 0)
62 return skb_header_pointer(skb, k, size, buffer); 65 return skb_header_pointer(skb, k, size, buffer);
63 return __load_pointer(skb, k, size); 66 return bpf_internal_load_pointer_neg_helper(skb, k, size);
64} 67}
65 68
66/** 69/**
@@ -315,6 +318,9 @@ load_b:
315 case BPF_S_ANC_CPU: 318 case BPF_S_ANC_CPU:
316 A = raw_smp_processor_id(); 319 A = raw_smp_processor_id();
317 continue; 320 continue;
321 case BPF_S_ANC_ALU_XOR_X:
322 A ^= X;
323 continue;
318 case BPF_S_ANC_NLATTR: { 324 case BPF_S_ANC_NLATTR: {
319 struct nlattr *nla; 325 struct nlattr *nla;
320 326
@@ -350,6 +356,11 @@ load_b:
350 A = 0; 356 A = 0;
351 continue; 357 continue;
352 } 358 }
359#ifdef CONFIG_SECCOMP_FILTER
360 case BPF_S_ANC_SECCOMP_LD_W:
361 A = seccomp_bpf_load(fentry->k);
362 continue;
363#endif
353 default: 364 default:
354 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", 365 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
355 fentry->code, fentry->jt, 366 fentry->code, fentry->jt,
@@ -526,7 +537,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
526 * Compare this with conditional jumps below, 537 * Compare this with conditional jumps below,
527 * where offsets are limited. --ANK (981016) 538 * where offsets are limited. --ANK (981016)
528 */ 539 */
529 if (ftest->k >= (unsigned)(flen-pc-1)) 540 if (ftest->k >= (unsigned int)(flen-pc-1))
530 return -EINVAL; 541 return -EINVAL;
531 break; 542 break;
532 case BPF_S_JMP_JEQ_K: 543 case BPF_S_JMP_JEQ_K:
@@ -559,6 +570,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
559 ANCILLARY(HATYPE); 570 ANCILLARY(HATYPE);
560 ANCILLARY(RXHASH); 571 ANCILLARY(RXHASH);
561 ANCILLARY(CPU); 572 ANCILLARY(CPU);
573 ANCILLARY(ALU_XOR_X);
562 } 574 }
563 } 575 }
564 ftest->code = code; 576 ftest->code = code;
@@ -587,6 +599,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
587} 599}
588EXPORT_SYMBOL(sk_filter_release_rcu); 600EXPORT_SYMBOL(sk_filter_release_rcu);
589 601
602static int __sk_prepare_filter(struct sk_filter *fp)
603{
604 int err;
605
606 fp->bpf_func = sk_run_filter;
607
608 err = sk_chk_filter(fp->insns, fp->len);
609 if (err)
610 return err;
611
612 bpf_jit_compile(fp);
613 return 0;
614}
615
616/**
617 * sk_unattached_filter_create - create an unattached filter
618 * @fprog: the filter program
619 * @sk: the socket to use
620 *
621 * Create a filter independent ofr any socket. We first run some
622 * sanity checks on it to make sure it does not explode on us later.
623 * If an error occurs or there is insufficient memory for the filter
624 * a negative errno code is returned. On success the return is zero.
625 */
626int sk_unattached_filter_create(struct sk_filter **pfp,
627 struct sock_fprog *fprog)
628{
629 struct sk_filter *fp;
630 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
631 int err;
632
633 /* Make sure new filter is there and in the right amounts. */
634 if (fprog->filter == NULL)
635 return -EINVAL;
636
637 fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
638 if (!fp)
639 return -ENOMEM;
640 memcpy(fp->insns, fprog->filter, fsize);
641
642 atomic_set(&fp->refcnt, 1);
643 fp->len = fprog->len;
644
645 err = __sk_prepare_filter(fp);
646 if (err)
647 goto free_mem;
648
649 *pfp = fp;
650 return 0;
651free_mem:
652 kfree(fp);
653 return err;
654}
655EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
656
657void sk_unattached_filter_destroy(struct sk_filter *fp)
658{
659 sk_filter_release(fp);
660}
661EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
662
590/** 663/**
591 * sk_attach_filter - attach a socket filter 664 * sk_attach_filter - attach a socket filter
592 * @fprog: the filter program 665 * @fprog: the filter program
@@ -617,16 +690,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
617 690
618 atomic_set(&fp->refcnt, 1); 691 atomic_set(&fp->refcnt, 1);
619 fp->len = fprog->len; 692 fp->len = fprog->len;
620 fp->bpf_func = sk_run_filter;
621 693
622 err = sk_chk_filter(fp->insns, fp->len); 694 err = __sk_prepare_filter(fp);
623 if (err) { 695 if (err) {
624 sk_filter_uncharge(sk, fp); 696 sk_filter_uncharge(sk, fp);
625 return err; 697 return err;
626 } 698 }
627 699
628 bpf_jit_compile(fp);
629
630 old_fp = rcu_dereference_protected(sk->sk_filter, 700 old_fp = rcu_dereference_protected(sk->sk_filter,
631 sock_owned_by_user(sk)); 701 sock_owned_by_user(sk));
632 rcu_assign_pointer(sk->sk_filter, fp); 702 rcu_assign_pointer(sk->sk_filter, fp);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 43b03dd71e85..d9d198aa9fed 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <asm/system.h>
18#include <linux/bitops.h> 17#include <linux/bitops.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/types.h> 19#include <linux/types.h>
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0452eb27a272..ddedf211e588 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -27,7 +27,8 @@
27static inline int 27static inline int
28gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) 28gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
29{ 29{
30 NLA_PUT(d->skb, type, size, buf); 30 if (nla_put(d->skb, type, size, buf))
31 goto nla_put_failure;
31 return 0; 32 return 0;
32 33
33nla_put_failure: 34nla_put_failure:
diff --git a/net/core/iovec.c b/net/core/iovec.c
index c40f27e7d208..7e7aeb01de45 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,7 +35,7 @@
35 * in any case. 35 * in any case.
36 */ 36 */
37 37
38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
39{ 39{
40 int size, ct, err; 40 int size, ct, err;
41 41
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 81e1ed7c8383..000000000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#include <linux/highmem.h>
2
3static inline void *kmap_skb_frag(const skb_frag_t *frag)
4{
5#ifdef CONFIG_HIGHMEM
6 BUG_ON(in_irq());
7
8 local_bh_disable();
9#endif
10 return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
11}
12
13static inline void kunmap_skb_frag(void *vaddr)
14{
15 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
16#ifdef CONFIG_HIGHMEM
17 local_bh_enable();
18#endif
19}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2a83914b0277..eb09f8bbbf07 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,8 @@
15 * Harald Welte Add neighbour cache statistics like rtstat 15 * Harald Welte Add neighbour cache statistics like rtstat
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/slab.h> 20#include <linux/slab.h>
19#include <linux/types.h> 21#include <linux/types.h>
20#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -712,14 +714,13 @@ void neigh_destroy(struct neighbour *neigh)
712 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); 714 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
713 715
714 if (!neigh->dead) { 716 if (!neigh->dead) {
715 printk(KERN_WARNING 717 pr_warn("Destroying alive neighbour %p\n", neigh);
716 "Destroying alive neighbour %p\n", neigh);
717 dump_stack(); 718 dump_stack();
718 return; 719 return;
719 } 720 }
720 721
721 if (neigh_del_timer(neigh)) 722 if (neigh_del_timer(neigh))
722 printk(KERN_WARNING "Impossible event.\n"); 723 pr_warn("Impossible event\n");
723 724
724 skb_queue_purge(&neigh->arp_queue); 725 skb_queue_purge(&neigh->arp_queue);
725 neigh->arp_queue_len_bytes = 0; 726 neigh->arp_queue_len_bytes = 0;
@@ -890,7 +891,7 @@ static void neigh_timer_handler(unsigned long arg)
890{ 891{
891 unsigned long now, next; 892 unsigned long now, next;
892 struct neighbour *neigh = (struct neighbour *)arg; 893 struct neighbour *neigh = (struct neighbour *)arg;
893 unsigned state; 894 unsigned int state;
894 int notify = 0; 895 int notify = 0;
895 896
896 write_lock(&neigh->lock); 897 write_lock(&neigh->lock);
@@ -1500,7 +1501,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms)
1500 1501
1501static struct lock_class_key neigh_table_proxy_queue_class; 1502static struct lock_class_key neigh_table_proxy_queue_class;
1502 1503
1503void neigh_table_init_no_netlink(struct neigh_table *tbl) 1504static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1504{ 1505{
1505 unsigned long now = jiffies; 1506 unsigned long now = jiffies;
1506 unsigned long phsize; 1507 unsigned long phsize;
@@ -1538,7 +1539,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1538 tbl->last_flush = now; 1539 tbl->last_flush = now;
1539 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1540 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1540} 1541}
1541EXPORT_SYMBOL(neigh_table_init_no_netlink);
1542 1542
1543void neigh_table_init(struct neigh_table *tbl) 1543void neigh_table_init(struct neigh_table *tbl)
1544{ 1544{
@@ -1555,8 +1555,8 @@ void neigh_table_init(struct neigh_table *tbl)
1555 write_unlock(&neigh_tbl_lock); 1555 write_unlock(&neigh_tbl_lock);
1556 1556
1557 if (unlikely(tmp)) { 1557 if (unlikely(tmp)) {
1558 printk(KERN_ERR "NEIGH: Registering multiple tables for " 1558 pr_err("Registering multiple tables for family %d\n",
1559 "family %d\n", tbl->family); 1559 tbl->family);
1560 dump_stack(); 1560 dump_stack();
1561 } 1561 }
1562} 1562}
@@ -1572,7 +1572,7 @@ int neigh_table_clear(struct neigh_table *tbl)
1572 pneigh_queue_purge(&tbl->proxy_queue); 1572 pneigh_queue_purge(&tbl->proxy_queue);
1573 neigh_ifdown(tbl, NULL); 1573 neigh_ifdown(tbl, NULL);
1574 if (atomic_read(&tbl->entries)) 1574 if (atomic_read(&tbl->entries))
1575 printk(KERN_CRIT "neighbour leakage\n"); 1575 pr_crit("neighbour leakage\n");
1576 write_lock(&neigh_tbl_lock); 1576 write_lock(&neigh_tbl_lock);
1577 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { 1577 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1578 if (*tp == tbl) { 1578 if (*tp == tbl) {
@@ -1768,29 +1768,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1768 if (nest == NULL) 1768 if (nest == NULL)
1769 return -ENOBUFS; 1769 return -ENOBUFS;
1770 1770
1771 if (parms->dev) 1771 if ((parms->dev &&
1772 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); 1772 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1773 1773 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1774 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); 1774 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1775 NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); 1775 /* approximative value for deprecated QUEUE_LEN (in packets) */
1776 /* approximative value for deprecated QUEUE_LEN (in packets) */ 1776 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1777 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, 1777 DIV_ROUND_UP(parms->queue_len_bytes,
1778 DIV_ROUND_UP(parms->queue_len_bytes, 1778 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1779 SKB_TRUESIZE(ETH_FRAME_LEN))); 1779 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1780 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); 1780 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1781 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); 1781 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1782 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); 1782 nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1783 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes); 1783 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1784 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time); 1784 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1785 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, 1785 parms->base_reachable_time) ||
1786 parms->base_reachable_time); 1786 nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1787 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime); 1787 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1788 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time); 1788 parms->delay_probe_time) ||
1789 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time); 1789 nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1790 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay); 1790 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1791 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay); 1791 nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1792 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime); 1792 nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1793 1793 goto nla_put_failure;
1794 return nla_nest_end(skb, nest); 1794 return nla_nest_end(skb, nest);
1795 1795
1796nla_put_failure: 1796nla_put_failure:
@@ -1815,12 +1815,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1815 ndtmsg->ndtm_pad1 = 0; 1815 ndtmsg->ndtm_pad1 = 0;
1816 ndtmsg->ndtm_pad2 = 0; 1816 ndtmsg->ndtm_pad2 = 0;
1817 1817
1818 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id); 1818 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1819 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); 1819 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1820 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1); 1820 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1821 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2); 1821 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1822 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3); 1822 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1823 1823 goto nla_put_failure;
1824 { 1824 {
1825 unsigned long now = jiffies; 1825 unsigned long now = jiffies;
1826 unsigned int flush_delta = now - tbl->last_flush; 1826 unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1841,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1841 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 1841 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1842 rcu_read_unlock_bh(); 1842 rcu_read_unlock_bh();
1843 1843
1844 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); 1844 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1845 goto nla_put_failure;
1845 } 1846 }
1846 1847
1847 { 1848 {
@@ -1866,7 +1867,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1866 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 1867 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1867 } 1868 }
1868 1869
1869 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst); 1870 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1871 goto nla_put_failure;
1870 } 1872 }
1871 1873
1872 BUG_ON(tbl->parms.dev); 1874 BUG_ON(tbl->parms.dev);
@@ -2137,7 +2139,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2137 ndm->ndm_type = neigh->type; 2139 ndm->ndm_type = neigh->type;
2138 ndm->ndm_ifindex = neigh->dev->ifindex; 2140 ndm->ndm_ifindex = neigh->dev->ifindex;
2139 2141
2140 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key); 2142 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2143 goto nla_put_failure;
2141 2144
2142 read_lock_bh(&neigh->lock); 2145 read_lock_bh(&neigh->lock);
2143 ndm->ndm_state = neigh->nud_state; 2146 ndm->ndm_state = neigh->nud_state;
@@ -2157,8 +2160,39 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2157 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; 2160 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2158 read_unlock_bh(&neigh->lock); 2161 read_unlock_bh(&neigh->lock);
2159 2162
2160 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); 2163 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2161 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); 2164 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2165 goto nla_put_failure;
2166
2167 return nlmsg_end(skb, nlh);
2168
2169nla_put_failure:
2170 nlmsg_cancel(skb, nlh);
2171 return -EMSGSIZE;
2172}
2173
2174static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2175 u32 pid, u32 seq, int type, unsigned int flags,
2176 struct neigh_table *tbl)
2177{
2178 struct nlmsghdr *nlh;
2179 struct ndmsg *ndm;
2180
2181 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2182 if (nlh == NULL)
2183 return -EMSGSIZE;
2184
2185 ndm = nlmsg_data(nlh);
2186 ndm->ndm_family = tbl->family;
2187 ndm->ndm_pad1 = 0;
2188 ndm->ndm_pad2 = 0;
2189 ndm->ndm_flags = pn->flags | NTF_PROXY;
2190 ndm->ndm_type = NDA_DST;
2191 ndm->ndm_ifindex = pn->dev->ifindex;
2192 ndm->ndm_state = NUD_NONE;
2193
2194 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2195 goto nla_put_failure;
2162 2196
2163 return nlmsg_end(skb, nlh); 2197 return nlmsg_end(skb, nlh);
2164 2198
@@ -2216,23 +2250,78 @@ out:
2216 return rc; 2250 return rc;
2217} 2251}
2218 2252
2253static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2254 struct netlink_callback *cb)
2255{
2256 struct pneigh_entry *n;
2257 struct net *net = sock_net(skb->sk);
2258 int rc, h, s_h = cb->args[3];
2259 int idx, s_idx = idx = cb->args[4];
2260
2261 read_lock_bh(&tbl->lock);
2262
2263 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2264 if (h < s_h)
2265 continue;
2266 if (h > s_h)
2267 s_idx = 0;
2268 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2269 if (dev_net(n->dev) != net)
2270 continue;
2271 if (idx < s_idx)
2272 goto next;
2273 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2274 cb->nlh->nlmsg_seq,
2275 RTM_NEWNEIGH,
2276 NLM_F_MULTI, tbl) <= 0) {
2277 read_unlock_bh(&tbl->lock);
2278 rc = -1;
2279 goto out;
2280 }
2281 next:
2282 idx++;
2283 }
2284 }
2285
2286 read_unlock_bh(&tbl->lock);
2287 rc = skb->len;
2288out:
2289 cb->args[3] = h;
2290 cb->args[4] = idx;
2291 return rc;
2292
2293}
2294
2219static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2295static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2220{ 2296{
2221 struct neigh_table *tbl; 2297 struct neigh_table *tbl;
2222 int t, family, s_t; 2298 int t, family, s_t;
2299 int proxy = 0;
2300 int err = 0;
2223 2301
2224 read_lock(&neigh_tbl_lock); 2302 read_lock(&neigh_tbl_lock);
2225 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; 2303 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2304
2305 /* check for full ndmsg structure presence, family member is
2306 * the same for both structures
2307 */
2308 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2309 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2310 proxy = 1;
2311
2226 s_t = cb->args[0]; 2312 s_t = cb->args[0];
2227 2313
2228 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) { 2314 for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
2315 tbl = tbl->next, t++) {
2229 if (t < s_t || (family && tbl->family != family)) 2316 if (t < s_t || (family && tbl->family != family))
2230 continue; 2317 continue;
2231 if (t > s_t) 2318 if (t > s_t)
2232 memset(&cb->args[1], 0, sizeof(cb->args) - 2319 memset(&cb->args[1], 0, sizeof(cb->args) -
2233 sizeof(cb->args[0])); 2320 sizeof(cb->args[0]));
2234 if (neigh_dump_table(tbl, skb, cb) < 0) 2321 if (proxy)
2235 break; 2322 err = pneigh_dump_table(tbl, skb, cb);
2323 else
2324 err = neigh_dump_table(tbl, skb, cb);
2236 } 2325 }
2237 read_unlock(&neigh_tbl_lock); 2326 read_unlock(&neigh_tbl_lock);
2238 2327
@@ -2711,7 +2800,6 @@ enum {
2711static struct neigh_sysctl_table { 2800static struct neigh_sysctl_table {
2712 struct ctl_table_header *sysctl_header; 2801 struct ctl_table_header *sysctl_header;
2713 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 2802 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2714 char *dev_name;
2715} neigh_sysctl_template __read_mostly = { 2803} neigh_sysctl_template __read_mostly = {
2716 .neigh_vars = { 2804 .neigh_vars = {
2717 [NEIGH_VAR_MCAST_PROBE] = { 2805 [NEIGH_VAR_MCAST_PROBE] = {
@@ -2837,19 +2925,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2837{ 2925{
2838 struct neigh_sysctl_table *t; 2926 struct neigh_sysctl_table *t;
2839 const char *dev_name_source = NULL; 2927 const char *dev_name_source = NULL;
2840 2928 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2841#define NEIGH_CTL_PATH_ROOT 0
2842#define NEIGH_CTL_PATH_PROTO 1
2843#define NEIGH_CTL_PATH_NEIGH 2
2844#define NEIGH_CTL_PATH_DEV 3
2845
2846 struct ctl_path neigh_path[] = {
2847 { .procname = "net", },
2848 { .procname = "proto", },
2849 { .procname = "neigh", },
2850 { .procname = "default", },
2851 { },
2852 };
2853 2929
2854 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); 2930 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2855 if (!t) 2931 if (!t)
@@ -2877,7 +2953,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2877 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 2953 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2878 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 2954 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2879 } else { 2955 } else {
2880 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; 2956 dev_name_source = "default";
2881 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); 2957 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2882 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; 2958 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2883 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; 2959 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
@@ -2900,23 +2976,16 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2900 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; 2976 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2901 } 2977 }
2902 2978
2903 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); 2979 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2904 if (!t->dev_name) 2980 p_name, dev_name_source);
2905 goto free;
2906
2907 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2908 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2909
2910 t->sysctl_header = 2981 t->sysctl_header =
2911 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars); 2982 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
2912 if (!t->sysctl_header) 2983 if (!t->sysctl_header)
2913 goto free_procname; 2984 goto free;
2914 2985
2915 p->sysctl_table = t; 2986 p->sysctl_table = t;
2916 return 0; 2987 return 0;
2917 2988
2918free_procname:
2919 kfree(t->dev_name);
2920free: 2989free:
2921 kfree(t); 2990 kfree(t);
2922err: 2991err:
@@ -2929,8 +2998,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p)
2929 if (p->sysctl_table) { 2998 if (p->sysctl_table) {
2930 struct neigh_sysctl_table *t = p->sysctl_table; 2999 struct neigh_sysctl_table *t = p->sysctl_table;
2931 p->sysctl_table = NULL; 3000 p->sysctl_table = NULL;
2932 unregister_sysctl_table(t->sysctl_header); 3001 unregister_net_sysctl_table(t->sysctl_header);
2933 kfree(t->dev_name);
2934 kfree(t); 3002 kfree(t);
2935 } 3003 }
2936} 3004}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a1727cda03d7..fdf9e61d0651 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
74 int (*set)(struct net_device *, unsigned long)) 74 int (*set)(struct net_device *, unsigned long))
75{ 75{
76 struct net_device *net = to_net_dev(dev); 76 struct net_device *net = to_net_dev(dev);
77 char *endp;
78 unsigned long new; 77 unsigned long new;
79 int ret = -EINVAL; 78 int ret = -EINVAL;
80 79
81 if (!capable(CAP_NET_ADMIN)) 80 if (!capable(CAP_NET_ADMIN))
82 return -EPERM; 81 return -EPERM;
83 82
84 new = simple_strtoul(buf, &endp, 0); 83 ret = kstrtoul(buf, 0, &new);
85 if (endp == buf) 84 if (ret)
86 goto err; 85 goto err;
87 86
88 if (!rtnl_trylock()) 87 if (!rtnl_trylock())
@@ -232,7 +231,7 @@ NETDEVICE_SHOW(flags, fmt_hex);
232 231
233static int change_flags(struct net_device *net, unsigned long new_flags) 232static int change_flags(struct net_device *net, unsigned long new_flags)
234{ 233{
235 return dev_change_flags(net, (unsigned) new_flags); 234 return dev_change_flags(net, (unsigned int) new_flags);
236} 235}
237 236
238static ssize_t store_flags(struct device *dev, struct device_attribute *attr, 237static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
@@ -582,7 +581,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
582 return err; 581 return err;
583 } 582 }
584 583
585 map = kzalloc(max_t(unsigned, 584 map = kzalloc(max_t(unsigned int,
586 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), 585 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
587 GFP_KERNEL); 586 GFP_KERNEL);
588 if (!map) { 587 if (!map) {
@@ -608,10 +607,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
608 spin_unlock(&rps_map_lock); 607 spin_unlock(&rps_map_lock);
609 608
610 if (map) 609 if (map)
611 jump_label_inc(&rps_needed); 610 static_key_slow_inc(&rps_needed);
612 if (old_map) { 611 if (old_map) {
613 kfree_rcu(old_map, rcu); 612 kfree_rcu(old_map, rcu);
614 jump_label_dec(&rps_needed); 613 static_key_slow_dec(&rps_needed);
615 } 614 }
616 free_cpumask_var(mask); 615 free_cpumask_var(mask);
617 return len; 616 return len;
@@ -903,7 +902,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue,
903 const char *buf, size_t len) 902 const char *buf, size_t len)
904{ 903{
905 struct dql *dql = &queue->dql; 904 struct dql *dql = &queue->dql;
906 unsigned value; 905 unsigned int value;
907 int err; 906 int err;
908 907
909 err = kstrtouint(buf, 10, &value); 908 err = kstrtouint(buf, 10, &value);
@@ -1107,7 +1106,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1107 return err; 1106 return err;
1108 } 1107 }
1109 1108
1110 new_dev_maps = kzalloc(max_t(unsigned, 1109 new_dev_maps = kzalloc(max_t(unsigned int,
1111 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL); 1110 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
1112 if (!new_dev_maps) { 1111 if (!new_dev_maps) {
1113 free_cpumask_var(mask); 1112 free_cpumask_var(mask);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..dddbacb8f28c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/workqueue.h> 3#include <linux/workqueue.h>
2#include <linux/rtnetlink.h> 4#include <linux/rtnetlink.h>
3#include <linux/cache.h> 5#include <linux/cache.h>
@@ -83,21 +85,29 @@ assign:
83 85
84static int ops_init(const struct pernet_operations *ops, struct net *net) 86static int ops_init(const struct pernet_operations *ops, struct net *net)
85{ 87{
86 int err; 88 int err = -ENOMEM;
89 void *data = NULL;
90
87 if (ops->id && ops->size) { 91 if (ops->id && ops->size) {
88 void *data = kzalloc(ops->size, GFP_KERNEL); 92 data = kzalloc(ops->size, GFP_KERNEL);
89 if (!data) 93 if (!data)
90 return -ENOMEM; 94 goto out;
91 95
92 err = net_assign_generic(net, *ops->id, data); 96 err = net_assign_generic(net, *ops->id, data);
93 if (err) { 97 if (err)
94 kfree(data); 98 goto cleanup;
95 return err;
96 }
97 } 99 }
100 err = 0;
98 if (ops->init) 101 if (ops->init)
99 return ops->init(net); 102 err = ops->init(net);
100 return 0; 103 if (!err)
104 return 0;
105
106cleanup:
107 kfree(data);
108
109out:
110 return err;
101} 111}
102 112
103static void ops_free(const struct pernet_operations *ops, struct net *net) 113static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -204,8 +214,8 @@ static void net_free(struct net *net)
204{ 214{
205#ifdef NETNS_REFCNT_DEBUG 215#ifdef NETNS_REFCNT_DEBUG
206 if (unlikely(atomic_read(&net->use_count) != 0)) { 216 if (unlikely(atomic_read(&net->use_count) != 0)) {
207 printk(KERN_EMERG "network namespace not free! Usage: %d\n", 217 pr_emerg("network namespace not free! Usage: %d\n",
208 atomic_read(&net->use_count)); 218 atomic_read(&net->use_count));
209 return; 219 return;
210 } 220 }
211#endif 221#endif
@@ -448,12 +458,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
448static int __register_pernet_operations(struct list_head *list, 458static int __register_pernet_operations(struct list_head *list,
449 struct pernet_operations *ops) 459 struct pernet_operations *ops)
450{ 460{
451 int err = 0; 461 return ops_init(ops, &init_net);
452 err = ops_init(ops, &init_net);
453 if (err)
454 ops_free(ops, &init_net);
455 return err;
456
457} 462}
458 463
459static void __unregister_pernet_operations(struct pernet_operations *ops) 464static void __unregister_pernet_operations(struct pernet_operations *ops)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ddefc513b44a..3d84fb9d8873 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
9 * Copyright (C) 2002 Red Hat, Inc. 9 * Copyright (C) 2002 Red Hat, Inc.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
13#include <linux/netdevice.h> 15#include <linux/netdevice.h>
14#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
45#define NETPOLL_RX_ENABLED 1 47#define NETPOLL_RX_ENABLED 1
46#define NETPOLL_RX_DROP 2 48#define NETPOLL_RX_DROP 2
47 49
48#define MAX_SKB_SIZE \ 50#define MAX_SKB_SIZE \
49 (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ 51 (sizeof(struct ethhdr) + \
50 sizeof(struct iphdr) + sizeof(struct ethhdr)) 52 sizeof(struct iphdr) + \
53 sizeof(struct udphdr) + \
54 MAX_UDP_CHUNK)
51 55
52static void zap_completion_queue(void); 56static void zap_completion_queue(void);
53static void arp_reply(struct sk_buff *skb); 57static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
55static unsigned int carrier_timeout = 4; 59static unsigned int carrier_timeout = 4;
56module_param(carrier_timeout, uint, 0644); 60module_param(carrier_timeout, uint, 0644);
57 61
62#define np_info(np, fmt, ...) \
63 pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
64#define np_err(np, fmt, ...) \
65 pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
66#define np_notice(np, fmt, ...) \
67 pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
68
58static void queue_process(struct work_struct *work) 69static void queue_process(struct work_struct *work)
59{ 70{
60 struct netpoll_info *npinfo = 71 struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
627 638
628void netpoll_print_options(struct netpoll *np) 639void netpoll_print_options(struct netpoll *np)
629{ 640{
630 printk(KERN_INFO "%s: local port %d\n", 641 np_info(np, "local port %d\n", np->local_port);
631 np->name, np->local_port); 642 np_info(np, "local IP %pI4\n", &np->local_ip);
632 printk(KERN_INFO "%s: local IP %pI4\n", 643 np_info(np, "interface '%s'\n", np->dev_name);
633 np->name, &np->local_ip); 644 np_info(np, "remote port %d\n", np->remote_port);
634 printk(KERN_INFO "%s: interface '%s'\n", 645 np_info(np, "remote IP %pI4\n", &np->remote_ip);
635 np->name, np->dev_name); 646 np_info(np, "remote ethernet address %pM\n", np->remote_mac);
636 printk(KERN_INFO "%s: remote port %d\n",
637 np->name, np->remote_port);
638 printk(KERN_INFO "%s: remote IP %pI4\n",
639 np->name, &np->remote_ip);
640 printk(KERN_INFO "%s: remote ethernet address %pM\n",
641 np->name, np->remote_mac);
642} 647}
643EXPORT_SYMBOL(netpoll_print_options); 648EXPORT_SYMBOL(netpoll_print_options);
644 649
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
680 goto parse_failed; 685 goto parse_failed;
681 *delim = 0; 686 *delim = 0;
682 if (*cur == ' ' || *cur == '\t') 687 if (*cur == ' ' || *cur == '\t')
683 printk(KERN_INFO "%s: warning: whitespace" 688 np_info(np, "warning: whitespace is not allowed\n");
684 "is not allowed\n", np->name);
685 np->remote_port = simple_strtol(cur, NULL, 10); 689 np->remote_port = simple_strtol(cur, NULL, 10);
686 cur = delim; 690 cur = delim;
687 } 691 }
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
705 return 0; 709 return 0;
706 710
707 parse_failed: 711 parse_failed:
708 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", 712 np_info(np, "couldn't parse config at '%s'!\n", cur);
709 np->name, cur);
710 return -1; 713 return -1;
711} 714}
712EXPORT_SYMBOL(netpoll_parse_options); 715EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
721 724
722 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 725 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
723 !ndev->netdev_ops->ndo_poll_controller) { 726 !ndev->netdev_ops->ndo_poll_controller) {
724 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", 727 np_err(np, "%s doesn't support polling, aborting\n",
725 np->name, np->dev_name); 728 np->dev_name);
726 err = -ENOTSUPP; 729 err = -ENOTSUPP;
727 goto out; 730 goto out;
728 } 731 }
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
785 if (np->dev_name) 788 if (np->dev_name)
786 ndev = dev_get_by_name(&init_net, np->dev_name); 789 ndev = dev_get_by_name(&init_net, np->dev_name);
787 if (!ndev) { 790 if (!ndev) {
788 printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", 791 np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
789 np->name, np->dev_name);
790 return -ENODEV; 792 return -ENODEV;
791 } 793 }
792 794
793 if (ndev->master) { 795 if (ndev->master) {
794 printk(KERN_ERR "%s: %s is a slave device, aborting.\n", 796 np_err(np, "%s is a slave device, aborting\n", np->dev_name);
795 np->name, np->dev_name);
796 err = -EBUSY; 797 err = -EBUSY;
797 goto put; 798 goto put;
798 } 799 }
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
800 if (!netif_running(ndev)) { 801 if (!netif_running(ndev)) {
801 unsigned long atmost, atleast; 802 unsigned long atmost, atleast;
802 803
803 printk(KERN_INFO "%s: device %s not up yet, forcing it\n", 804 np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
804 np->name, np->dev_name);
805 805
806 rtnl_lock(); 806 rtnl_lock();
807 err = dev_open(ndev); 807 err = dev_open(ndev);
808 rtnl_unlock(); 808 rtnl_unlock();
809 809
810 if (err) { 810 if (err) {
811 printk(KERN_ERR "%s: failed to open %s\n", 811 np_err(np, "failed to open %s\n", ndev->name);
812 np->name, ndev->name);
813 goto put; 812 goto put;
814 } 813 }
815 814
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
817 atmost = jiffies + carrier_timeout * HZ; 816 atmost = jiffies + carrier_timeout * HZ;
818 while (!netif_carrier_ok(ndev)) { 817 while (!netif_carrier_ok(ndev)) {
819 if (time_after(jiffies, atmost)) { 818 if (time_after(jiffies, atmost)) {
820 printk(KERN_NOTICE 819 np_notice(np, "timeout waiting for carrier\n");
821 "%s: timeout waiting for carrier\n",
822 np->name);
823 break; 820 break;
824 } 821 }
825 msleep(1); 822 msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
831 */ 828 */
832 829
833 if (time_before(jiffies, atleast)) { 830 if (time_before(jiffies, atleast)) {
834 printk(KERN_NOTICE "%s: carrier detect appears" 831 np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
835 " untrustworthy, waiting 4 seconds\n",
836 np->name);
837 msleep(4000); 832 msleep(4000);
838 } 833 }
839 } 834 }
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
844 839
845 if (!in_dev || !in_dev->ifa_list) { 840 if (!in_dev || !in_dev->ifa_list) {
846 rcu_read_unlock(); 841 rcu_read_unlock();
847 printk(KERN_ERR "%s: no IP address for %s, aborting\n", 842 np_err(np, "no IP address for %s, aborting\n",
848 np->name, np->dev_name); 843 np->dev_name);
849 err = -EDESTADDRREQ; 844 err = -EDESTADDRREQ;
850 goto put; 845 goto put;
851 } 846 }
852 847
853 np->local_ip = in_dev->ifa_list->ifa_local; 848 np->local_ip = in_dev->ifa_list->ifa_local;
854 rcu_read_unlock(); 849 rcu_read_unlock();
855 printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); 850 np_info(np, "local IP %pI4\n", &np->local_ip);
856 } 851 }
857 852
858 np->dev = ndev; 853 np->dev = ndev;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 4dacc44637ef..5b8aa2fae48b 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -9,6 +9,8 @@
9 * Authors: Neil Horman <nhorman@tuxdriver.com> 9 * Authors: Neil Horman <nhorman@tuxdriver.com>
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/types.h> 16#include <linux/types.h>
@@ -23,22 +25,6 @@
23#include <net/sock.h> 25#include <net/sock.h>
24#include <net/netprio_cgroup.h> 26#include <net/netprio_cgroup.h>
25 27
26static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
27 struct cgroup *cgrp);
28static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
29static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
30
31struct cgroup_subsys net_prio_subsys = {
32 .name = "net_prio",
33 .create = cgrp_create,
34 .destroy = cgrp_destroy,
35 .populate = cgrp_populate,
36#ifdef CONFIG_NETPRIO_CGROUP
37 .subsys_id = net_prio_subsys_id,
38#endif
39 .module = THIS_MODULE
40};
41
42#define PRIOIDX_SZ 128 28#define PRIOIDX_SZ 128
43 29
44static unsigned long prioidx_map[PRIOIDX_SZ]; 30static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -89,7 +75,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
89 old_priomap = rtnl_dereference(dev->priomap); 75 old_priomap = rtnl_dereference(dev->priomap);
90 76
91 if (!new_priomap) { 77 if (!new_priomap) {
92 printk(KERN_WARNING "Unable to alloc new priomap!\n"); 78 pr_warn("Unable to alloc new priomap!\n");
93 return; 79 return;
94 } 80 }
95 81
@@ -121,8 +107,7 @@ static void update_netdev_tables(void)
121 rtnl_unlock(); 107 rtnl_unlock();
122} 108}
123 109
124static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 110static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
125 struct cgroup *cgrp)
126{ 111{
127 struct cgroup_netprio_state *cs; 112 struct cgroup_netprio_state *cs;
128 int ret; 113 int ret;
@@ -138,7 +123,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
138 123
139 ret = get_prioidx(&cs->prioidx); 124 ret = get_prioidx(&cs->prioidx);
140 if (ret != 0) { 125 if (ret != 0) {
141 printk(KERN_WARNING "No space in priority index array\n"); 126 pr_warn("No space in priority index array\n");
142 kfree(cs); 127 kfree(cs);
143 return ERR_PTR(ret); 128 return ERR_PTR(ret);
144 } 129 }
@@ -146,7 +131,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
146 return &cs->css; 131 return &cs->css;
147} 132}
148 133
149static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) 134static void cgrp_destroy(struct cgroup *cgrp)
150{ 135{
151 struct cgroup_netprio_state *cs; 136 struct cgroup_netprio_state *cs;
152 struct net_device *dev; 137 struct net_device *dev;
@@ -259,12 +244,19 @@ static struct cftype ss_files[] = {
259 .read_map = read_priomap, 244 .read_map = read_priomap,
260 .write_string = write_priomap, 245 .write_string = write_priomap,
261 }, 246 },
247 { } /* terminate */
262}; 248};
263 249
264static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) 250struct cgroup_subsys net_prio_subsys = {
265{ 251 .name = "net_prio",
266 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 252 .create = cgrp_create,
267} 253 .destroy = cgrp_destroy,
254#ifdef CONFIG_NETPRIO_CGROUP
255 .subsys_id = net_prio_subsys_id,
256#endif
257 .base_cftypes = ss_files,
258 .module = THIS_MODULE
259};
268 260
269static int netprio_device_event(struct notifier_block *unused, 261static int netprio_device_event(struct notifier_block *unused,
270 unsigned long event, void *ptr) 262 unsigned long event, void *ptr)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd503..cce9e53528b1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -320,7 +320,7 @@ struct pktgen_dev {
320 (see RFC 3260, sec. 4) */ 320 (see RFC 3260, sec. 4) */
321 321
322 /* MPLS */ 322 /* MPLS */
323 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ 323 unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */
324 __be32 labels[MAX_MPLS_LABELS]; 324 __be32 labels[MAX_MPLS_LABELS];
325 325
326 /* VLAN/SVLAN (802.1Q/Q-in-Q) */ 326 /* VLAN/SVLAN (802.1Q/Q-in-Q) */
@@ -373,10 +373,10 @@ struct pktgen_dev {
373 */ 373 */
374 char odevname[32]; 374 char odevname[32];
375 struct flow_state *flows; 375 struct flow_state *flows;
376 unsigned cflows; /* Concurrent flows (config) */ 376 unsigned int cflows; /* Concurrent flows (config) */
377 unsigned lflow; /* Flow length (config) */ 377 unsigned int lflow; /* Flow length (config) */
378 unsigned nflows; /* accumulated flows (stats) */ 378 unsigned int nflows; /* accumulated flows (stats) */
379 unsigned curfl; /* current sequenced flow (state)*/ 379 unsigned int curfl; /* current sequenced flow (state)*/
380 380
381 u16 queue_map_min; 381 u16 queue_map_min;
382 u16 queue_map_max; 382 u16 queue_map_max;
@@ -592,7 +592,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
592 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 592 pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
593 593
594 if (pkt_dev->nr_labels) { 594 if (pkt_dev->nr_labels) {
595 unsigned i; 595 unsigned int i;
596 seq_printf(seq, " mpls: "); 596 seq_printf(seq, " mpls: ");
597 for (i = 0; i < pkt_dev->nr_labels; i++) 597 for (i = 0; i < pkt_dev->nr_labels; i++)
598 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 598 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -812,7 +812,7 @@ done_str:
812 812
813static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 813static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
814{ 814{
815 unsigned n = 0; 815 unsigned int n = 0;
816 char c; 816 char c;
817 ssize_t i = 0; 817 ssize_t i = 0;
818 int len; 818 int len;
@@ -891,8 +891,8 @@ static ssize_t pktgen_if_write(struct file *file,
891 if (copy_from_user(tb, user_buffer, copy)) 891 if (copy_from_user(tb, user_buffer, copy))
892 return -EFAULT; 892 return -EFAULT;
893 tb[copy] = 0; 893 tb[copy] = 0;
894 printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, 894 pr_debug("%s,%lu buffer -:%s:-\n",
895 (unsigned long)count, tb); 895 name, (unsigned long)count, tb);
896 } 896 }
897 897
898 if (!strcmp(name, "min_pkt_size")) { 898 if (!strcmp(name, "min_pkt_size")) {
@@ -1261,8 +1261,7 @@ static ssize_t pktgen_if_write(struct file *file,
1261 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1261 pkt_dev->cur_daddr = pkt_dev->daddr_min;
1262 } 1262 }
1263 if (debug) 1263 if (debug)
1264 printk(KERN_DEBUG "pktgen: dst_min set to: %s\n", 1264 pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
1265 pkt_dev->dst_min);
1266 i += len; 1265 i += len;
1267 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); 1266 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
1268 return count; 1267 return count;
@@ -1284,8 +1283,7 @@ static ssize_t pktgen_if_write(struct file *file,
1284 pkt_dev->cur_daddr = pkt_dev->daddr_max; 1283 pkt_dev->cur_daddr = pkt_dev->daddr_max;
1285 } 1284 }
1286 if (debug) 1285 if (debug)
1287 printk(KERN_DEBUG "pktgen: dst_max set to: %s\n", 1286 pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
1288 pkt_dev->dst_max);
1289 i += len; 1287 i += len;
1290 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); 1288 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
1291 return count; 1289 return count;
@@ -1307,7 +1305,7 @@ static ssize_t pktgen_if_write(struct file *file,
1307 pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; 1305 pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
1308 1306
1309 if (debug) 1307 if (debug)
1310 printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf); 1308 pr_debug("dst6 set to: %s\n", buf);
1311 1309
1312 i += len; 1310 i += len;
1313 sprintf(pg_result, "OK: dst6=%s", buf); 1311 sprintf(pg_result, "OK: dst6=%s", buf);
@@ -1329,7 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
1329 1327
1330 pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; 1328 pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
1331 if (debug) 1329 if (debug)
1332 printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf); 1330 pr_debug("dst6_min set to: %s\n", buf);
1333 1331
1334 i += len; 1332 i += len;
1335 sprintf(pg_result, "OK: dst6_min=%s", buf); 1333 sprintf(pg_result, "OK: dst6_min=%s", buf);
@@ -1350,7 +1348,7 @@ static ssize_t pktgen_if_write(struct file *file,
1350 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr); 1348 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1351 1349
1352 if (debug) 1350 if (debug)
1353 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); 1351 pr_debug("dst6_max set to: %s\n", buf);
1354 1352
1355 i += len; 1353 i += len;
1356 sprintf(pg_result, "OK: dst6_max=%s", buf); 1354 sprintf(pg_result, "OK: dst6_max=%s", buf);
@@ -1373,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file,
1373 pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; 1371 pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
1374 1372
1375 if (debug) 1373 if (debug)
1376 printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf); 1374 pr_debug("src6 set to: %s\n", buf);
1377 1375
1378 i += len; 1376 i += len;
1379 sprintf(pg_result, "OK: src6=%s", buf); 1377 sprintf(pg_result, "OK: src6=%s", buf);
@@ -1394,8 +1392,7 @@ static ssize_t pktgen_if_write(struct file *file,
1394 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1392 pkt_dev->cur_saddr = pkt_dev->saddr_min;
1395 } 1393 }
1396 if (debug) 1394 if (debug)
1397 printk(KERN_DEBUG "pktgen: src_min set to: %s\n", 1395 pr_debug("src_min set to: %s\n", pkt_dev->src_min);
1398 pkt_dev->src_min);
1399 i += len; 1396 i += len;
1400 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1397 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
1401 return count; 1398 return count;
@@ -1415,8 +1412,7 @@ static ssize_t pktgen_if_write(struct file *file,
1415 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1412 pkt_dev->cur_saddr = pkt_dev->saddr_max;
1416 } 1413 }
1417 if (debug) 1414 if (debug)
1418 printk(KERN_DEBUG "pktgen: src_max set to: %s\n", 1415 pr_debug("src_max set to: %s\n", pkt_dev->src_max);
1419 pkt_dev->src_max);
1420 i += len; 1416 i += len;
1421 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1417 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
1422 return count; 1418 return count;
@@ -1510,7 +1506,7 @@ static ssize_t pktgen_if_write(struct file *file,
1510 } 1506 }
1511 1507
1512 if (!strcmp(name, "mpls")) { 1508 if (!strcmp(name, "mpls")) {
1513 unsigned n, cnt; 1509 unsigned int n, cnt;
1514 1510
1515 len = get_labels(&user_buffer[i], pkt_dev); 1511 len = get_labels(&user_buffer[i], pkt_dev);
1516 if (len < 0) 1512 if (len < 0)
@@ -1527,7 +1523,7 @@ static ssize_t pktgen_if_write(struct file *file,
1527 pkt_dev->svlan_id = 0xffff; 1523 pkt_dev->svlan_id = 0xffff;
1528 1524
1529 if (debug) 1525 if (debug)
1530 printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n"); 1526 pr_debug("VLAN/SVLAN auto turned off\n");
1531 } 1527 }
1532 return count; 1528 return count;
1533 } 1529 }
@@ -1542,10 +1538,10 @@ static ssize_t pktgen_if_write(struct file *file,
1542 pkt_dev->vlan_id = value; /* turn on VLAN */ 1538 pkt_dev->vlan_id = value; /* turn on VLAN */
1543 1539
1544 if (debug) 1540 if (debug)
1545 printk(KERN_DEBUG "pktgen: VLAN turned on\n"); 1541 pr_debug("VLAN turned on\n");
1546 1542
1547 if (debug && pkt_dev->nr_labels) 1543 if (debug && pkt_dev->nr_labels)
1548 printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); 1544 pr_debug("MPLS auto turned off\n");
1549 1545
1550 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1546 pkt_dev->nr_labels = 0; /* turn off MPLS */
1551 sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); 1547 sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
@@ -1554,7 +1550,7 @@ static ssize_t pktgen_if_write(struct file *file,
1554 pkt_dev->svlan_id = 0xffff; 1550 pkt_dev->svlan_id = 0xffff;
1555 1551
1556 if (debug) 1552 if (debug)
1557 printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); 1553 pr_debug("VLAN/SVLAN turned off\n");
1558 } 1554 }
1559 return count; 1555 return count;
1560 } 1556 }
@@ -1599,10 +1595,10 @@ static ssize_t pktgen_if_write(struct file *file,
1599 pkt_dev->svlan_id = value; /* turn on SVLAN */ 1595 pkt_dev->svlan_id = value; /* turn on SVLAN */
1600 1596
1601 if (debug) 1597 if (debug)
1602 printk(KERN_DEBUG "pktgen: SVLAN turned on\n"); 1598 pr_debug("SVLAN turned on\n");
1603 1599
1604 if (debug && pkt_dev->nr_labels) 1600 if (debug && pkt_dev->nr_labels)
1605 printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); 1601 pr_debug("MPLS auto turned off\n");
1606 1602
1607 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1603 pkt_dev->nr_labels = 0; /* turn off MPLS */
1608 sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); 1604 sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
@@ -1611,7 +1607,7 @@ static ssize_t pktgen_if_write(struct file *file,
1611 pkt_dev->svlan_id = 0xffff; 1607 pkt_dev->svlan_id = 0xffff;
1612 1608
1613 if (debug) 1609 if (debug)
1614 printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); 1610 pr_debug("VLAN/SVLAN turned off\n");
1615 } 1611 }
1616 return count; 1612 return count;
1617 } 1613 }
@@ -1779,8 +1775,7 @@ static ssize_t pktgen_thread_write(struct file *file,
1779 i += len; 1775 i += len;
1780 1776
1781 if (debug) 1777 if (debug)
1782 printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n", 1778 pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
1783 name, (unsigned long)count);
1784 1779
1785 if (!t) { 1780 if (!t) {
1786 pr_err("ERROR: No thread\n"); 1781 pr_err("ERROR: No thread\n");
@@ -1931,7 +1926,7 @@ static int pktgen_device_event(struct notifier_block *unused,
1931{ 1926{
1932 struct net_device *dev = ptr; 1927 struct net_device *dev = ptr;
1933 1928
1934 if (!net_eq(dev_net(dev), &init_net)) 1929 if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
1935 return NOTIFY_DONE; 1930 return NOTIFY_DONE;
1936 1931
1937 /* It is OK that we do not hold the group lock right now, 1932 /* It is OK that we do not hold the group lock right now,
@@ -2324,7 +2319,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2324 } 2319 }
2325 2320
2326 if (pkt_dev->flags & F_MPLS_RND) { 2321 if (pkt_dev->flags & F_MPLS_RND) {
2327 unsigned i; 2322 unsigned int i;
2328 for (i = 0; i < pkt_dev->nr_labels; i++) 2323 for (i = 0; i < pkt_dev->nr_labels; i++)
2329 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2324 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
2330 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2325 pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@ -2550,7 +2545,7 @@ err:
2550 2545
2551static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2546static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2552{ 2547{
2553 unsigned i; 2548 unsigned int i;
2554 for (i = 0; i < pkt_dev->nr_labels; i++) 2549 for (i = 0; i < pkt_dev->nr_labels; i++)
2555 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2550 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2556 2551
@@ -2934,8 +2929,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2934 2929
2935 if (datalen < sizeof(struct pktgen_hdr)) { 2930 if (datalen < sizeof(struct pktgen_hdr)) {
2936 datalen = sizeof(struct pktgen_hdr); 2931 datalen = sizeof(struct pktgen_hdr);
2937 if (net_ratelimit()) 2932 net_info_ratelimited("increased datalen to %d\n", datalen);
2938 pr_info("increased datalen to %d\n", datalen);
2939 } 2933 }
2940 2934
2941 udph->source = htons(pkt_dev->cur_udp_src); 2935 udph->source = htons(pkt_dev->cur_udp_src);
@@ -3365,8 +3359,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3365 pkt_dev->errors++; 3359 pkt_dev->errors++;
3366 break; 3360 break;
3367 default: /* Drivers are not supposed to return other values! */ 3361 default: /* Drivers are not supposed to return other values! */
3368 if (net_ratelimit()) 3362 net_info_ratelimited("%s xmit error: %d\n",
3369 pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret); 3363 pkt_dev->odevname, ret);
3370 pkt_dev->errors++; 3364 pkt_dev->errors++;
3371 /* fallthru */ 3365 /* fallthru */
3372 case NETDEV_TX_LOCKED: 3366 case NETDEV_TX_LOCKED:
@@ -3755,12 +3749,18 @@ static void __exit pg_cleanup(void)
3755{ 3749{
3756 struct pktgen_thread *t; 3750 struct pktgen_thread *t;
3757 struct list_head *q, *n; 3751 struct list_head *q, *n;
3752 LIST_HEAD(list);
3758 3753
3759 /* Stop all interfaces & threads */ 3754 /* Stop all interfaces & threads */
3760 pktgen_exiting = true; 3755 pktgen_exiting = true;
3761 3756
3762 list_for_each_safe(q, n, &pktgen_threads) { 3757 mutex_lock(&pktgen_thread_lock);
3758 list_splice_init(&pktgen_threads, &list);
3759 mutex_unlock(&pktgen_thread_lock);
3760
3761 list_for_each_safe(q, n, &list) {
3763 t = list_entry(q, struct pktgen_thread, th_list); 3762 t = list_entry(q, struct pktgen_thread, th_list);
3763 list_del(&t->th_list);
3764 kthread_stop(t->tsk); 3764 kthread_stop(t->tsk);
3765 kfree(t); 3765 kfree(t);
3766 } 3766 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f965dce6f20f..21318d15bbc3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,10 +35,11 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/if_bridge.h>
38#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/etherdevice.h>
39 41
40#include <asm/uaccess.h> 42#include <asm/uaccess.h>
41#include <asm/system.h>
42 43
43#include <linux/inet.h> 44#include <linux/inet.h>
44#include <linux/netdevice.h> 45#include <linux/netdevice.h>
@@ -553,7 +554,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
553} 554}
554EXPORT_SYMBOL(__rta_fill); 555EXPORT_SYMBOL(__rta_fill);
555 556
556int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) 557int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
557{ 558{
558 struct sock *rtnl = net->rtnl; 559 struct sock *rtnl = net->rtnl;
559 int err = 0; 560 int err = 0;
@@ -608,7 +609,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
608 for (i = 0; i < RTAX_MAX; i++) { 609 for (i = 0; i < RTAX_MAX; i++) {
609 if (metrics[i]) { 610 if (metrics[i]) {
610 valid++; 611 valid++;
611 NLA_PUT_U32(skb, i+1, metrics[i]); 612 if (nla_put_u32(skb, i+1, metrics[i]))
613 goto nla_put_failure;
612 } 614 }
613 } 615 }
614 616
@@ -783,6 +785,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
783 + nla_total_size(4) /* IFLA_MTU */ 785 + nla_total_size(4) /* IFLA_MTU */
784 + nla_total_size(4) /* IFLA_LINK */ 786 + nla_total_size(4) /* IFLA_LINK */
785 + nla_total_size(4) /* IFLA_MASTER */ 787 + nla_total_size(4) /* IFLA_MASTER */
788 + nla_total_size(4) /* IFLA_PROMISCUITY */
786 + nla_total_size(1) /* IFLA_OPERSTATE */ 789 + nla_total_size(1) /* IFLA_OPERSTATE */
787 + nla_total_size(1) /* IFLA_LINKMODE */ 790 + nla_total_size(1) /* IFLA_LINKMODE */
788 + nla_total_size(ext_filter_mask 791 + nla_total_size(ext_filter_mask
@@ -808,7 +811,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
808 vf_port = nla_nest_start(skb, IFLA_VF_PORT); 811 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
809 if (!vf_port) 812 if (!vf_port)
810 goto nla_put_failure; 813 goto nla_put_failure;
811 NLA_PUT_U32(skb, IFLA_PORT_VF, vf); 814 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
815 goto nla_put_failure;
812 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 816 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
813 if (err == -EMSGSIZE) 817 if (err == -EMSGSIZE)
814 goto nla_put_failure; 818 goto nla_put_failure;
@@ -892,25 +896,23 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
892 ifm->ifi_flags = dev_get_flags(dev); 896 ifm->ifi_flags = dev_get_flags(dev);
893 ifm->ifi_change = change; 897 ifm->ifi_change = change;
894 898
895 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 899 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
896 NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); 900 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
897 NLA_PUT_U8(skb, IFLA_OPERSTATE, 901 nla_put_u8(skb, IFLA_OPERSTATE,
898 netif_running(dev) ? dev->operstate : IF_OPER_DOWN); 902 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
899 NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); 903 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
900 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 904 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
901 NLA_PUT_U32(skb, IFLA_GROUP, dev->group); 905 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
902 906 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
903 if (dev->ifindex != dev->iflink) 907 (dev->ifindex != dev->iflink &&
904 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); 908 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
905 909 (dev->master &&
906 if (dev->master) 910 nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
907 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); 911 (dev->qdisc &&
908 912 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
909 if (dev->qdisc) 913 (dev->ifalias &&
910 NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); 914 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
911 915 goto nla_put_failure;
912 if (dev->ifalias)
913 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
914 916
915 if (1) { 917 if (1) {
916 struct rtnl_link_ifmap map = { 918 struct rtnl_link_ifmap map = {
@@ -921,12 +923,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
921 .dma = dev->dma, 923 .dma = dev->dma,
922 .port = dev->if_port, 924 .port = dev->if_port,
923 }; 925 };
924 NLA_PUT(skb, IFLA_MAP, sizeof(map), &map); 926 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
927 goto nla_put_failure;
925 } 928 }
926 929
927 if (dev->addr_len) { 930 if (dev->addr_len) {
928 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 931 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
929 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); 932 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
933 goto nla_put_failure;
930 } 934 }
931 935
932 attr = nla_reserve(skb, IFLA_STATS, 936 attr = nla_reserve(skb, IFLA_STATS,
@@ -943,8 +947,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
943 goto nla_put_failure; 947 goto nla_put_failure;
944 copy_rtnl_link_stats64(nla_data(attr), stats); 948 copy_rtnl_link_stats64(nla_data(attr), stats);
945 949
946 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) 950 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
947 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); 951 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
952 goto nla_put_failure;
948 953
949 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 954 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
950 && (ext_filter_mask & RTEXT_FILTER_VF)) { 955 && (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -987,12 +992,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
987 nla_nest_cancel(skb, vfinfo); 992 nla_nest_cancel(skb, vfinfo);
988 goto nla_put_failure; 993 goto nla_put_failure;
989 } 994 }
990 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); 995 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
991 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); 996 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
992 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 997 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
993 &vf_tx_rate); 998 &vf_tx_rate) ||
994 NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 999 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
995 &vf_spoofchk); 1000 &vf_spoofchk))
1001 goto nla_put_failure;
996 nla_nest_end(skb, vf); 1002 nla_nest_end(skb, vf);
997 } 1003 }
998 nla_nest_end(skb, vfinfo); 1004 nla_nest_end(skb, vfinfo);
@@ -1114,6 +1120,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1114 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1120 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1115 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1121 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1116 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1122 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1123 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1117}; 1124};
1118EXPORT_SYMBOL(ifla_policy); 1125EXPORT_SYMBOL(ifla_policy);
1119 1126
@@ -1133,6 +1140,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1133 .len = sizeof(struct ifla_vf_vlan) }, 1140 .len = sizeof(struct ifla_vf_vlan) },
1134 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, 1141 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
1135 .len = sizeof(struct ifla_vf_tx_rate) }, 1142 .len = sizeof(struct ifla_vf_tx_rate) },
1143 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
1144 .len = sizeof(struct ifla_vf_spoofchk) },
1136}; 1145};
1137 1146
1138static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1147static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1515,11 +1524,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1515 err = 0; 1524 err = 0;
1516 1525
1517errout: 1526errout:
1518 if (err < 0 && modified && net_ratelimit()) 1527 if (err < 0 && modified)
1519 printk(KERN_WARNING "A link change request failed with " 1528 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
1520 "some changes committed already. Interface %s may " 1529 dev->name);
1521 "have been left with an inconsistent configuration, "
1522 "please check.\n", dev->name);
1523 1530
1524 if (send_addr_notify) 1531 if (send_addr_notify)
1525 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 1532 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
@@ -1633,14 +1640,14 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1633 int err; 1640 int err;
1634 struct net_device *dev; 1641 struct net_device *dev;
1635 unsigned int num_queues = 1; 1642 unsigned int num_queues = 1;
1636 unsigned int real_num_queues = 1;
1637 1643
1638 if (ops->get_tx_queues) { 1644 if (ops->get_tx_queues) {
1639 err = ops->get_tx_queues(src_net, tb, &num_queues, 1645 err = ops->get_tx_queues(src_net, tb);
1640 &real_num_queues); 1646 if (err < 0)
1641 if (err)
1642 goto err; 1647 goto err;
1648 num_queues = err;
1643 } 1649 }
1650
1644 err = -ENOMEM; 1651 err = -ENOMEM;
1645 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); 1652 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
1646 if (!dev) 1653 if (!dev)
@@ -1946,7 +1953,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1946 return skb->len; 1953 return skb->len;
1947} 1954}
1948 1955
1949void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) 1956void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
1950{ 1957{
1951 struct net *net = dev_net(dev); 1958 struct net *net = dev_net(dev);
1952 struct sk_buff *skb; 1959 struct sk_buff *skb;
@@ -1971,6 +1978,267 @@ errout:
1971 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 1978 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
1972} 1979}
1973 1980
1981static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
1982 struct net_device *dev,
1983 u8 *addr, u32 pid, u32 seq,
1984 int type, unsigned int flags)
1985{
1986 struct nlmsghdr *nlh;
1987 struct ndmsg *ndm;
1988
1989 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
1990 if (!nlh)
1991 return -EMSGSIZE;
1992
1993 ndm = nlmsg_data(nlh);
1994 ndm->ndm_family = AF_BRIDGE;
1995 ndm->ndm_pad1 = 0;
1996 ndm->ndm_pad2 = 0;
1997 ndm->ndm_flags = flags;
1998 ndm->ndm_type = 0;
1999 ndm->ndm_ifindex = dev->ifindex;
2000 ndm->ndm_state = NUD_PERMANENT;
2001
2002 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2003 goto nla_put_failure;
2004
2005 return nlmsg_end(skb, nlh);
2006
2007nla_put_failure:
2008 nlmsg_cancel(skb, nlh);
2009 return -EMSGSIZE;
2010}
2011
2012static inline size_t rtnl_fdb_nlmsg_size(void)
2013{
2014 return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
2015}
2016
2017static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
2018{
2019 struct net *net = dev_net(dev);
2020 struct sk_buff *skb;
2021 int err = -ENOBUFS;
2022
2023 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2024 if (!skb)
2025 goto errout;
2026
2027 err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
2028 if (err < 0) {
2029 kfree_skb(skb);
2030 goto errout;
2031 }
2032
2033 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2034 return;
2035errout:
2036 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2037}
2038
2039static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2040{
2041 struct net *net = sock_net(skb->sk);
2042 struct net_device *master = NULL;
2043 struct ndmsg *ndm;
2044 struct nlattr *tb[NDA_MAX+1];
2045 struct net_device *dev;
2046 u8 *addr;
2047 int err;
2048
2049 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2050 if (err < 0)
2051 return err;
2052
2053 ndm = nlmsg_data(nlh);
2054 if (ndm->ndm_ifindex == 0) {
2055 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
2056 return -EINVAL;
2057 }
2058
2059 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2060 if (dev == NULL) {
2061 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
2062 return -ENODEV;
2063 }
2064
2065 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
2066 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
2067 return -EINVAL;
2068 }
2069
2070 addr = nla_data(tb[NDA_LLADDR]);
2071 if (!is_valid_ether_addr(addr)) {
2072 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
2073 return -EINVAL;
2074 }
2075
2076 err = -EOPNOTSUPP;
2077
2078 /* Support fdb on master device the net/bridge default case */
2079 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2080 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2081 master = dev->master;
2082 err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2083 nlh->nlmsg_flags);
2084 if (err)
2085 goto out;
2086 else
2087 ndm->ndm_flags &= ~NTF_MASTER;
2088 }
2089
2090 /* Embedded bridge, macvlan, and any other device support */
2091 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
2092 err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2093 nlh->nlmsg_flags);
2094
2095 if (!err) {
2096 rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
2097 ndm->ndm_flags &= ~NTF_SELF;
2098 }
2099 }
2100out:
2101 return err;
2102}
2103
2104static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2105{
2106 struct net *net = sock_net(skb->sk);
2107 struct ndmsg *ndm;
2108 struct nlattr *llattr;
2109 struct net_device *dev;
2110 int err = -EINVAL;
2111 __u8 *addr;
2112
2113 if (nlmsg_len(nlh) < sizeof(*ndm))
2114 return -EINVAL;
2115
2116 ndm = nlmsg_data(nlh);
2117 if (ndm->ndm_ifindex == 0) {
2118 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
2119 return -EINVAL;
2120 }
2121
2122 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2123 if (dev == NULL) {
2124 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
2125 return -ENODEV;
2126 }
2127
2128 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
2129 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
2130 pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
2131 return -EINVAL;
2132 }
2133
2134 addr = nla_data(llattr);
2135 err = -EOPNOTSUPP;
2136
2137 /* Support fdb on master device the net/bridge default case */
2138 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2139 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2140 struct net_device *master = dev->master;
2141
2142 if (master->netdev_ops->ndo_fdb_del)
2143 err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2144
2145 if (err)
2146 goto out;
2147 else
2148 ndm->ndm_flags &= ~NTF_MASTER;
2149 }
2150
2151 /* Embedded bridge, macvlan, and any other device support */
2152 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
2153 err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2154
2155 if (!err) {
2156 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
2157 ndm->ndm_flags &= ~NTF_SELF;
2158 }
2159 }
2160out:
2161 return err;
2162}
2163
2164static int nlmsg_populate_fdb(struct sk_buff *skb,
2165 struct netlink_callback *cb,
2166 struct net_device *dev,
2167 int *idx,
2168 struct netdev_hw_addr_list *list)
2169{
2170 struct netdev_hw_addr *ha;
2171 int err;
2172 u32 pid, seq;
2173
2174 pid = NETLINK_CB(cb->skb).pid;
2175 seq = cb->nlh->nlmsg_seq;
2176
2177 list_for_each_entry(ha, &list->list, list) {
2178 if (*idx < cb->args[0])
2179 goto skip;
2180
2181 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2182 pid, seq, 0, NTF_SELF);
2183 if (err < 0)
2184 return err;
2185skip:
2186 *idx += 1;
2187 }
2188 return 0;
2189}
2190
2191/**
2192 * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table.
2193 * @nlh: netlink message header
2194 * @dev: netdevice
2195 *
2196 * Default netdevice operation to dump the existing unicast address list.
2197 * Returns zero on success.
2198 */
2199int ndo_dflt_fdb_dump(struct sk_buff *skb,
2200 struct netlink_callback *cb,
2201 struct net_device *dev,
2202 int idx)
2203{
2204 int err;
2205
2206 netif_addr_lock_bh(dev);
2207 err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
2208 if (err)
2209 goto out;
2210 nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
2211out:
2212 netif_addr_unlock_bh(dev);
2213 return idx;
2214}
2215EXPORT_SYMBOL(ndo_dflt_fdb_dump);
2216
2217static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2218{
2219 int idx = 0;
2220 struct net *net = sock_net(skb->sk);
2221 struct net_device *dev;
2222
2223 rcu_read_lock();
2224 for_each_netdev_rcu(net, dev) {
2225 if (dev->priv_flags & IFF_BRIDGE_PORT) {
2226 struct net_device *master = dev->master;
2227 const struct net_device_ops *ops = master->netdev_ops;
2228
2229 if (ops->ndo_fdb_dump)
2230 idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
2231 }
2232
2233 if (dev->netdev_ops->ndo_fdb_dump)
2234 idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
2235 }
2236 rcu_read_unlock();
2237
2238 cb->args[0] = idx;
2239 return skb->len;
2240}
2241
1974/* Protected by RTNL sempahore. */ 2242/* Protected by RTNL sempahore. */
1975static struct rtattr **rta_buf; 2243static struct rtattr **rta_buf;
1976static int rtattr_max; 2244static int rtattr_max;
@@ -2019,8 +2287,13 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2019 2287
2020 __rtnl_unlock(); 2288 __rtnl_unlock();
2021 rtnl = net->rtnl; 2289 rtnl = net->rtnl;
2022 err = netlink_dump_start(rtnl, skb, nlh, dumpit, 2290 {
2023 NULL, min_dump_alloc); 2291 struct netlink_dump_control c = {
2292 .dump = dumpit,
2293 .min_dump_alloc = min_dump_alloc,
2294 };
2295 err = netlink_dump_start(rtnl, skb, nlh, &c);
2296 }
2024 rtnl_lock(); 2297 rtnl_lock();
2025 return err; 2298 return err;
2026 } 2299 }
@@ -2036,7 +2309,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2036 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); 2309 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2037 2310
2038 while (RTA_OK(attr, attrlen)) { 2311 while (RTA_OK(attr, attrlen)) {
2039 unsigned flavor = attr->rta_type; 2312 unsigned int flavor = attr->rta_type;
2040 if (flavor) { 2313 if (flavor) {
2041 if (flavor > rta_max[sz_idx]) 2314 if (flavor > rta_max[sz_idx])
2042 return -EINVAL; 2315 return -EINVAL;
@@ -2138,5 +2411,9 @@ void __init rtnetlink_init(void)
2138 2411
2139 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); 2412 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
2140 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); 2413 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
2414
2415 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
2416 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
2417 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
2141} 2418}
2142 2419
diff --git a/net/core/scm.c b/net/core/scm.c
index ff52ad0a5150..611c5efd4cb0 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -28,7 +28,6 @@
28#include <linux/nsproxy.h> 28#include <linux/nsproxy.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include <asm/system.h>
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
33 32
34#include <net/protocol.h> 33#include <net/protocol.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da0c97f2fab4..016694d62484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -36,6 +36,8 @@
36 * The functions in this file will not compile correctly with gcc 2.4.x 36 * The functions in this file will not compile correctly with gcc 2.4.x
37 */ 37 */
38 38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
39#include <linux/module.h> 41#include <linux/module.h>
40#include <linux/types.h> 42#include <linux/types.h>
41#include <linux/kernel.h> 43#include <linux/kernel.h>
@@ -66,12 +68,10 @@
66#include <net/xfrm.h> 68#include <net/xfrm.h>
67 69
68#include <asm/uaccess.h> 70#include <asm/uaccess.h>
69#include <asm/system.h>
70#include <trace/events/skb.h> 71#include <trace/events/skb.h>
72#include <linux/highmem.h>
71 73
72#include "kmap_skb.h" 74struct kmem_cache *skbuff_head_cache __read_mostly;
73
74static struct kmem_cache *skbuff_head_cache __read_mostly;
75static struct kmem_cache *skbuff_fclone_cache __read_mostly; 75static struct kmem_cache *skbuff_fclone_cache __read_mostly;
76 76
77static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 77static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
@@ -120,11 +120,10 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
120 */ 120 */
121static void skb_over_panic(struct sk_buff *skb, int sz, void *here) 121static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
122{ 122{
123 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 123 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
124 "data:%p tail:%#lx end:%#lx dev:%s\n", 124 __func__, here, skb->len, sz, skb->head, skb->data,
125 here, skb->len, sz, skb->head, skb->data, 125 (unsigned long)skb->tail, (unsigned long)skb->end,
126 (unsigned long)skb->tail, (unsigned long)skb->end, 126 skb->dev ? skb->dev->name : "<NULL>");
127 skb->dev ? skb->dev->name : "<NULL>");
128 BUG(); 127 BUG();
129} 128}
130 129
@@ -139,11 +138,10 @@ static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
139 138
140static void skb_under_panic(struct sk_buff *skb, int sz, void *here) 139static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
141{ 140{
142 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 141 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
143 "data:%p tail:%#lx end:%#lx dev:%s\n", 142 __func__, here, skb->len, sz, skb->head, skb->data,
144 here, skb->len, sz, skb->head, skb->data, 143 (unsigned long)skb->tail, (unsigned long)skb->end,
145 (unsigned long)skb->tail, (unsigned long)skb->end, 144 skb->dev ? skb->dev->name : "<NULL>");
146 skb->dev ? skb->dev->name : "<NULL>");
147 BUG(); 145 BUG();
148} 146}
149 147
@@ -247,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb);
247/** 245/**
248 * build_skb - build a network buffer 246 * build_skb - build a network buffer
249 * @data: data buffer provided by caller 247 * @data: data buffer provided by caller
248 * @frag_size: size of fragment, or 0 if head was kmalloced
250 * 249 *
251 * Allocate a new &sk_buff. Caller provides space holding head and 250 * Allocate a new &sk_buff. Caller provides space holding head and
252 * skb_shared_info. @data must have been allocated by kmalloc() 251 * skb_shared_info. @data must have been allocated by kmalloc()
@@ -260,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb);
260 * before giving packet to stack. 259 * before giving packet to stack.
261 * RX rings only contains data buffers, not full skbs. 260 * RX rings only contains data buffers, not full skbs.
262 */ 261 */
263struct sk_buff *build_skb(void *data) 262struct sk_buff *build_skb(void *data, unsigned int frag_size)
264{ 263{
265 struct skb_shared_info *shinfo; 264 struct skb_shared_info *shinfo;
266 struct sk_buff *skb; 265 struct sk_buff *skb;
267 unsigned int size; 266 unsigned int size = frag_size ? : ksize(data);
268 267
269 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 268 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
270 if (!skb) 269 if (!skb)
271 return NULL; 270 return NULL;
272 271
273 size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 272 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
274 273
275 memset(skb, 0, offsetof(struct sk_buff, tail)); 274 memset(skb, 0, offsetof(struct sk_buff, tail));
276 skb->truesize = SKB_TRUESIZE(size); 275 skb->truesize = SKB_TRUESIZE(size);
276 skb->head_frag = frag_size != 0;
277 atomic_set(&skb->users, 1); 277 atomic_set(&skb->users, 1);
278 skb->head = data; 278 skb->head = data;
279 skb->data = data; 279 skb->data = data;
@@ -293,6 +293,46 @@ struct sk_buff *build_skb(void *data)
293} 293}
294EXPORT_SYMBOL(build_skb); 294EXPORT_SYMBOL(build_skb);
295 295
296struct netdev_alloc_cache {
297 struct page *page;
298 unsigned int offset;
299};
300static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
301
302/**
303 * netdev_alloc_frag - allocate a page fragment
304 * @fragsz: fragment size
305 *
306 * Allocates a frag from a page for receive buffer.
307 * Uses GFP_ATOMIC allocations.
308 */
309void *netdev_alloc_frag(unsigned int fragsz)
310{
311 struct netdev_alloc_cache *nc;
312 void *data = NULL;
313 unsigned long flags;
314
315 local_irq_save(flags);
316 nc = &__get_cpu_var(netdev_alloc_cache);
317 if (unlikely(!nc->page)) {
318refill:
319 nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
320 nc->offset = 0;
321 }
322 if (likely(nc->page)) {
323 if (nc->offset + fragsz > PAGE_SIZE) {
324 put_page(nc->page);
325 goto refill;
326 }
327 data = page_address(nc->page) + nc->offset;
328 nc->offset += fragsz;
329 get_page(nc->page);
330 }
331 local_irq_restore(flags);
332 return data;
333}
334EXPORT_SYMBOL(netdev_alloc_frag);
335
296/** 336/**
297 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 337 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
298 * @dev: network device to receive on 338 * @dev: network device to receive on
@@ -307,11 +347,23 @@ EXPORT_SYMBOL(build_skb);
307 * %NULL is returned if there is no free memory. 347 * %NULL is returned if there is no free memory.
308 */ 348 */
309struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 349struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
310 unsigned int length, gfp_t gfp_mask) 350 unsigned int length, gfp_t gfp_mask)
311{ 351{
312 struct sk_buff *skb; 352 struct sk_buff *skb = NULL;
353 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
354 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
355
356 if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
357 void *data = netdev_alloc_frag(fragsz);
313 358
314 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); 359 if (likely(data)) {
360 skb = build_skb(data, fragsz);
361 if (unlikely(!skb))
362 put_page(virt_to_head_page(data));
363 }
364 } else {
365 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
366 }
315 if (likely(skb)) { 367 if (likely(skb)) {
316 skb_reserve(skb, NET_SKB_PAD); 368 skb_reserve(skb, NET_SKB_PAD);
317 skb->dev = dev; 369 skb->dev = dev;
@@ -321,37 +373,15 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
321EXPORT_SYMBOL(__netdev_alloc_skb); 373EXPORT_SYMBOL(__netdev_alloc_skb);
322 374
323void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 375void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
324 int size) 376 int size, unsigned int truesize)
325{ 377{
326 skb_fill_page_desc(skb, i, page, off, size); 378 skb_fill_page_desc(skb, i, page, off, size);
327 skb->len += size; 379 skb->len += size;
328 skb->data_len += size; 380 skb->data_len += size;
329 skb->truesize += size; 381 skb->truesize += truesize;
330} 382}
331EXPORT_SYMBOL(skb_add_rx_frag); 383EXPORT_SYMBOL(skb_add_rx_frag);
332 384
333/**
334 * dev_alloc_skb - allocate an skbuff for receiving
335 * @length: length to allocate
336 *
337 * Allocate a new &sk_buff and assign it a usage count of one. The
338 * buffer has unspecified headroom built in. Users should allocate
339 * the headroom they think they need without accounting for the
340 * built in space. The built in space is used for optimisations.
341 *
342 * %NULL is returned if there is no free memory. Although this function
343 * allocates memory it can be called from an interrupt.
344 */
345struct sk_buff *dev_alloc_skb(unsigned int length)
346{
347 /*
348 * There is more code here than it seems:
349 * __dev_alloc_skb is an inline
350 */
351 return __dev_alloc_skb(length, GFP_ATOMIC);
352}
353EXPORT_SYMBOL(dev_alloc_skb);
354
355static void skb_drop_list(struct sk_buff **listp) 385static void skb_drop_list(struct sk_buff **listp)
356{ 386{
357 struct sk_buff *list = *listp; 387 struct sk_buff *list = *listp;
@@ -378,6 +408,14 @@ static void skb_clone_fraglist(struct sk_buff *skb)
378 skb_get(list); 408 skb_get(list);
379} 409}
380 410
411static void skb_free_head(struct sk_buff *skb)
412{
413 if (skb->head_frag)
414 put_page(virt_to_head_page(skb->head));
415 else
416 kfree(skb->head);
417}
418
381static void skb_release_data(struct sk_buff *skb) 419static void skb_release_data(struct sk_buff *skb)
382{ 420{
383 if (!skb->cloned || 421 if (!skb->cloned ||
@@ -404,7 +442,7 @@ static void skb_release_data(struct sk_buff *skb)
404 if (skb_has_frag_list(skb)) 442 if (skb_has_frag_list(skb))
405 skb_drop_fraglist(skb); 443 skb_drop_fraglist(skb);
406 444
407 kfree(skb->head); 445 skb_free_head(skb);
408 } 446 }
409} 447}
410 448
@@ -592,6 +630,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
592 new->rxhash = old->rxhash; 630 new->rxhash = old->rxhash;
593 new->ooo_okay = old->ooo_okay; 631 new->ooo_okay = old->ooo_okay;
594 new->l4_rxhash = old->l4_rxhash; 632 new->l4_rxhash = old->l4_rxhash;
633 new->no_fcs = old->no_fcs;
595#ifdef CONFIG_XFRM 634#ifdef CONFIG_XFRM
596 new->sp = secpath_get(old->sp); 635 new->sp = secpath_get(old->sp);
597#endif 636#endif
@@ -645,6 +684,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
645 C(tail); 684 C(tail);
646 C(end); 685 C(end);
647 C(head); 686 C(head);
687 C(head_frag);
648 C(data); 688 C(data);
649 C(truesize); 689 C(truesize);
650 atomic_set(&n->users, 1); 690 atomic_set(&n->users, 1);
@@ -707,10 +747,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
707 } 747 }
708 return -ENOMEM; 748 return -ENOMEM;
709 } 749 }
710 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 750 vaddr = kmap_atomic(skb_frag_page(f));
711 memcpy(page_address(page), 751 memcpy(page_address(page),
712 vaddr + f->page_offset, skb_frag_size(f)); 752 vaddr + f->page_offset, skb_frag_size(f));
713 kunmap_skb_frag(vaddr); 753 kunmap_atomic(vaddr);
714 page->private = (unsigned long)head; 754 page->private = (unsigned long)head;
715 head = page; 755 head = page;
716 } 756 }
@@ -819,7 +859,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
819struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 859struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
820{ 860{
821 int headerlen = skb_headroom(skb); 861 int headerlen = skb_headroom(skb);
822 unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; 862 unsigned int size = skb_end_offset(skb) + skb->data_len;
823 struct sk_buff *n = alloc_skb(size, gfp_mask); 863 struct sk_buff *n = alloc_skb(size, gfp_mask);
824 864
825 if (!n) 865 if (!n)
@@ -920,9 +960,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
920{ 960{
921 int i; 961 int i;
922 u8 *data; 962 u8 *data;
923 int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; 963 int size = nhead + skb_end_offset(skb) + ntail;
924 long off; 964 long off;
925 bool fastpath;
926 965
927 BUG_ON(nhead < 0); 966 BUG_ON(nhead < 0);
928 967
@@ -931,30 +970,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
931 970
932 size = SKB_DATA_ALIGN(size); 971 size = SKB_DATA_ALIGN(size);
933 972
934 /* Check if we can avoid taking references on fragments if we own 973 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
935 * the last reference on skb->head. (see skb_release_data()) 974 gfp_mask);
936 */
937 if (!skb->cloned)
938 fastpath = true;
939 else {
940 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
941 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
942 }
943
944 if (fastpath &&
945 size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
946 memmove(skb->head + size, skb_shinfo(skb),
947 offsetof(struct skb_shared_info,
948 frags[skb_shinfo(skb)->nr_frags]));
949 memmove(skb->head + nhead, skb->head,
950 skb_tail_pointer(skb) - skb->head);
951 off = nhead;
952 goto adjust_others;
953 }
954
955 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
956 if (!data) 975 if (!data)
957 goto nodata; 976 goto nodata;
977 size = SKB_WITH_OVERHEAD(ksize(data));
958 978
959 /* Copy only real data... and, alas, header. This should be 979 /* Copy only real data... and, alas, header. This should be
960 * optimized for the cases when header is void. 980 * optimized for the cases when header is void.
@@ -965,9 +985,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
965 skb_shinfo(skb), 985 skb_shinfo(skb),
966 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 986 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
967 987
968 if (fastpath) { 988 /*
969 kfree(skb->head); 989 * if shinfo is shared we must drop the old head gracefully, but if it
970 } else { 990 * is not we can just drop the old head and let the existing refcount
991 * be since all we did is relocate the values
992 */
993 if (skb_cloned(skb)) {
971 /* copy this zero copy skb frags */ 994 /* copy this zero copy skb frags */
972 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 995 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
973 if (skb_copy_ubufs(skb, gfp_mask)) 996 if (skb_copy_ubufs(skb, gfp_mask))
@@ -980,11 +1003,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
980 skb_clone_fraglist(skb); 1003 skb_clone_fraglist(skb);
981 1004
982 skb_release_data(skb); 1005 skb_release_data(skb);
1006 } else {
1007 skb_free_head(skb);
983 } 1008 }
984 off = (data + nhead) - skb->head; 1009 off = (data + nhead) - skb->head;
985 1010
986 skb->head = data; 1011 skb->head = data;
987adjust_others: 1012 skb->head_frag = 0;
988 skb->data += off; 1013 skb->data += off;
989#ifdef NET_SKBUFF_DATA_USES_OFFSET 1014#ifdef NET_SKBUFF_DATA_USES_OFFSET
990 skb->end = size; 1015 skb->end = size;
@@ -1273,7 +1298,7 @@ drop_pages:
1273 return -ENOMEM; 1298 return -ENOMEM;
1274 1299
1275 nfrag->next = frag->next; 1300 nfrag->next = frag->next;
1276 kfree_skb(frag); 1301 consume_skb(frag);
1277 frag = nfrag; 1302 frag = nfrag;
1278 *fragp = frag; 1303 *fragp = frag;
1279 } 1304 }
@@ -1485,21 +1510,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1485 1510
1486 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1511 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1487 int end; 1512 int end;
1513 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1488 1514
1489 WARN_ON(start > offset + len); 1515 WARN_ON(start > offset + len);
1490 1516
1491 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1517 end = start + skb_frag_size(f);
1492 if ((copy = end - offset) > 0) { 1518 if ((copy = end - offset) > 0) {
1493 u8 *vaddr; 1519 u8 *vaddr;
1494 1520
1495 if (copy > len) 1521 if (copy > len)
1496 copy = len; 1522 copy = len;
1497 1523
1498 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1524 vaddr = kmap_atomic(skb_frag_page(f));
1499 memcpy(to, 1525 memcpy(to,
1500 vaddr + skb_shinfo(skb)->frags[i].page_offset+ 1526 vaddr + f->page_offset + offset - start,
1501 offset - start, copy); 1527 copy);
1502 kunmap_skb_frag(vaddr); 1528 kunmap_atomic(vaddr);
1503 1529
1504 if ((len -= copy) == 0) 1530 if ((len -= copy) == 0)
1505 return 0; 1531 return 0;
@@ -1545,9 +1571,9 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1545 put_page(spd->pages[i]); 1571 put_page(spd->pages[i]);
1546} 1572}
1547 1573
1548static inline struct page *linear_to_page(struct page *page, unsigned int *len, 1574static struct page *linear_to_page(struct page *page, unsigned int *len,
1549 unsigned int *offset, 1575 unsigned int *offset,
1550 struct sk_buff *skb, struct sock *sk) 1576 struct sk_buff *skb, struct sock *sk)
1551{ 1577{
1552 struct page *p = sk->sk_sndmsg_page; 1578 struct page *p = sk->sk_sndmsg_page;
1553 unsigned int off; 1579 unsigned int off;
@@ -1563,6 +1589,9 @@ new_page:
1563 } else { 1589 } else {
1564 unsigned int mlen; 1590 unsigned int mlen;
1565 1591
1592 /* If we are the only user of the page, we can reset offset */
1593 if (page_count(p) == 1)
1594 sk->sk_sndmsg_off = 0;
1566 off = sk->sk_sndmsg_off; 1595 off = sk->sk_sndmsg_off;
1567 mlen = PAGE_SIZE - off; 1596 mlen = PAGE_SIZE - off;
1568 if (mlen < 64 && mlen < *len) { 1597 if (mlen < 64 && mlen < *len) {
@@ -1576,36 +1605,48 @@ new_page:
1576 memcpy(page_address(p) + off, page_address(page) + *offset, *len); 1605 memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1577 sk->sk_sndmsg_off += *len; 1606 sk->sk_sndmsg_off += *len;
1578 *offset = off; 1607 *offset = off;
1579 get_page(p);
1580 1608
1581 return p; 1609 return p;
1582} 1610}
1583 1611
1612static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1613 struct page *page,
1614 unsigned int offset)
1615{
1616 return spd->nr_pages &&
1617 spd->pages[spd->nr_pages - 1] == page &&
1618 (spd->partial[spd->nr_pages - 1].offset +
1619 spd->partial[spd->nr_pages - 1].len == offset);
1620}
1621
1584/* 1622/*
1585 * Fill page/offset/length into spd, if it can hold more pages. 1623 * Fill page/offset/length into spd, if it can hold more pages.
1586 */ 1624 */
1587static inline int spd_fill_page(struct splice_pipe_desc *spd, 1625static bool spd_fill_page(struct splice_pipe_desc *spd,
1588 struct pipe_inode_info *pipe, struct page *page, 1626 struct pipe_inode_info *pipe, struct page *page,
1589 unsigned int *len, unsigned int offset, 1627 unsigned int *len, unsigned int offset,
1590 struct sk_buff *skb, int linear, 1628 struct sk_buff *skb, bool linear,
1591 struct sock *sk) 1629 struct sock *sk)
1592{ 1630{
1593 if (unlikely(spd->nr_pages == pipe->buffers)) 1631 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1594 return 1; 1632 return true;
1595 1633
1596 if (linear) { 1634 if (linear) {
1597 page = linear_to_page(page, len, &offset, skb, sk); 1635 page = linear_to_page(page, len, &offset, skb, sk);
1598 if (!page) 1636 if (!page)
1599 return 1; 1637 return true;
1600 } else 1638 }
1601 get_page(page); 1639 if (spd_can_coalesce(spd, page, offset)) {
1602 1640 spd->partial[spd->nr_pages - 1].len += *len;
1641 return false;
1642 }
1643 get_page(page);
1603 spd->pages[spd->nr_pages] = page; 1644 spd->pages[spd->nr_pages] = page;
1604 spd->partial[spd->nr_pages].len = *len; 1645 spd->partial[spd->nr_pages].len = *len;
1605 spd->partial[spd->nr_pages].offset = offset; 1646 spd->partial[spd->nr_pages].offset = offset;
1606 spd->nr_pages++; 1647 spd->nr_pages++;
1607 1648
1608 return 0; 1649 return false;
1609} 1650}
1610 1651
1611static inline void __segment_seek(struct page **page, unsigned int *poff, 1652static inline void __segment_seek(struct page **page, unsigned int *poff,
@@ -1622,20 +1663,20 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
1622 *plen -= off; 1663 *plen -= off;
1623} 1664}
1624 1665
1625static inline int __splice_segment(struct page *page, unsigned int poff, 1666static bool __splice_segment(struct page *page, unsigned int poff,
1626 unsigned int plen, unsigned int *off, 1667 unsigned int plen, unsigned int *off,
1627 unsigned int *len, struct sk_buff *skb, 1668 unsigned int *len, struct sk_buff *skb,
1628 struct splice_pipe_desc *spd, int linear, 1669 struct splice_pipe_desc *spd, bool linear,
1629 struct sock *sk, 1670 struct sock *sk,
1630 struct pipe_inode_info *pipe) 1671 struct pipe_inode_info *pipe)
1631{ 1672{
1632 if (!*len) 1673 if (!*len)
1633 return 1; 1674 return true;
1634 1675
1635 /* skip this segment if already processed */ 1676 /* skip this segment if already processed */
1636 if (*off >= plen) { 1677 if (*off >= plen) {
1637 *off -= plen; 1678 *off -= plen;
1638 return 0; 1679 return false;
1639 } 1680 }
1640 1681
1641 /* ignore any bits we already processed */ 1682 /* ignore any bits we already processed */
@@ -1651,34 +1692,38 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
1651 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1692 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1652 1693
1653 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) 1694 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1654 return 1; 1695 return true;
1655 1696
1656 __segment_seek(&page, &poff, &plen, flen); 1697 __segment_seek(&page, &poff, &plen, flen);
1657 *len -= flen; 1698 *len -= flen;
1658 1699
1659 } while (*len && plen); 1700 } while (*len && plen);
1660 1701
1661 return 0; 1702 return false;
1662} 1703}
1663 1704
1664/* 1705/*
1665 * Map linear and fragment data from the skb to spd. It reports failure if the 1706 * Map linear and fragment data from the skb to spd. It reports true if the
1666 * pipe is full or if we already spliced the requested length. 1707 * pipe is full or if we already spliced the requested length.
1667 */ 1708 */
1668static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 1709static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1669 unsigned int *offset, unsigned int *len, 1710 unsigned int *offset, unsigned int *len,
1670 struct splice_pipe_desc *spd, struct sock *sk) 1711 struct splice_pipe_desc *spd, struct sock *sk)
1671{ 1712{
1672 int seg; 1713 int seg;
1673 1714
1674 /* 1715 /* map the linear part :
1675 * map the linear part 1716 * If skb->head_frag is set, this 'linear' part is backed by a
1717 * fragment, and if the head is not shared with any clones then
1718 * we can avoid a copy since we own the head portion of this page.
1676 */ 1719 */
1677 if (__splice_segment(virt_to_page(skb->data), 1720 if (__splice_segment(virt_to_page(skb->data),
1678 (unsigned long) skb->data & (PAGE_SIZE - 1), 1721 (unsigned long) skb->data & (PAGE_SIZE - 1),
1679 skb_headlen(skb), 1722 skb_headlen(skb),
1680 offset, len, skb, spd, 1, sk, pipe)) 1723 offset, len, skb, spd,
1681 return 1; 1724 skb_head_is_locked(skb),
1725 sk, pipe))
1726 return true;
1682 1727
1683 /* 1728 /*
1684 * then map the fragments 1729 * then map the fragments
@@ -1688,11 +1733,11 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1688 1733
1689 if (__splice_segment(skb_frag_page(f), 1734 if (__splice_segment(skb_frag_page(f),
1690 f->page_offset, skb_frag_size(f), 1735 f->page_offset, skb_frag_size(f),
1691 offset, len, skb, spd, 0, sk, pipe)) 1736 offset, len, skb, spd, false, sk, pipe))
1692 return 1; 1737 return true;
1693 } 1738 }
1694 1739
1695 return 0; 1740 return false;
1696} 1741}
1697 1742
1698/* 1743/*
@@ -1705,8 +1750,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1705 struct pipe_inode_info *pipe, unsigned int tlen, 1750 struct pipe_inode_info *pipe, unsigned int tlen,
1706 unsigned int flags) 1751 unsigned int flags)
1707{ 1752{
1708 struct partial_page partial[PIPE_DEF_BUFFERS]; 1753 struct partial_page partial[MAX_SKB_FRAGS];
1709 struct page *pages[PIPE_DEF_BUFFERS]; 1754 struct page *pages[MAX_SKB_FRAGS];
1710 struct splice_pipe_desc spd = { 1755 struct splice_pipe_desc spd = {
1711 .pages = pages, 1756 .pages = pages,
1712 .partial = partial, 1757 .partial = partial,
@@ -1718,9 +1763,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1718 struct sock *sk = skb->sk; 1763 struct sock *sk = skb->sk;
1719 int ret = 0; 1764 int ret = 0;
1720 1765
1721 if (splice_grow_spd(pipe, &spd))
1722 return -ENOMEM;
1723
1724 /* 1766 /*
1725 * __skb_splice_bits() only fails if the output has no room left, 1767 * __skb_splice_bits() only fails if the output has no room left,
1726 * so no point in going over the frag_list for the error case. 1768 * so no point in going over the frag_list for the error case.
@@ -1756,7 +1798,6 @@ done:
1756 lock_sock(sk); 1798 lock_sock(sk);
1757 } 1799 }
1758 1800
1759 splice_shrink_spd(pipe, &spd);
1760 return ret; 1801 return ret;
1761} 1802}
1762 1803
@@ -1804,10 +1845,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1804 if (copy > len) 1845 if (copy > len)
1805 copy = len; 1846 copy = len;
1806 1847
1807 vaddr = kmap_skb_frag(frag); 1848 vaddr = kmap_atomic(skb_frag_page(frag));
1808 memcpy(vaddr + frag->page_offset + offset - start, 1849 memcpy(vaddr + frag->page_offset + offset - start,
1809 from, copy); 1850 from, copy);
1810 kunmap_skb_frag(vaddr); 1851 kunmap_atomic(vaddr);
1811 1852
1812 if ((len -= copy) == 0) 1853 if ((len -= copy) == 0)
1813 return 0; 1854 return 0;
@@ -1867,21 +1908,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1867 1908
1868 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1909 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1869 int end; 1910 int end;
1911 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1870 1912
1871 WARN_ON(start > offset + len); 1913 WARN_ON(start > offset + len);
1872 1914
1873 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1915 end = start + skb_frag_size(frag);
1874 if ((copy = end - offset) > 0) { 1916 if ((copy = end - offset) > 0) {
1875 __wsum csum2; 1917 __wsum csum2;
1876 u8 *vaddr; 1918 u8 *vaddr;
1877 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1878 1919
1879 if (copy > len) 1920 if (copy > len)
1880 copy = len; 1921 copy = len;
1881 vaddr = kmap_skb_frag(frag); 1922 vaddr = kmap_atomic(skb_frag_page(frag));
1882 csum2 = csum_partial(vaddr + frag->page_offset + 1923 csum2 = csum_partial(vaddr + frag->page_offset +
1883 offset - start, copy, 0); 1924 offset - start, copy, 0);
1884 kunmap_skb_frag(vaddr); 1925 kunmap_atomic(vaddr);
1885 csum = csum_block_add(csum, csum2, pos); 1926 csum = csum_block_add(csum, csum2, pos);
1886 if (!(len -= copy)) 1927 if (!(len -= copy))
1887 return csum; 1928 return csum;
@@ -1953,12 +1994,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1953 1994
1954 if (copy > len) 1995 if (copy > len)
1955 copy = len; 1996 copy = len;
1956 vaddr = kmap_skb_frag(frag); 1997 vaddr = kmap_atomic(skb_frag_page(frag));
1957 csum2 = csum_partial_copy_nocheck(vaddr + 1998 csum2 = csum_partial_copy_nocheck(vaddr +
1958 frag->page_offset + 1999 frag->page_offset +
1959 offset - start, to, 2000 offset - start, to,
1960 copy, 0); 2001 copy, 0);
1961 kunmap_skb_frag(vaddr); 2002 kunmap_atomic(vaddr);
1962 csum = csum_block_add(csum, csum2, pos); 2003 csum = csum_block_add(csum, csum2, pos);
1963 if (!(len -= copy)) 2004 if (!(len -= copy))
1964 return csum; 2005 return csum;
@@ -2478,7 +2519,7 @@ next_skb:
2478 2519
2479 if (abs_offset < block_limit) { 2520 if (abs_offset < block_limit) {
2480 if (!st->frag_data) 2521 if (!st->frag_data)
2481 st->frag_data = kmap_skb_frag(frag); 2522 st->frag_data = kmap_atomic(skb_frag_page(frag));
2482 2523
2483 *data = (u8 *) st->frag_data + frag->page_offset + 2524 *data = (u8 *) st->frag_data + frag->page_offset +
2484 (abs_offset - st->stepped_offset); 2525 (abs_offset - st->stepped_offset);
@@ -2487,7 +2528,7 @@ next_skb:
2487 } 2528 }
2488 2529
2489 if (st->frag_data) { 2530 if (st->frag_data) {
2490 kunmap_skb_frag(st->frag_data); 2531 kunmap_atomic(st->frag_data);
2491 st->frag_data = NULL; 2532 st->frag_data = NULL;
2492 } 2533 }
2493 2534
@@ -2496,7 +2537,7 @@ next_skb:
2496 } 2537 }
2497 2538
2498 if (st->frag_data) { 2539 if (st->frag_data) {
2499 kunmap_skb_frag(st->frag_data); 2540 kunmap_atomic(st->frag_data);
2500 st->frag_data = NULL; 2541 st->frag_data = NULL;
2501 } 2542 }
2502 2543
@@ -2524,7 +2565,7 @@ EXPORT_SYMBOL(skb_seq_read);
2524void skb_abort_seq_read(struct skb_seq_state *st) 2565void skb_abort_seq_read(struct skb_seq_state *st)
2525{ 2566{
2526 if (st->frag_data) 2567 if (st->frag_data)
2527 kunmap_skb_frag(st->frag_data); 2568 kunmap_atomic(st->frag_data);
2528} 2569}
2529EXPORT_SYMBOL(skb_abort_seq_read); 2570EXPORT_SYMBOL(skb_abort_seq_read);
2530 2571
@@ -2716,14 +2757,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2716 if (unlikely(!nskb)) 2757 if (unlikely(!nskb))
2717 goto err; 2758 goto err;
2718 2759
2719 hsize = skb_end_pointer(nskb) - nskb->head; 2760 hsize = skb_end_offset(nskb);
2720 if (skb_cow_head(nskb, doffset + headroom)) { 2761 if (skb_cow_head(nskb, doffset + headroom)) {
2721 kfree_skb(nskb); 2762 kfree_skb(nskb);
2722 goto err; 2763 goto err;
2723 } 2764 }
2724 2765
2725 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2766 nskb->truesize += skb_end_offset(nskb) - hsize;
2726 hsize;
2727 skb_release_head_state(nskb); 2767 skb_release_head_state(nskb);
2728 __skb_push(nskb, doffset); 2768 __skb_push(nskb, doffset);
2729 } else { 2769 } else {
@@ -2841,6 +2881,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2841 unsigned int len = skb_gro_len(skb); 2881 unsigned int len = skb_gro_len(skb);
2842 unsigned int offset = skb_gro_offset(skb); 2882 unsigned int offset = skb_gro_offset(skb);
2843 unsigned int headlen = skb_headlen(skb); 2883 unsigned int headlen = skb_headlen(skb);
2884 unsigned int delta_truesize;
2844 2885
2845 if (p->len + len >= 65536) 2886 if (p->len + len >= 65536)
2846 return -E2BIG; 2887 return -E2BIG;
@@ -2870,11 +2911,41 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2870 frag->page_offset += offset; 2911 frag->page_offset += offset;
2871 skb_frag_size_sub(frag, offset); 2912 skb_frag_size_sub(frag, offset);
2872 2913
2914 /* all fragments truesize : remove (head size + sk_buff) */
2915 delta_truesize = skb->truesize -
2916 SKB_TRUESIZE(skb_end_offset(skb));
2917
2873 skb->truesize -= skb->data_len; 2918 skb->truesize -= skb->data_len;
2874 skb->len -= skb->data_len; 2919 skb->len -= skb->data_len;
2875 skb->data_len = 0; 2920 skb->data_len = 0;
2876 2921
2877 NAPI_GRO_CB(skb)->free = 1; 2922 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
2923 goto done;
2924 } else if (skb->head_frag) {
2925 int nr_frags = pinfo->nr_frags;
2926 skb_frag_t *frag = pinfo->frags + nr_frags;
2927 struct page *page = virt_to_head_page(skb->head);
2928 unsigned int first_size = headlen - offset;
2929 unsigned int first_offset;
2930
2931 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
2932 return -E2BIG;
2933
2934 first_offset = skb->data -
2935 (unsigned char *)page_address(page) +
2936 offset;
2937
2938 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
2939
2940 frag->page.p = page;
2941 frag->page_offset = first_offset;
2942 skb_frag_size_set(frag, first_size);
2943
2944 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
2945 /* We dont need to clear skbinfo->nr_frags here */
2946
2947 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
2948 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
2878 goto done; 2949 goto done;
2879 } else if (skb_gro_len(p) != pinfo->gso_size) 2950 } else if (skb_gro_len(p) != pinfo->gso_size)
2880 return -E2BIG; 2951 return -E2BIG;
@@ -2906,7 +2977,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2906 nskb->prev = p; 2977 nskb->prev = p;
2907 2978
2908 nskb->data_len += p->len; 2979 nskb->data_len += p->len;
2909 nskb->truesize += p->len; 2980 nskb->truesize += p->truesize;
2910 nskb->len += p->len; 2981 nskb->len += p->len;
2911 2982
2912 *head = nskb; 2983 *head = nskb;
@@ -2916,6 +2987,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2916 p = nskb; 2987 p = nskb;
2917 2988
2918merge: 2989merge:
2990 delta_truesize = skb->truesize;
2919 if (offset > headlen) { 2991 if (offset > headlen) {
2920 unsigned int eat = offset - headlen; 2992 unsigned int eat = offset - headlen;
2921 2993
@@ -2935,7 +3007,7 @@ merge:
2935done: 3007done:
2936 NAPI_GRO_CB(p)->count++; 3008 NAPI_GRO_CB(p)->count++;
2937 p->data_len += len; 3009 p->data_len += len;
2938 p->truesize += len; 3010 p->truesize += delta_truesize;
2939 p->len += len; 3011 p->len += len;
2940 3012
2941 NAPI_GRO_CB(skb)->same_flow = 1; 3013 NAPI_GRO_CB(skb)->same_flow = 1;
@@ -3160,8 +3232,10 @@ static void sock_rmem_free(struct sk_buff *skb)
3160 */ 3232 */
3161int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3233int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3162{ 3234{
3235 int len = skb->len;
3236
3163 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3237 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3164 (unsigned)sk->sk_rcvbuf) 3238 (unsigned int)sk->sk_rcvbuf)
3165 return -ENOMEM; 3239 return -ENOMEM;
3166 3240
3167 skb_orphan(skb); 3241 skb_orphan(skb);
@@ -3174,7 +3248,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3174 3248
3175 skb_queue_tail(&sk->sk_error_queue, skb); 3249 skb_queue_tail(&sk->sk_error_queue, skb);
3176 if (!sock_flag(sk, SOCK_DEAD)) 3250 if (!sock_flag(sk, SOCK_DEAD))
3177 sk->sk_data_ready(sk, skb->len); 3251 sk->sk_data_ready(sk, len);
3178 return 0; 3252 return 0;
3179} 3253}
3180EXPORT_SYMBOL(sock_queue_err_skb); 3254EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3255,10 +3329,8 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3255{ 3329{
3256 if (unlikely(start > skb_headlen(skb)) || 3330 if (unlikely(start > skb_headlen(skb)) ||
3257 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3331 unlikely((int)start + off > skb_headlen(skb) - 2)) {
3258 if (net_ratelimit()) 3332 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3259 printk(KERN_WARNING 3333 start, off, skb_headlen(skb));
3260 "bad partial csum: csum=%u/%u len=%u\n",
3261 start, off, skb_headlen(skb));
3262 return false; 3334 return false;
3263 } 3335 }
3264 skb->ip_summed = CHECKSUM_PARTIAL; 3336 skb->ip_summed = CHECKSUM_PARTIAL;
@@ -3270,8 +3342,93 @@ EXPORT_SYMBOL_GPL(skb_partial_csum_set);
3270 3342
3271void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3343void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3272{ 3344{
3273 if (net_ratelimit()) 3345 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
3274 pr_warning("%s: received packets cannot be forwarded" 3346 skb->dev->name);
3275 " while LRO is enabled\n", skb->dev->name);
3276} 3347}
3277EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3348EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3349
3350void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3351{
3352 if (head_stolen)
3353 kmem_cache_free(skbuff_head_cache, skb);
3354 else
3355 __kfree_skb(skb);
3356}
3357EXPORT_SYMBOL(kfree_skb_partial);
3358
3359/**
3360 * skb_try_coalesce - try to merge skb to prior one
3361 * @to: prior buffer
3362 * @from: buffer to add
3363 * @fragstolen: pointer to boolean
3364 *
3365 */
3366bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3367 bool *fragstolen, int *delta_truesize)
3368{
3369 int i, delta, len = from->len;
3370
3371 *fragstolen = false;
3372
3373 if (skb_cloned(to))
3374 return false;
3375
3376 if (len <= skb_tailroom(to)) {
3377 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
3378 *delta_truesize = 0;
3379 return true;
3380 }
3381
3382 if (skb_has_frag_list(to) || skb_has_frag_list(from))
3383 return false;
3384
3385 if (skb_headlen(from) != 0) {
3386 struct page *page;
3387 unsigned int offset;
3388
3389 if (skb_shinfo(to)->nr_frags +
3390 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
3391 return false;
3392
3393 if (skb_head_is_locked(from))
3394 return false;
3395
3396 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3397
3398 page = virt_to_head_page(from->head);
3399 offset = from->data - (unsigned char *)page_address(page);
3400
3401 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
3402 page, offset, skb_headlen(from));
3403 *fragstolen = true;
3404 } else {
3405 if (skb_shinfo(to)->nr_frags +
3406 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3407 return false;
3408
3409 delta = from->truesize -
3410 SKB_TRUESIZE(skb_end_pointer(from) - from->head);
3411 }
3412
3413 WARN_ON_ONCE(delta < len);
3414
3415 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3416 skb_shinfo(from)->frags,
3417 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3418 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3419
3420 if (!skb_cloned(from))
3421 skb_shinfo(from)->nr_frags = 0;
3422
3423 /* if the skb is cloned this does nothing since we set nr_frags to 0 */
3424 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3425 skb_frag_ref(from, i);
3426
3427 to->truesize += delta;
3428 to->len += len;
3429 to->data_len += len;
3430
3431 *delta_truesize = delta;
3432 return true;
3433}
3434EXPORT_SYMBOL(skb_try_coalesce);
diff --git a/net/core/sock.c b/net/core/sock.c
index 02f8dfe320b7..9e5b71fda6ec 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -89,6 +89,8 @@
89 * 2 of the License, or (at your option) any later version. 89 * 2 of the License, or (at your option) any later version.
90 */ 90 */
91 91
92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
92#include <linux/capability.h> 94#include <linux/capability.h>
93#include <linux/errno.h> 95#include <linux/errno.h>
94#include <linux/types.h> 96#include <linux/types.h>
@@ -111,11 +113,11 @@
111#include <linux/init.h> 113#include <linux/init.h>
112#include <linux/highmem.h> 114#include <linux/highmem.h>
113#include <linux/user_namespace.h> 115#include <linux/user_namespace.h>
114#include <linux/jump_label.h> 116#include <linux/static_key.h>
115#include <linux/memcontrol.h> 117#include <linux/memcontrol.h>
118#include <linux/prefetch.h>
116 119
117#include <asm/uaccess.h> 120#include <asm/uaccess.h>
118#include <asm/system.h>
119 121
120#include <linux/netdevice.h> 122#include <linux/netdevice.h>
121#include <net/protocol.h> 123#include <net/protocol.h>
@@ -141,7 +143,7 @@ static DEFINE_MUTEX(proto_list_mutex);
141static LIST_HEAD(proto_list); 143static LIST_HEAD(proto_list);
142 144
143#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 145#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
144int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) 146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
145{ 147{
146 struct proto *proto; 148 struct proto *proto;
147 int ret = 0; 149 int ret = 0;
@@ -149,7 +151,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
149 mutex_lock(&proto_list_mutex); 151 mutex_lock(&proto_list_mutex);
150 list_for_each_entry(proto, &proto_list, node) { 152 list_for_each_entry(proto, &proto_list, node) {
151 if (proto->init_cgroup) { 153 if (proto->init_cgroup) {
152 ret = proto->init_cgroup(cgrp, ss); 154 ret = proto->init_cgroup(memcg, ss);
153 if (ret) 155 if (ret)
154 goto out; 156 goto out;
155 } 157 }
@@ -160,19 +162,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
160out: 162out:
161 list_for_each_entry_continue_reverse(proto, &proto_list, node) 163 list_for_each_entry_continue_reverse(proto, &proto_list, node)
162 if (proto->destroy_cgroup) 164 if (proto->destroy_cgroup)
163 proto->destroy_cgroup(cgrp, ss); 165 proto->destroy_cgroup(memcg);
164 mutex_unlock(&proto_list_mutex); 166 mutex_unlock(&proto_list_mutex);
165 return ret; 167 return ret;
166} 168}
167 169
168void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) 170void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
169{ 171{
170 struct proto *proto; 172 struct proto *proto;
171 173
172 mutex_lock(&proto_list_mutex); 174 mutex_lock(&proto_list_mutex);
173 list_for_each_entry_reverse(proto, &proto_list, node) 175 list_for_each_entry_reverse(proto, &proto_list, node)
174 if (proto->destroy_cgroup) 176 if (proto->destroy_cgroup)
175 proto->destroy_cgroup(cgrp, ss); 177 proto->destroy_cgroup(memcg);
176 mutex_unlock(&proto_list_mutex); 178 mutex_unlock(&proto_list_mutex);
177} 179}
178#endif 180#endif
@@ -184,7 +186,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
184static struct lock_class_key af_family_keys[AF_MAX]; 186static struct lock_class_key af_family_keys[AF_MAX];
185static struct lock_class_key af_family_slock_keys[AF_MAX]; 187static struct lock_class_key af_family_slock_keys[AF_MAX];
186 188
187struct jump_label_key memcg_socket_limit_enabled; 189struct static_key memcg_socket_limit_enabled;
188EXPORT_SYMBOL(memcg_socket_limit_enabled); 190EXPORT_SYMBOL(memcg_socket_limit_enabled);
189 191
190/* 192/*
@@ -259,7 +261,9 @@ static struct lock_class_key af_callback_keys[AF_MAX];
259 261
260/* Run time adjustable parameters. */ 262/* Run time adjustable parameters. */
261__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; 263__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
264EXPORT_SYMBOL(sysctl_wmem_max);
262__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; 265__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
266EXPORT_SYMBOL(sysctl_rmem_max);
263__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 267__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
264__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 268__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
265 269
@@ -295,9 +299,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
295 *timeo_p = 0; 299 *timeo_p = 0;
296 if (warned < 10 && net_ratelimit()) { 300 if (warned < 10 && net_ratelimit()) {
297 warned++; 301 warned++;
298 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " 302 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
299 "tries to set negative timeout\n", 303 __func__, current->comm, task_pid_nr(current));
300 current->comm, task_pid_nr(current));
301 } 304 }
302 return 0; 305 return 0;
303 } 306 }
@@ -315,8 +318,8 @@ static void sock_warn_obsolete_bsdism(const char *name)
315 static char warncomm[TASK_COMM_LEN]; 318 static char warncomm[TASK_COMM_LEN];
316 if (strcmp(warncomm, current->comm) && warned < 5) { 319 if (strcmp(warncomm, current->comm) && warned < 5) {
317 strcpy(warncomm, current->comm); 320 strcpy(warncomm, current->comm);
318 printk(KERN_WARNING "process `%s' is using obsolete " 321 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
319 "%s SO_BSDCOMPAT\n", warncomm, name); 322 warncomm, name);
320 warned++; 323 warned++;
321 } 324 }
322} 325}
@@ -390,7 +393,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
390 393
391 skb->dev = NULL; 394 skb->dev = NULL;
392 395
393 if (sk_rcvqueues_full(sk, skb)) { 396 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
394 atomic_inc(&sk->sk_drops); 397 atomic_inc(&sk->sk_drops);
395 goto discard_and_relse; 398 goto discard_and_relse;
396 } 399 }
@@ -407,7 +410,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
407 rc = sk_backlog_rcv(sk, skb); 410 rc = sk_backlog_rcv(sk, skb);
408 411
409 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 412 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
410 } else if (sk_add_backlog(sk, skb)) { 413 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
411 bh_unlock_sock(sk); 414 bh_unlock_sock(sk);
412 atomic_inc(&sk->sk_drops); 415 atomic_inc(&sk->sk_drops);
413 goto discard_and_relse; 416 goto discard_and_relse;
@@ -562,7 +565,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
562 sock_valbool_flag(sk, SOCK_DBG, valbool); 565 sock_valbool_flag(sk, SOCK_DBG, valbool);
563 break; 566 break;
564 case SO_REUSEADDR: 567 case SO_REUSEADDR:
565 sk->sk_reuse = valbool; 568 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
566 break; 569 break;
567 case SO_TYPE: 570 case SO_TYPE:
568 case SO_PROTOCOL: 571 case SO_PROTOCOL:
@@ -578,23 +581,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
578 break; 581 break;
579 case SO_SNDBUF: 582 case SO_SNDBUF:
580 /* Don't error on this BSD doesn't and if you think 583 /* Don't error on this BSD doesn't and if you think
581 about it this is right. Otherwise apps have to 584 * about it this is right. Otherwise apps have to
582 play 'guess the biggest size' games. RCVBUF/SNDBUF 585 * play 'guess the biggest size' games. RCVBUF/SNDBUF
583 are treated in BSD as hints */ 586 * are treated in BSD as hints
584 587 */
585 if (val > sysctl_wmem_max) 588 val = min_t(u32, val, sysctl_wmem_max);
586 val = sysctl_wmem_max;
587set_sndbuf: 589set_sndbuf:
588 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 590 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
589 if ((val * 2) < SOCK_MIN_SNDBUF) 591 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
590 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 592 /* Wake up sending tasks if we upped the value. */
591 else
592 sk->sk_sndbuf = val * 2;
593
594 /*
595 * Wake up sending tasks if we
596 * upped the value.
597 */
598 sk->sk_write_space(sk); 593 sk->sk_write_space(sk);
599 break; 594 break;
600 595
@@ -607,12 +602,11 @@ set_sndbuf:
607 602
608 case SO_RCVBUF: 603 case SO_RCVBUF:
609 /* Don't error on this BSD doesn't and if you think 604 /* Don't error on this BSD doesn't and if you think
610 about it this is right. Otherwise apps have to 605 * about it this is right. Otherwise apps have to
611 play 'guess the biggest size' games. RCVBUF/SNDBUF 606 * play 'guess the biggest size' games. RCVBUF/SNDBUF
612 are treated in BSD as hints */ 607 * are treated in BSD as hints
613 608 */
614 if (val > sysctl_rmem_max) 609 val = min_t(u32, val, sysctl_rmem_max);
615 val = sysctl_rmem_max;
616set_rcvbuf: 610set_rcvbuf:
617 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 611 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
618 /* 612 /*
@@ -630,10 +624,7 @@ set_rcvbuf:
630 * returning the value we actually used in getsockopt 624 * returning the value we actually used in getsockopt
631 * is the most desirable behavior. 625 * is the most desirable behavior.
632 */ 626 */
633 if ((val * 2) < SOCK_MIN_RCVBUF) 627 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
634 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
635 else
636 sk->sk_rcvbuf = val * 2;
637 break; 628 break;
638 629
639 case SO_RCVBUFFORCE: 630 case SO_RCVBUFFORCE:
@@ -793,6 +784,17 @@ set_rcvbuf:
793 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 784 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
794 break; 785 break;
795 786
787 case SO_PEEK_OFF:
788 if (sock->ops->set_peek_off)
789 sock->ops->set_peek_off(sk, val);
790 else
791 ret = -EOPNOTSUPP;
792 break;
793
794 case SO_NOFCS:
795 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
796 break;
797
796 default: 798 default:
797 ret = -ENOPROTOOPT; 799 ret = -ENOPROTOOPT;
798 break; 800 break;
@@ -811,8 +813,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
811 if (cred) { 813 if (cred) {
812 struct user_namespace *current_ns = current_user_ns(); 814 struct user_namespace *current_ns = current_user_ns();
813 815
814 ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); 816 ucred->uid = from_kuid(current_ns, cred->euid);
815 ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); 817 ucred->gid = from_kgid(current_ns, cred->egid);
816 } 818 }
817} 819}
818EXPORT_SYMBOL_GPL(cred_to_ucred); 820EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -848,7 +850,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
848 break; 850 break;
849 851
850 case SO_BROADCAST: 852 case SO_BROADCAST:
851 v.val = !!sock_flag(sk, SOCK_BROADCAST); 853 v.val = sock_flag(sk, SOCK_BROADCAST);
852 break; 854 break;
853 855
854 case SO_SNDBUF: 856 case SO_SNDBUF:
@@ -864,7 +866,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
864 break; 866 break;
865 867
866 case SO_KEEPALIVE: 868 case SO_KEEPALIVE:
867 v.val = !!sock_flag(sk, SOCK_KEEPOPEN); 869 v.val = sock_flag(sk, SOCK_KEEPOPEN);
868 break; 870 break;
869 871
870 case SO_TYPE: 872 case SO_TYPE:
@@ -886,7 +888,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
886 break; 888 break;
887 889
888 case SO_OOBINLINE: 890 case SO_OOBINLINE:
889 v.val = !!sock_flag(sk, SOCK_URGINLINE); 891 v.val = sock_flag(sk, SOCK_URGINLINE);
890 break; 892 break;
891 893
892 case SO_NO_CHECK: 894 case SO_NO_CHECK:
@@ -899,7 +901,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
899 901
900 case SO_LINGER: 902 case SO_LINGER:
901 lv = sizeof(v.ling); 903 lv = sizeof(v.ling);
902 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); 904 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
903 v.ling.l_linger = sk->sk_lingertime / HZ; 905 v.ling.l_linger = sk->sk_lingertime / HZ;
904 break; 906 break;
905 907
@@ -965,7 +967,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
965 break; 967 break;
966 968
967 case SO_PASSCRED: 969 case SO_PASSCRED:
968 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; 970 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
969 break; 971 break;
970 972
971 case SO_PEERCRED: 973 case SO_PEERCRED:
@@ -1000,7 +1002,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1000 break; 1002 break;
1001 1003
1002 case SO_PASSSEC: 1004 case SO_PASSSEC:
1003 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; 1005 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1004 break; 1006 break;
1005 1007
1006 case SO_PEERSEC: 1008 case SO_PEERSEC:
@@ -1011,13 +1013,22 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1011 break; 1013 break;
1012 1014
1013 case SO_RXQ_OVFL: 1015 case SO_RXQ_OVFL:
1014 v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); 1016 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1015 break; 1017 break;
1016 1018
1017 case SO_WIFI_STATUS: 1019 case SO_WIFI_STATUS:
1018 v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); 1020 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1019 break; 1021 break;
1020 1022
1023 case SO_PEEK_OFF:
1024 if (!sock->ops->set_peek_off)
1025 return -EOPNOTSUPP;
1026
1027 v.val = sk->sk_peek_off;
1028 break;
1029 case SO_NOFCS:
1030 v.val = sock_flag(sk, SOCK_NOFCS);
1031 break;
1021 default: 1032 default:
1022 return -ENOPROTOOPT; 1033 return -ENOPROTOOPT;
1023 } 1034 }
@@ -1228,8 +1239,8 @@ static void __sk_free(struct sock *sk)
1228 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 1239 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1229 1240
1230 if (atomic_read(&sk->sk_omem_alloc)) 1241 if (atomic_read(&sk->sk_omem_alloc))
1231 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", 1242 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1232 __func__, atomic_read(&sk->sk_omem_alloc)); 1243 __func__, atomic_read(&sk->sk_omem_alloc));
1233 1244
1234 if (sk->sk_peer_cred) 1245 if (sk->sk_peer_cred)
1235 put_cred(sk->sk_peer_cred); 1246 put_cred(sk->sk_peer_cred);
@@ -1515,7 +1526,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1515 */ 1526 */
1516void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 1527void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1517{ 1528{
1518 if ((unsigned)size <= sysctl_optmem_max && 1529 if ((unsigned int)size <= sysctl_optmem_max &&
1519 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { 1530 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1520 void *mem; 1531 void *mem;
1521 /* First do the add, to avoid the race if kmalloc 1532 /* First do the add, to avoid the race if kmalloc
@@ -1581,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1581 gfp_t gfp_mask; 1592 gfp_t gfp_mask;
1582 long timeo; 1593 long timeo;
1583 int err; 1594 int err;
1595 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1596
1597 err = -EMSGSIZE;
1598 if (npages > MAX_SKB_FRAGS)
1599 goto failure;
1584 1600
1585 gfp_mask = sk->sk_allocation; 1601 gfp_mask = sk->sk_allocation;
1586 if (gfp_mask & __GFP_WAIT) 1602 if (gfp_mask & __GFP_WAIT)
@@ -1599,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1599 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1615 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1600 skb = alloc_skb(header_len, gfp_mask); 1616 skb = alloc_skb(header_len, gfp_mask);
1601 if (skb) { 1617 if (skb) {
1602 int npages;
1603 int i; 1618 int i;
1604 1619
1605 /* No pages, we're done... */ 1620 /* No pages, we're done... */
1606 if (!data_len) 1621 if (!data_len)
1607 break; 1622 break;
1608 1623
1609 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1610 skb->truesize += data_len; 1624 skb->truesize += data_len;
1611 skb_shinfo(skb)->nr_frags = npages; 1625 skb_shinfo(skb)->nr_frags = npages;
1612 for (i = 0; i < npages; i++) { 1626 for (i = 0; i < npages; i++) {
@@ -1693,6 +1707,7 @@ static void __release_sock(struct sock *sk)
1693 do { 1707 do {
1694 struct sk_buff *next = skb->next; 1708 struct sk_buff *next = skb->next;
1695 1709
1710 prefetch(next);
1696 WARN_ON_ONCE(skb_dst_is_noref(skb)); 1711 WARN_ON_ONCE(skb_dst_is_noref(skb));
1697 skb->next = NULL; 1712 skb->next = NULL;
1698 sk_backlog_rcv(sk, skb); 1713 sk_backlog_rcv(sk, skb);
@@ -2092,6 +2107,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2092 2107
2093 sk->sk_sndmsg_page = NULL; 2108 sk->sk_sndmsg_page = NULL;
2094 sk->sk_sndmsg_off = 0; 2109 sk->sk_sndmsg_off = 0;
2110 sk->sk_peek_off = -1;
2095 2111
2096 sk->sk_peer_pid = NULL; 2112 sk->sk_peer_pid = NULL;
2097 sk->sk_peer_cred = NULL; 2113 sk->sk_peer_cred = NULL;
@@ -2412,7 +2428,7 @@ static void assign_proto_idx(struct proto *prot)
2412 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 2428 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2413 2429
2414 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 2430 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2415 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n"); 2431 pr_err("PROTO_INUSE_NR exhausted\n");
2416 return; 2432 return;
2417 } 2433 }
2418 2434
@@ -2442,8 +2458,8 @@ int proto_register(struct proto *prot, int alloc_slab)
2442 NULL); 2458 NULL);
2443 2459
2444 if (prot->slab == NULL) { 2460 if (prot->slab == NULL) {
2445 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", 2461 pr_crit("%s: Can't create sock SLAB cache!\n",
2446 prot->name); 2462 prot->name);
2447 goto out; 2463 goto out;
2448 } 2464 }
2449 2465
@@ -2457,8 +2473,8 @@ int proto_register(struct proto *prot, int alloc_slab)
2457 SLAB_HWCACHE_ALIGN, NULL); 2473 SLAB_HWCACHE_ALIGN, NULL);
2458 2474
2459 if (prot->rsk_prot->slab == NULL) { 2475 if (prot->rsk_prot->slab == NULL) {
2460 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", 2476 pr_crit("%s: Can't create request sock SLAB cache!\n",
2461 prot->name); 2477 prot->name);
2462 goto out_free_request_sock_slab_name; 2478 goto out_free_request_sock_slab_name;
2463 } 2479 }
2464 } 2480 }
@@ -2556,7 +2572,7 @@ static char proto_method_implemented(const void *method)
2556} 2572}
2557static long sock_prot_memory_allocated(struct proto *proto) 2573static long sock_prot_memory_allocated(struct proto *proto)
2558{ 2574{
2559 return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L; 2575 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2560} 2576}
2561 2577
2562static char *sock_prot_memory_pressure(struct proto *proto) 2578static char *sock_prot_memory_pressure(struct proto *proto)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b9868e1fd62c..5fd146720f39 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -10,7 +10,7 @@
10#include <linux/inet_diag.h> 10#include <linux/inet_diag.h>
11#include <linux/sock_diag.h> 11#include <linux/sock_diag.h>
12 12
13static struct sock_diag_handler *sock_diag_handlers[AF_MAX]; 13static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
14static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); 14static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
15static DEFINE_MUTEX(sock_diag_table_mutex); 15static DEFINE_MUTEX(sock_diag_table_mutex);
16 16
@@ -70,7 +70,7 @@ void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlms
70} 70}
71EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); 71EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
72 72
73int sock_diag_register(struct sock_diag_handler *hndl) 73int sock_diag_register(const struct sock_diag_handler *hndl)
74{ 74{
75 int err = 0; 75 int err = 0;
76 76
@@ -88,7 +88,7 @@ int sock_diag_register(struct sock_diag_handler *hndl)
88} 88}
89EXPORT_SYMBOL_GPL(sock_diag_register); 89EXPORT_SYMBOL_GPL(sock_diag_register);
90 90
91void sock_diag_unregister(struct sock_diag_handler *hnld) 91void sock_diag_unregister(const struct sock_diag_handler *hnld)
92{ 92{
93 int family = hnld->family; 93 int family = hnld->family;
94 94
@@ -102,7 +102,7 @@ void sock_diag_unregister(struct sock_diag_handler *hnld)
102} 102}
103EXPORT_SYMBOL_GPL(sock_diag_unregister); 103EXPORT_SYMBOL_GPL(sock_diag_unregister);
104 104
105static inline struct sock_diag_handler *sock_diag_lock_handler(int family) 105static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
106{ 106{
107 if (sock_diag_handlers[family] == NULL) 107 if (sock_diag_handlers[family] == NULL)
108 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 108 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
@@ -112,7 +112,7 @@ static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
112 return sock_diag_handlers[family]; 112 return sock_diag_handlers[family];
113} 113}
114 114
115static inline void sock_diag_unlock_handler(struct sock_diag_handler *h) 115static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
116{ 116{
117 mutex_unlock(&sock_diag_table_mutex); 117 mutex_unlock(&sock_diag_table_mutex);
118} 118}
@@ -121,7 +121,7 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
121{ 121{
122 int err; 122 int err;
123 struct sock_diag_req *req = NLMSG_DATA(nlh); 123 struct sock_diag_req *req = NLMSG_DATA(nlh);
124 struct sock_diag_handler *hndl; 124 const struct sock_diag_handler *hndl;
125 125
126 if (nlmsg_len(nlh) < sizeof(*req)) 126 if (nlmsg_len(nlh) < sizeof(*req))
127 return -EINVAL; 127 return -EINVAL;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d05559d4d9cd..a7c36845b123 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/kmemleak.h>
17 18
18#include <net/ip.h> 19#include <net/ip.h>
19#include <net/sock.h> 20#include <net/sock.h>
@@ -69,9 +70,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
69 if (sock_table != orig_sock_table) { 70 if (sock_table != orig_sock_table) {
70 rcu_assign_pointer(rps_sock_flow_table, sock_table); 71 rcu_assign_pointer(rps_sock_flow_table, sock_table);
71 if (sock_table) 72 if (sock_table)
72 jump_label_inc(&rps_needed); 73 static_key_slow_inc(&rps_needed);
73 if (orig_sock_table) { 74 if (orig_sock_table) {
74 jump_label_dec(&rps_needed); 75 static_key_slow_dec(&rps_needed);
75 synchronize_rcu(); 76 synchronize_rcu();
76 vfree(orig_sock_table); 77 vfree(orig_sock_table);
77 } 78 }
@@ -202,12 +203,6 @@ static struct ctl_table netns_core_table[] = {
202 { } 203 { }
203}; 204};
204 205
205__net_initdata struct ctl_path net_core_path[] = {
206 { .procname = "net", },
207 { .procname = "core", },
208 { },
209};
210
211static __net_init int sysctl_core_net_init(struct net *net) 206static __net_init int sysctl_core_net_init(struct net *net)
212{ 207{
213 struct ctl_table *tbl; 208 struct ctl_table *tbl;
@@ -223,8 +218,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
223 tbl[0].data = &net->core.sysctl_somaxconn; 218 tbl[0].data = &net->core.sysctl_somaxconn;
224 } 219 }
225 220
226 net->core.sysctl_hdr = register_net_sysctl_table(net, 221 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
227 net_core_path, tbl);
228 if (net->core.sysctl_hdr == NULL) 222 if (net->core.sysctl_hdr == NULL)
229 goto err_reg; 223 goto err_reg;
230 224
@@ -254,10 +248,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
254 248
255static __init int sysctl_core_init(void) 249static __init int sysctl_core_init(void)
256{ 250{
257 static struct ctl_table empty[1]; 251 register_net_sysctl(&init_net, "net/core", net_core_table);
258
259 register_sysctl_paths(net_core_path, empty);
260 register_net_sysctl_rotable(net_core_path, net_core_table);
261 return register_pernet_subsys(&sysctl_core_ops); 252 return register_pernet_subsys(&sysctl_core_ops);
262} 253}
263 254
diff --git a/net/core/utils.c b/net/core/utils.c
index 386e263f6066..39895a65e54a 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -30,7 +30,6 @@
30#include <net/net_ratelimit.h> 30#include <net/net_ratelimit.h>
31 31
32#include <asm/byteorder.h> 32#include <asm/byteorder.h>
33#include <asm/system.h>
34#include <asm/uaccess.h> 33#include <asm/uaccess.h>
35 34
36int net_msg_warn __read_mostly = 1; 35int net_msg_warn __read_mostly = 1;
@@ -59,14 +58,11 @@ __be32 in_aton(const char *str)
59 int i; 58 int i;
60 59
61 l = 0; 60 l = 0;
62 for (i = 0; i < 4; i++) 61 for (i = 0; i < 4; i++) {
63 {
64 l <<= 8; 62 l <<= 8;
65 if (*str != '\0') 63 if (*str != '\0') {
66 {
67 val = 0; 64 val = 0;
68 while (*str != '\0' && *str != '.' && *str != '\n') 65 while (*str != '\0' && *str != '.' && *str != '\n') {
69 {
70 val *= 10; 66 val *= 10;
71 val += *str - '0'; 67 val += *str - '0';
72 str++; 68 str++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d86053002c16..656c7c75b192 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, 178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, 179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, 180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
181 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
181}; 182};
182 183
183static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { 184static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -703,6 +704,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
703 704
704 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, 705 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
705 pid, seq, flags); 706 pid, seq, flags);
707 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
706out: 708out:
707 return ret; 709 return ret;
708} 710}
@@ -935,6 +937,7 @@ static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
935 937
936 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, 938 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
937 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); 939 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
940 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
938 941
939 return ret; 942 return ret;
940} 943}
@@ -1205,13 +1208,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1205 if (!app) 1208 if (!app)
1206 goto nla_put_failure; 1209 goto nla_put_failure;
1207 1210
1208 if (app_info_type) 1211 if (app_info_type &&
1209 NLA_PUT(skb, app_info_type, sizeof(info), &info); 1212 nla_put(skb, app_info_type, sizeof(info), &info))
1210 1213 goto nla_put_failure;
1211 for (i = 0; i < app_count; i++)
1212 NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
1213 &table[i]);
1214 1214
1215 for (i = 0; i < app_count; i++) {
1216 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1217 &table[i]))
1218 goto nla_put_failure;
1219 }
1215 nla_nest_end(skb, app); 1220 nla_nest_end(skb, app);
1216 } 1221 }
1217 err = 0; 1222 err = 0;
@@ -1230,8 +1235,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1230 int dcbx; 1235 int dcbx;
1231 int err = -EMSGSIZE; 1236 int err = -EMSGSIZE;
1232 1237
1233 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1238 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1234 1239 goto nla_put_failure;
1235 ieee = nla_nest_start(skb, DCB_ATTR_IEEE); 1240 ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1236 if (!ieee) 1241 if (!ieee)
1237 goto nla_put_failure; 1242 goto nla_put_failure;
@@ -1239,15 +1244,28 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1239 if (ops->ieee_getets) { 1244 if (ops->ieee_getets) {
1240 struct ieee_ets ets; 1245 struct ieee_ets ets;
1241 err = ops->ieee_getets(netdev, &ets); 1246 err = ops->ieee_getets(netdev, &ets);
1242 if (!err) 1247 if (!err &&
1243 NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); 1248 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1249 goto nla_put_failure;
1250 }
1251
1252 if (ops->ieee_getmaxrate) {
1253 struct ieee_maxrate maxrate;
1254 err = ops->ieee_getmaxrate(netdev, &maxrate);
1255 if (!err) {
1256 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1257 sizeof(maxrate), &maxrate);
1258 if (err)
1259 goto nla_put_failure;
1260 }
1244 } 1261 }
1245 1262
1246 if (ops->ieee_getpfc) { 1263 if (ops->ieee_getpfc) {
1247 struct ieee_pfc pfc; 1264 struct ieee_pfc pfc;
1248 err = ops->ieee_getpfc(netdev, &pfc); 1265 err = ops->ieee_getpfc(netdev, &pfc);
1249 if (!err) 1266 if (!err &&
1250 NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); 1267 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1268 goto nla_put_failure;
1251 } 1269 }
1252 1270
1253 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); 1271 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1296,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1278 if (ops->ieee_peer_getets) { 1296 if (ops->ieee_peer_getets) {
1279 struct ieee_ets ets; 1297 struct ieee_ets ets;
1280 err = ops->ieee_peer_getets(netdev, &ets); 1298 err = ops->ieee_peer_getets(netdev, &ets);
1281 if (!err) 1299 if (!err &&
1282 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); 1300 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1301 goto nla_put_failure;
1283 } 1302 }
1284 1303
1285 if (ops->ieee_peer_getpfc) { 1304 if (ops->ieee_peer_getpfc) {
1286 struct ieee_pfc pfc; 1305 struct ieee_pfc pfc;
1287 err = ops->ieee_peer_getpfc(netdev, &pfc); 1306 err = ops->ieee_peer_getpfc(netdev, &pfc);
1288 if (!err) 1307 if (!err &&
1289 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); 1308 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1309 goto nla_put_failure;
1290 } 1310 }
1291 1311
1292 if (ops->peer_getappinfo && ops->peer_getapptable) { 1312 if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1360,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1340 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, 1360 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1341 &prio, &pgid, &tc_pct, &up_map); 1361 &prio, &pgid, &tc_pct, &up_map);
1342 1362
1343 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); 1363 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1344 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); 1364 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1345 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); 1365 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1346 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); 1366 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1367 goto nla_put_failure;
1347 nla_nest_end(skb, tc_nest); 1368 nla_nest_end(skb, tc_nest);
1348 } 1369 }
1349 1370
@@ -1356,7 +1377,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1356 else 1377 else
1357 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, 1378 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1358 &tc_pct); 1379 &tc_pct);
1359 NLA_PUT_U8(skb, i, tc_pct); 1380 if (nla_put_u8(skb, i, tc_pct))
1381 goto nla_put_failure;
1360 } 1382 }
1361 nla_nest_end(skb, pg); 1383 nla_nest_end(skb, pg);
1362 return 0; 1384 return 0;
@@ -1373,8 +1395,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1373 int dcbx, i, err = -EMSGSIZE; 1395 int dcbx, i, err = -EMSGSIZE;
1374 u8 value; 1396 u8 value;
1375 1397
1376 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1398 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1377 1399 goto nla_put_failure;
1378 cee = nla_nest_start(skb, DCB_ATTR_CEE); 1400 cee = nla_nest_start(skb, DCB_ATTR_CEE);
1379 if (!cee) 1401 if (!cee)
1380 goto nla_put_failure; 1402 goto nla_put_failure;
@@ -1401,7 +1423,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1401 1423
1402 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 1424 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1403 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); 1425 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1404 NLA_PUT_U8(skb, i, value); 1426 if (nla_put_u8(skb, i, value))
1427 goto nla_put_failure;
1405 } 1428 }
1406 nla_nest_end(skb, pfc_nest); 1429 nla_nest_end(skb, pfc_nest);
1407 } 1430 }
@@ -1454,8 +1477,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1454 1477
1455 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; 1478 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1456 i++) 1479 i++)
1457 if (!ops->getfeatcfg(netdev, i, &value)) 1480 if (!ops->getfeatcfg(netdev, i, &value) &&
1458 NLA_PUT_U8(skb, i, value); 1481 nla_put_u8(skb, i, value))
1482 goto nla_put_failure;
1459 1483
1460 nla_nest_end(skb, feat); 1484 nla_nest_end(skb, feat);
1461 } 1485 }
@@ -1464,15 +1488,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1464 if (ops->cee_peer_getpg) { 1488 if (ops->cee_peer_getpg) {
1465 struct cee_pg pg; 1489 struct cee_pg pg;
1466 err = ops->cee_peer_getpg(netdev, &pg); 1490 err = ops->cee_peer_getpg(netdev, &pg);
1467 if (!err) 1491 if (!err &&
1468 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); 1492 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1493 goto nla_put_failure;
1469 } 1494 }
1470 1495
1471 if (ops->cee_peer_getpfc) { 1496 if (ops->cee_peer_getpfc) {
1472 struct cee_pfc pfc; 1497 struct cee_pfc pfc;
1473 err = ops->cee_peer_getpfc(netdev, &pfc); 1498 err = ops->cee_peer_getpfc(netdev, &pfc);
1474 if (!err) 1499 if (!err &&
1475 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); 1500 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1501 goto nla_put_failure;
1476 } 1502 }
1477 1503
1478 if (ops->peer_getappinfo && ops->peer_getapptable) { 1504 if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1615,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1589 goto err; 1615 goto err;
1590 } 1616 }
1591 1617
1618 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1619 struct ieee_maxrate *maxrate =
1620 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1621 err = ops->ieee_setmaxrate(netdev, maxrate);
1622 if (err)
1623 goto err;
1624 }
1625
1592 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1626 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1593 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1627 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1594 err = ops->ieee_setpfc(netdev, pfc); 1628 err = ops->ieee_setpfc(netdev, pfc);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 560627307200..8c67bedf85b0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -98,8 +98,9 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
98{ 98{
99 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); 99 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
100 100
101 DCCP_BUG_ON(hc->tx_t_ipi == 0);
101 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, 102 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
102 hc->tx_s, (unsigned)(hc->tx_x >> 6)); 103 hc->tx_s, (unsigned int)(hc->tx_x >> 6));
103} 104}
104 105
105static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) 106static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -152,9 +153,9 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
152 153
153 if (hc->tx_x != old_x) { 154 if (hc->tx_x != old_x) {
154 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " 155 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
155 "X_recv=%u\n", (unsigned)(old_x >> 6), 156 "X_recv=%u\n", (unsigned int)(old_x >> 6),
156 (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, 157 (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
157 (unsigned)(hc->tx_x_recv >> 6)); 158 (unsigned int)(hc->tx_x_recv >> 6));
158 159
159 ccid3_update_send_interval(hc); 160 ccid3_update_send_interval(hc);
160 } 161 }
@@ -236,8 +237,6 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
236 * 237 *
237 * Note that X_recv is scaled by 2^6 while X_calc is not 238 * Note that X_recv is scaled by 2^6 while X_calc is not
238 */ 239 */
239 BUG_ON(hc->tx_p && !hc->tx_x_calc);
240
241 if (hc->tx_x_calc > (hc->tx_x_recv >> 5)) 240 if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
242 hc->tx_x_recv = 241 hc->tx_x_recv =
243 max(hc->tx_x_recv / 2, 242 max(hc->tx_x_recv / 2,
@@ -426,8 +425,8 @@ done_computing_x:
426 "p=%u, X_calc=%u, X_recv=%u, X=%u\n", 425 "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
427 dccp_role(sk), sk, hc->tx_rtt, r_sample, 426 dccp_role(sk), sk, hc->tx_rtt, r_sample,
428 hc->tx_s, hc->tx_p, hc->tx_x_calc, 427 hc->tx_s, hc->tx_p, hc->tx_x_calc,
429 (unsigned)(hc->tx_x_recv >> 6), 428 (unsigned int)(hc->tx_x_recv >> 6),
430 (unsigned)(hc->tx_x >> 6)); 429 (unsigned int)(hc->tx_x >> 6));
431 430
432 /* unschedule no feedback timer */ 431 /* unschedule no feedback timer */
433 sk_stop_timer(sk, &hc->tx_no_feedback_timer); 432 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 29d6bb629a6c..9040be049d8c 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -75,7 +75,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
75 * state, about 60 seconds */ 75 * state, about 60 seconds */
76 76
77/* RFC 1122, 4.2.3.1 initial RTO value */ 77/* RFC 1122, 4.2.3.1 initial RTO value */
78#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ)) 78#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))
79 79
80/* 80/*
81 * The maximum back-off value for retransmissions. This is needed for 81 * The maximum back-off value for retransmissions. This is needed for
@@ -84,7 +84,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
84 * - feature-negotiation retransmission (sec. 6.6.3), 84 * - feature-negotiation retransmission (sec. 6.6.3),
85 * - Acks in client-PARTOPEN state (sec. 8.1.5). 85 * - Acks in client-PARTOPEN state (sec. 8.1.5).
86 */ 86 */
87#define DCCP_RTO_MAX ((unsigned)(64 * HZ)) 87#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
88 88
89/* 89/*
90 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4 90 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
@@ -287,9 +287,9 @@ extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
287extern int dccp_child_process(struct sock *parent, struct sock *child, 287extern int dccp_child_process(struct sock *parent, struct sock *child,
288 struct sk_buff *skb); 288 struct sk_buff *skb);
289extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 289extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
290 struct dccp_hdr *dh, unsigned len); 290 struct dccp_hdr *dh, unsigned int len);
291extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 291extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
292 const struct dccp_hdr *dh, const unsigned len); 292 const struct dccp_hdr *dh, const unsigned int len);
293 293
294extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); 294extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
295extern void dccp_destroy_sock(struct sock *sk); 295extern void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 51d5fe5fffba..bc93a333931e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -285,7 +285,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
285} 285}
286 286
287static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 287static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
288 const struct dccp_hdr *dh, const unsigned len) 288 const struct dccp_hdr *dh, const unsigned int len)
289{ 289{
290 struct dccp_sock *dp = dccp_sk(sk); 290 struct dccp_sock *dp = dccp_sk(sk);
291 291
@@ -366,7 +366,7 @@ discard:
366} 366}
367 367
368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
369 const struct dccp_hdr *dh, const unsigned len) 369 const struct dccp_hdr *dh, const unsigned int len)
370{ 370{
371 if (dccp_check_seqno(sk, skb)) 371 if (dccp_check_seqno(sk, skb))
372 goto discard; 372 goto discard;
@@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_established);
388static int dccp_rcv_request_sent_state_process(struct sock *sk, 388static int dccp_rcv_request_sent_state_process(struct sock *sk,
389 struct sk_buff *skb, 389 struct sk_buff *skb,
390 const struct dccp_hdr *dh, 390 const struct dccp_hdr *dh,
391 const unsigned len) 391 const unsigned int len)
392{ 392{
393 /* 393 /*
394 * Step 4: Prepare sequence numbers in REQUEST 394 * Step 4: Prepare sequence numbers in REQUEST
@@ -521,7 +521,7 @@ unable_to_proceed:
521static int dccp_rcv_respond_partopen_state_process(struct sock *sk, 521static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
522 struct sk_buff *skb, 522 struct sk_buff *skb,
523 const struct dccp_hdr *dh, 523 const struct dccp_hdr *dh,
524 const unsigned len) 524 const unsigned int len)
525{ 525{
526 struct dccp_sock *dp = dccp_sk(sk); 526 struct dccp_sock *dp = dccp_sk(sk);
527 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; 527 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
@@ -572,7 +572,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
572} 572}
573 573
574int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 574int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
575 struct dccp_hdr *dh, unsigned len) 575 struct dccp_hdr *dh, unsigned int len)
576{ 576{
577 struct dccp_sock *dp = dccp_sk(sk); 577 struct dccp_sock *dp = dccp_sk(sk);
578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1c67fe8ff90d..07f5579ca756 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -300,7 +300,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
300 */ 300 */
301 WARN_ON(req->sk); 301 WARN_ON(req->sk);
302 302
303 if (seq != dccp_rsk(req)->dreq_iss) { 303 if (!between48(seq, dccp_rsk(req)->dreq_iss,
304 dccp_rsk(req)->dreq_gss)) {
304 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 305 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
305 goto out; 306 goto out;
306 } 307 }
@@ -573,6 +574,11 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
573 kfree(inet_rsk(req)->opt); 574 kfree(inet_rsk(req)->opt);
574} 575}
575 576
577void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
578{
579}
580EXPORT_SYMBOL(dccp_syn_ack_timeout);
581
576static struct request_sock_ops dccp_request_sock_ops __read_mostly = { 582static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
577 .family = PF_INET, 583 .family = PF_INET,
578 .obj_size = sizeof(struct dccp_request_sock), 584 .obj_size = sizeof(struct dccp_request_sock),
@@ -580,6 +586,7 @@ static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
580 .send_ack = dccp_reqsk_send_ack, 586 .send_ack = dccp_reqsk_send_ack,
581 .destructor = dccp_v4_reqsk_destructor, 587 .destructor = dccp_v4_reqsk_destructor,
582 .send_reset = dccp_v4_ctl_send_reset, 588 .send_reset = dccp_v4_ctl_send_reset,
589 .syn_ack_timeout = dccp_syn_ack_timeout,
583}; 590};
584 591
585int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 592int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -639,11 +646,12 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
639 * 646 *
640 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 647 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
641 * 648 *
642 * In fact we defer setting S.GSR, S.SWL, S.SWH to 649 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
643 * dccp_create_openreq_child.
644 */ 650 */
645 dreq->dreq_isr = dcb->dccpd_seq; 651 dreq->dreq_isr = dcb->dccpd_seq;
652 dreq->dreq_gsr = dreq->dreq_isr;
646 dreq->dreq_iss = dccp_v4_init_sequence(skb); 653 dreq->dreq_iss = dccp_v4_init_sequence(skb);
654 dreq->dreq_gss = dreq->dreq_iss;
647 dreq->dreq_service = service; 655 dreq->dreq_service = service;
648 656
649 if (dccp_v4_send_response(sk, req, NULL)) 657 if (dccp_v4_send_response(sk, req, NULL))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ce903f747e64..fa9512d86f3b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -193,7 +193,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
193 */ 193 */
194 WARN_ON(req->sk != NULL); 194 WARN_ON(req->sk != NULL);
195 195
196 if (seq != dccp_rsk(req)->dreq_iss) { 196 if (!between48(seq, dccp_rsk(req)->dreq_iss,
197 dccp_rsk(req)->dreq_gss)) {
197 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 198 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
198 goto out; 199 goto out;
199 } 200 }
@@ -342,6 +343,7 @@ static struct request_sock_ops dccp6_request_sock_ops = {
342 .send_ack = dccp_reqsk_send_ack, 343 .send_ack = dccp_reqsk_send_ack,
343 .destructor = dccp_v6_reqsk_destructor, 344 .destructor = dccp_v6_reqsk_destructor,
344 .send_reset = dccp_v6_ctl_send_reset, 345 .send_reset = dccp_v6_ctl_send_reset,
346 .syn_ack_timeout = dccp_syn_ack_timeout,
345}; 347};
346 348
347static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) 349static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
@@ -440,11 +442,12 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
440 * 442 *
441 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 443 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
442 * 444 *
443 * In fact we defer setting S.GSR, S.SWL, S.SWH to 445 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
444 * dccp_create_openreq_child.
445 */ 446 */
446 dreq->dreq_isr = dcb->dccpd_seq; 447 dreq->dreq_isr = dcb->dccpd_seq;
448 dreq->dreq_gsr = dreq->dreq_isr;
447 dreq->dreq_iss = dccp_v6_init_sequence(skb); 449 dreq->dreq_iss = dccp_v6_init_sequence(skb);
450 dreq->dreq_gss = dreq->dreq_iss;
448 dreq->dreq_service = service; 451 dreq->dreq_service = service;
449 452
450 if (dccp_v6_send_response(sk, req, NULL)) 453 if (dccp_v6_send_response(sk, req, NULL))
@@ -577,7 +580,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
577 newnp->pktoptions = NULL; 580 newnp->pktoptions = NULL;
578 if (ireq6->pktopts != NULL) { 581 if (ireq6->pktopts != NULL) {
579 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); 582 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
580 kfree_skb(ireq6->pktopts); 583 consume_skb(ireq6->pktopts);
581 ireq6->pktopts = NULL; 584 ireq6->pktopts = NULL;
582 if (newnp->pktoptions) 585 if (newnp->pktoptions)
583 skb_set_owner_r(newnp->pktoptions, newsk); 586 skb_set_owner_r(newnp->pktoptions, newsk);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5a7f90bbffac..ea850ce35d4a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -127,9 +127,11 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
127 * activation below, as these windows all depend on the local 127 * activation below, as these windows all depend on the local
128 * and remote Sequence Window feature values (7.5.2). 128 * and remote Sequence Window feature values (7.5.2).
129 */ 129 */
130 newdp->dccps_gss = newdp->dccps_iss = dreq->dreq_iss; 130 newdp->dccps_iss = dreq->dreq_iss;
131 newdp->dccps_gss = dreq->dreq_gss;
131 newdp->dccps_gar = newdp->dccps_iss; 132 newdp->dccps_gar = newdp->dccps_iss;
132 newdp->dccps_gsr = newdp->dccps_isr = dreq->dreq_isr; 133 newdp->dccps_isr = dreq->dreq_isr;
134 newdp->dccps_gsr = dreq->dreq_gsr;
133 135
134 /* 136 /*
135 * Activate features: initialise CCIDs, sequence windows etc. 137 * Activate features: initialise CCIDs, sequence windows etc.
@@ -164,9 +166,9 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
164 /* Check for retransmitted REQUEST */ 166 /* Check for retransmitted REQUEST */
165 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 167 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
166 168
167 if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) { 169 if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
168 dccp_pr_debug("Retransmitted REQUEST\n"); 170 dccp_pr_debug("Retransmitted REQUEST\n");
169 dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq; 171 dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
170 /* 172 /*
171 * Send another RESPONSE packet 173 * Send another RESPONSE packet
172 * To protect against Request floods, increment retrans 174 * To protect against Request floods, increment retrans
@@ -186,12 +188,14 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
186 goto drop; 188 goto drop;
187 189
188 /* Invalid ACK */ 190 /* Invalid ACK */
189 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dreq->dreq_iss) { 191 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
192 dreq->dreq_iss, dreq->dreq_gss)) {
190 dccp_pr_debug("Invalid ACK number: ack_seq=%llu, " 193 dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
191 "dreq_iss=%llu\n", 194 "dreq_iss=%llu, dreq_gss=%llu\n",
192 (unsigned long long) 195 (unsigned long long)
193 DCCP_SKB_CB(skb)->dccpd_ack_seq, 196 DCCP_SKB_CB(skb)->dccpd_ack_seq,
194 (unsigned long long) dreq->dreq_iss); 197 (unsigned long long) dreq->dreq_iss,
198 (unsigned long long) dreq->dreq_gss);
195 goto drop; 199 goto drop;
196 } 200 }
197 201
diff --git a/net/dccp/output.c b/net/dccp/output.c
index dede3edb8849..787367308797 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -408,10 +408,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
408 skb_dst_set(skb, dst_clone(dst)); 408 skb_dst_set(skb, dst_clone(dst));
409 409
410 dreq = dccp_rsk(req); 410 dreq = dccp_rsk(req);
411 if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ 411 if (inet_rsk(req)->acked) /* increase GSS upon retransmission */
412 dccp_inc_seqno(&dreq->dreq_iss); 412 dccp_inc_seqno(&dreq->dreq_gss);
413 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; 413 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
414 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; 414 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;
415 415
416 /* Resolve feature dependencies resulting from choice of CCID */ 416 /* Resolve feature dependencies resulting from choice of CCID */
417 if (dccp_feat_server_ccid_dependencies(dreq)) 417 if (dccp_feat_server_ccid_dependencies(dreq))
@@ -429,8 +429,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
429 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; 429 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
430 dh->dccph_type = DCCP_PKT_RESPONSE; 430 dh->dccph_type = DCCP_PKT_RESPONSE;
431 dh->dccph_x = 1; 431 dh->dccph_x = 1;
432 dccp_hdr_set_seq(dh, dreq->dreq_iss); 432 dccp_hdr_set_seq(dh, dreq->dreq_gss);
433 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); 433 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
434 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; 434 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
435 435
436 dccp_csum_outgoing(skb); 436 dccp_csum_outgoing(skb);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7065c0ae1e7b..6c7c78b83940 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
848 default: 848 default:
849 dccp_pr_debug("packet_type=%s\n", 849 dccp_pr_debug("packet_type=%s\n",
850 dccp_packet_name(dh->dccph_type)); 850 dccp_packet_name(dh->dccph_type));
851 sk_eat_skb(sk, skb, 0); 851 sk_eat_skb(sk, skb, false);
852 } 852 }
853verify_sock_status: 853verify_sock_status:
854 if (sock_flag(sk, SOCK_DONE)) { 854 if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ verify_sock_status:
905 len = skb->len; 905 len = skb->len;
906 found_fin_ok: 906 found_fin_ok:
907 if (!(flags & MSG_PEEK)) 907 if (!(flags & MSG_PEEK))
908 sk_eat_skb(sk, skb, 0); 908 sk_eat_skb(sk, skb, false);
909 break; 909 break;
910 } while (1); 910 } while (1);
911out: 911out:
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 42348824ee31..607ab71b5a0c 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -98,18 +98,11 @@ static struct ctl_table dccp_default_table[] = {
98 { } 98 { }
99}; 99};
100 100
101static struct ctl_path dccp_path[] = {
102 { .procname = "net", },
103 { .procname = "dccp", },
104 { .procname = "default", },
105 { }
106};
107
108static struct ctl_table_header *dccp_table_header; 101static struct ctl_table_header *dccp_table_header;
109 102
110int __init dccp_sysctl_init(void) 103int __init dccp_sysctl_init(void)
111{ 104{
112 dccp_table_header = register_sysctl_paths(dccp_path, 105 dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default",
113 dccp_default_table); 106 dccp_default_table);
114 107
115 return dccp_table_header != NULL ? 0 : -ENOMEM; 108 return dccp_table_header != NULL ? 0 : -ENOMEM;
@@ -118,7 +111,7 @@ int __init dccp_sysctl_init(void)
118void dccp_sysctl_exit(void) 111void dccp_sysctl_exit(void)
119{ 112{
120 if (dccp_table_header != NULL) { 113 if (dccp_table_header != NULL) {
121 unregister_sysctl_table(dccp_table_header); 114 unregister_net_sysctl_table(dccp_table_header);
122 dccp_table_header = NULL; 115 dccp_table_header = NULL;
123 } 116 }
124} 117}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 19acd00a6382..2ba1a2814c24 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -119,7 +119,6 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
119#include <net/sock.h> 119#include <net/sock.h>
120#include <net/tcp_states.h> 120#include <net/tcp_states.h>
121#include <net/flow.h> 121#include <net/flow.h>
122#include <asm/system.h>
123#include <asm/ioctls.h> 122#include <asm/ioctls.h>
124#include <linux/capability.h> 123#include <linux/capability.h>
125#include <linux/mm.h> 124#include <linux/mm.h>
@@ -251,7 +250,7 @@ static void dn_unhash_sock_bh(struct sock *sk)
251static struct hlist_head *listen_hash(struct sockaddr_dn *addr) 250static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
252{ 251{
253 int i; 252 int i;
254 unsigned hash = addr->sdn_objnum; 253 unsigned int hash = addr->sdn_objnum;
255 254
256 if (hash == 0) { 255 if (hash == 0) {
257 hash = addr->sdn_objnamel; 256 hash = addr->sdn_objnamel;
@@ -1845,9 +1844,9 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que
1845 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't 1844 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1846 * make much practical difference. 1845 * make much practical difference.
1847 */ 1846 */
1848unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) 1847unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
1849{ 1848{
1850 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1849 unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
1851 if (dev) { 1850 if (dev) {
1852 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); 1851 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1853 mtu -= LL_RESERVED_SPACE(dev); 1852 mtu -= LL_RESERVED_SPACE(dev);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 74d321a60e7b..f3924ab1e019 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -42,7 +42,6 @@
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45#include <asm/system.h>
46#include <net/net_namespace.h> 45#include <net/net_namespace.h>
47#include <net/neighbour.h> 46#include <net/neighbour.h>
48#include <net/dst.h> 47#include <net/dst.h>
@@ -210,15 +209,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
210 struct dn_dev_sysctl_table *t; 209 struct dn_dev_sysctl_table *t;
211 int i; 210 int i;
212 211
213#define DN_CTL_PATH_DEV 3 212 char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
214
215 struct ctl_path dn_ctl_path[] = {
216 { .procname = "net", },
217 { .procname = "decnet", },
218 { .procname = "conf", },
219 { /* to be set */ },
220 { },
221 };
222 213
223 t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); 214 t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
224 if (t == NULL) 215 if (t == NULL)
@@ -229,15 +220,12 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
229 t->dn_dev_vars[i].data = ((char *)parms) + offset; 220 t->dn_dev_vars[i].data = ((char *)parms) + offset;
230 } 221 }
231 222
232 if (dev) { 223 snprintf(path, sizeof(path), "net/decnet/conf/%s",
233 dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; 224 dev? dev->name : parms->name);
234 } else {
235 dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
236 }
237 225
238 t->dn_dev_vars[0].extra1 = (void *)dev; 226 t->dn_dev_vars[0].extra1 = (void *)dev;
239 227
240 t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); 228 t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
241 if (t->sysctl_header == NULL) 229 if (t->sysctl_header == NULL)
242 kfree(t); 230 kfree(t);
243 else 231 else
@@ -249,7 +237,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
249 if (parms->sysctl) { 237 if (parms->sysctl) {
250 struct dn_dev_sysctl_table *t = parms->sysctl; 238 struct dn_dev_sysctl_table *t = parms->sysctl;
251 parms->sysctl = NULL; 239 parms->sysctl = NULL;
252 unregister_sysctl_table(t->sysctl_header); 240 unregister_net_sysctl_table(t->sysctl_header);
253 kfree(t); 241 kfree(t);
254 } 242 }
255} 243}
@@ -695,13 +683,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
695 ifm->ifa_scope = ifa->ifa_scope; 683 ifm->ifa_scope = ifa->ifa_scope;
696 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 684 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
697 685
698 if (ifa->ifa_address) 686 if ((ifa->ifa_address &&
699 NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); 687 nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
700 if (ifa->ifa_local) 688 (ifa->ifa_local &&
701 NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); 689 nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
702 if (ifa->ifa_label[0]) 690 (ifa->ifa_label[0] &&
703 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); 691 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
704 692 goto nla_put_failure;
705 return nlmsg_end(skb, nlh); 693 return nlmsg_end(skb, nlh);
706 694
707nla_put_failure: 695nla_put_failure:
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 9e885f180b60..7eaf98799729 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -302,11 +302,12 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
302 struct rtattr *attr = RTA_DATA(rta->rta_mx); 302 struct rtattr *attr = RTA_DATA(rta->rta_mx);
303 303
304 while(RTA_OK(attr, attrlen)) { 304 while(RTA_OK(attr, attrlen)) {
305 unsigned flavour = attr->rta_type; 305 unsigned int flavour = attr->rta_type;
306
306 if (flavour) { 307 if (flavour) {
307 if (flavour > RTAX_MAX) 308 if (flavour > RTAX_MAX)
308 goto err_inval; 309 goto err_inval;
309 fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr); 310 fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr);
310 } 311 }
311 attr = RTA_NEXT(attr, attrlen); 312 attr = RTA_NEXT(attr, attrlen);
312 } 313 }
@@ -437,9 +438,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
437 res->fi = NULL; 438 res->fi = NULL;
438 return 1; 439 return 1;
439 default: 440 default:
440 if (net_ratelimit()) 441 net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
441 printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", 442 type);
442 type);
443 res->fi = NULL; 443 res->fi = NULL;
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index befe426491ba..ac90f658586c 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -162,8 +162,8 @@ static int dn_neigh_construct(struct neighbour *neigh)
162 else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK)) 162 else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
163 dn_dn2eth(neigh->ha, dn->addr); 163 dn_dn2eth(neigh->ha, dn->addr);
164 else { 164 else {
165 if (net_ratelimit()) 165 net_dbg_ratelimited("Trying to create neigh for hw %d\n",
166 printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type); 166 dev->type);
167 return -EINVAL; 167 return -EINVAL;
168 } 168 }
169 169
@@ -205,17 +205,23 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
205 struct neighbour *neigh = dst_get_neighbour_noref(dst); 205 struct neighbour *neigh = dst_get_neighbour_noref(dst);
206 struct net_device *dev = neigh->dev; 206 struct net_device *dev = neigh->dev;
207 char mac_addr[ETH_ALEN]; 207 char mac_addr[ETH_ALEN];
208 unsigned int seq;
209 int err;
208 210
209 dn_dn2eth(mac_addr, rt->rt_local_src); 211 dn_dn2eth(mac_addr, rt->rt_local_src);
210 if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, 212 do {
211 mac_addr, skb->len) >= 0) 213 seq = read_seqbegin(&neigh->ha_lock);
212 return dev_queue_xmit(skb); 214 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
213 215 neigh->ha, mac_addr, skb->len);
214 if (net_ratelimit()) 216 } while (read_seqretry(&neigh->ha_lock, seq));
215 printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n"); 217
216 218 if (err >= 0)
217 kfree_skb(skb); 219 err = dev_queue_xmit(skb);
218 return -EINVAL; 220 else {
221 kfree_skb(skb);
222 err = -EINVAL;
223 }
224 return err;
219} 225}
220 226
221static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) 227static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
@@ -230,15 +236,13 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
230 if (skb_headroom(skb) < headroom) { 236 if (skb_headroom(skb) < headroom) {
231 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); 237 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
232 if (skb2 == NULL) { 238 if (skb2 == NULL) {
233 if (net_ratelimit()) 239 net_crit_ratelimited("dn_long_output: no memory\n");
234 printk(KERN_CRIT "dn_long_output: no memory\n");
235 kfree_skb(skb); 240 kfree_skb(skb);
236 return -ENOBUFS; 241 return -ENOBUFS;
237 } 242 }
238 kfree_skb(skb); 243 kfree_skb(skb);
239 skb = skb2; 244 skb = skb2;
240 if (net_ratelimit()) 245 net_info_ratelimited("dn_long_output: Increasing headroom\n");
241 printk(KERN_INFO "dn_long_output: Increasing headroom\n");
242 } 246 }
243 247
244 data = skb_push(skb, sizeof(struct dn_long_packet) + 3); 248 data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
@@ -275,15 +279,13 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
275 if (skb_headroom(skb) < headroom) { 279 if (skb_headroom(skb) < headroom) {
276 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); 280 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
277 if (skb2 == NULL) { 281 if (skb2 == NULL) {
278 if (net_ratelimit()) 282 net_crit_ratelimited("dn_short_output: no memory\n");
279 printk(KERN_CRIT "dn_short_output: no memory\n");
280 kfree_skb(skb); 283 kfree_skb(skb);
281 return -ENOBUFS; 284 return -ENOBUFS;
282 } 285 }
283 kfree_skb(skb); 286 kfree_skb(skb);
284 skb = skb2; 287 skb = skb2;
285 if (net_ratelimit()) 288 net_info_ratelimited("dn_short_output: Increasing headroom\n");
286 printk(KERN_INFO "dn_short_output: Increasing headroom\n");
287 } 289 }
288 290
289 data = skb_push(skb, sizeof(struct dn_short_packet) + 2); 291 data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
@@ -316,15 +318,13 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
316 if (skb_headroom(skb) < headroom) { 318 if (skb_headroom(skb) < headroom) {
317 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); 319 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
318 if (skb2 == NULL) { 320 if (skb2 == NULL) {
319 if (net_ratelimit()) 321 net_crit_ratelimited("dn_phase3_output: no memory\n");
320 printk(KERN_CRIT "dn_phase3_output: no memory\n");
321 kfree_skb(skb); 322 kfree_skb(skb);
322 return -ENOBUFS; 323 return -ENOBUFS;
323 } 324 }
324 kfree_skb(skb); 325 kfree_skb(skb);
325 skb = skb2; 326 skb = skb2;
326 if (net_ratelimit()) 327 net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
327 printk(KERN_INFO "dn_phase3_output: Increasing headroom\n");
328 } 328 }
329 329
330 data = skb_push(skb, sizeof(struct dn_short_packet) + 2); 330 data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 73fa268fe2e8..c344163e6ac0 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -60,7 +60,6 @@
60#include <linux/slab.h> 60#include <linux/slab.h>
61#include <net/sock.h> 61#include <net/sock.h>
62#include <net/tcp_states.h> 62#include <net/tcp_states.h>
63#include <asm/system.h>
64#include <linux/fcntl.h> 63#include <linux/fcntl.h>
65#include <linux/mm.h> 64#include <linux/mm.h>
66#include <linux/termios.h> 65#include <linux/termios.h>
@@ -81,12 +80,15 @@ extern int decnet_log_martians;
81 80
82static void dn_log_martian(struct sk_buff *skb, const char *msg) 81static void dn_log_martian(struct sk_buff *skb, const char *msg)
83{ 82{
84 if (decnet_log_martians && net_ratelimit()) { 83 if (decnet_log_martians) {
85 char *devname = skb->dev ? skb->dev->name : "???"; 84 char *devname = skb->dev ? skb->dev->name : "???";
86 struct dn_skb_cb *cb = DN_SKB_CB(skb); 85 struct dn_skb_cb *cb = DN_SKB_CB(skb);
87 printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", 86 net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
88 msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst), 87 msg, devname,
89 le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port)); 88 le16_to_cpu(cb->src),
89 le16_to_cpu(cb->dst),
90 le16_to_cpu(cb->src_port),
91 le16_to_cpu(cb->dst_port));
90 } 92 }
91} 93}
92 94
@@ -589,7 +591,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
589 number of warnings when compiling with -W --ANK 591 number of warnings when compiling with -W --ANK
590 */ 592 */
591 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 593 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
592 (unsigned)sk->sk_rcvbuf) { 594 (unsigned int)sk->sk_rcvbuf) {
593 err = -ENOMEM; 595 err = -ENOMEM;
594 goto out; 596 goto out;
595 } 597 }
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index bd78836a81eb..564a6ad13ce7 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -1,4 +1,3 @@
1
2/* 1/*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
@@ -52,7 +51,6 @@
52#include <linux/route.h> 51#include <linux/route.h>
53#include <linux/slab.h> 52#include <linux/slab.h>
54#include <net/sock.h> 53#include <net/sock.h>
55#include <asm/system.h>
56#include <linux/fcntl.h> 54#include <linux/fcntl.h>
57#include <linux/mm.h> 55#include <linux/mm.h>
58#include <linux/termios.h> 56#include <linux/termios.h>
@@ -210,7 +208,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
210 * 208 *
211 * Returns: The number of times the packet has been sent previously 209 * Returns: The number of times the packet has been sent previously
212 */ 210 */
213static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 211static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
214 gfp_t gfp) 212 gfp_t gfp)
215{ 213{
216 struct dn_skb_cb *cb = DN_SKB_CB(skb); 214 struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -241,7 +239,7 @@ void dn_nsp_output(struct sock *sk)
241{ 239{
242 struct dn_scp *scp = DN_SK(sk); 240 struct dn_scp *scp = DN_SK(sk);
243 struct sk_buff *skb; 241 struct sk_buff *skb;
244 unsigned reduce_win = 0; 242 unsigned int reduce_win = 0;
245 243
246 /* 244 /*
247 * First we check for otherdata/linkservice messages 245 * First we check for otherdata/linkservice messages
@@ -555,8 +553,8 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
555 unsigned char *msg; 553 unsigned char *msg;
556 554
557 if ((dst == NULL) || (rem == 0)) { 555 if ((dst == NULL) || (rem == 0)) {
558 if (net_ratelimit()) 556 net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
559 printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst); 557 le16_to_cpu(rem), dst);
560 return; 558 return;
561 } 559 }
562 560
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index f31ce72dca65..586302e557ad 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -122,7 +122,7 @@ static int dn_route_input(struct sk_buff *);
122static void dn_run_flush(unsigned long dummy); 122static void dn_run_flush(unsigned long dummy);
123 123
124static struct dn_rt_hash_bucket *dn_rt_hash_table; 124static struct dn_rt_hash_bucket *dn_rt_hash_table;
125static unsigned dn_rt_hash_mask; 125static unsigned int dn_rt_hash_mask;
126 126
127static struct timer_list dn_route_timer; 127static struct timer_list dn_route_timer;
128static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0); 128static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
@@ -149,13 +149,13 @@ static void dn_dst_destroy(struct dst_entry *dst)
149 dst_destroy_metrics_generic(dst); 149 dst_destroy_metrics_generic(dst);
150} 150}
151 151
152static __inline__ unsigned dn_hash(__le16 src, __le16 dst) 152static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
153{ 153{
154 __u16 tmp = (__u16 __force)(src ^ dst); 154 __u16 tmp = (__u16 __force)(src ^ dst);
155 tmp ^= (tmp >> 3); 155 tmp ^= (tmp >> 3);
156 tmp ^= (tmp >> 5); 156 tmp ^= (tmp >> 5);
157 tmp ^= (tmp >> 10); 157 tmp ^= (tmp >> 10);
158 return dn_rt_hash_mask & (unsigned)tmp; 158 return dn_rt_hash_mask & (unsigned int)tmp;
159} 159}
160 160
161static inline void dnrt_free(struct dn_route *rt) 161static inline void dnrt_free(struct dn_route *rt)
@@ -297,7 +297,7 @@ static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
297 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; 297 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
298} 298}
299 299
300static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 300static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
301{ 301{
302 struct dn_route *rth; 302 struct dn_route *rth;
303 struct dn_route __rcu **rthp; 303 struct dn_route __rcu **rthp;
@@ -724,11 +724,10 @@ static int dn_output(struct sk_buff *skb)
724 struct dn_route *rt = (struct dn_route *)dst; 724 struct dn_route *rt = (struct dn_route *)dst;
725 struct net_device *dev = dst->dev; 725 struct net_device *dev = dst->dev;
726 struct dn_skb_cb *cb = DN_SKB_CB(skb); 726 struct dn_skb_cb *cb = DN_SKB_CB(skb);
727 struct neighbour *neigh;
728 727
729 int err = -EINVAL; 728 int err = -EINVAL;
730 729
731 if ((neigh = dst_get_neighbour_noref(dst)) == NULL) 730 if (dst_get_neighbour_noref(dst) == NULL)
732 goto error; 731 goto error;
733 732
734 skb->dev = dev; 733 skb->dev = dev;
@@ -749,8 +748,7 @@ static int dn_output(struct sk_buff *skb)
749 dn_to_neigh_output); 748 dn_to_neigh_output);
750 749
751error: 750error:
752 if (net_ratelimit()) 751 net_dbg_ratelimited("dn_output: This should not happen\n");
753 printk(KERN_DEBUG "dn_output: This should not happen\n");
754 752
755 kfree_skb(skb); 753 kfree_skb(skb);
756 754
@@ -808,12 +806,10 @@ drop:
808 */ 806 */
809static int dn_rt_bug(struct sk_buff *skb) 807static int dn_rt_bug(struct sk_buff *skb)
810{ 808{
811 if (net_ratelimit()) { 809 struct dn_skb_cb *cb = DN_SKB_CB(skb);
812 struct dn_skb_cb *cb = DN_SKB_CB(skb);
813 810
814 printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n", 811 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
815 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); 812 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
816 }
817 813
818 kfree_skb(skb); 814 kfree_skb(skb);
819 815
@@ -935,8 +931,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
935 struct dn_route *rt = NULL; 931 struct dn_route *rt = NULL;
936 struct net_device *dev_out = NULL, *dev; 932 struct net_device *dev_out = NULL, *dev;
937 struct neighbour *neigh = NULL; 933 struct neighbour *neigh = NULL;
938 unsigned hash; 934 unsigned int hash;
939 unsigned flags = 0; 935 unsigned int flags = 0;
940 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; 936 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
941 int err; 937 int err;
942 int free_res = 0; 938 int free_res = 0;
@@ -1210,7 +1206,7 @@ e_neighbour:
1210 */ 1206 */
1211static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) 1207static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
1212{ 1208{
1213 unsigned hash = dn_hash(flp->saddr, flp->daddr); 1209 unsigned int hash = dn_hash(flp->saddr, flp->daddr);
1214 struct dn_route *rt = NULL; 1210 struct dn_route *rt = NULL;
1215 1211
1216 if (!(flags & MSG_TRYHARD)) { 1212 if (!(flags & MSG_TRYHARD)) {
@@ -1276,7 +1272,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1276 struct net_device *out_dev = NULL; 1272 struct net_device *out_dev = NULL;
1277 struct dn_dev *dn_db; 1273 struct dn_dev *dn_db;
1278 struct neighbour *neigh = NULL; 1274 struct neighbour *neigh = NULL;
1279 unsigned hash; 1275 unsigned int hash;
1280 int flags = 0; 1276 int flags = 0;
1281 __le16 gateway = 0; 1277 __le16 gateway = 0;
1282 __le16 local_src = 0; 1278 __le16 local_src = 0;
@@ -1328,9 +1324,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1328 1324
1329 out_dev = DN_FIB_RES_DEV(res); 1325 out_dev = DN_FIB_RES_DEV(res);
1330 if (out_dev == NULL) { 1326 if (out_dev == NULL) {
1331 if (net_ratelimit()) 1327 net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
1332 printk(KERN_CRIT "Bug in dn_route_input_slow() "
1333 "No output device\n");
1334 goto e_inval; 1328 goto e_inval;
1335 } 1329 }
1336 dev_hold(out_dev); 1330 dev_hold(out_dev);
@@ -1491,7 +1485,7 @@ static int dn_route_input(struct sk_buff *skb)
1491{ 1485{
1492 struct dn_route *rt; 1486 struct dn_route *rt;
1493 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1487 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1494 unsigned hash = dn_hash(cb->src, cb->dst); 1488 unsigned int hash = dn_hash(cb->src, cb->dst);
1495 1489
1496 if (skb_dst(skb)) 1490 if (skb_dst(skb))
1497 return 0; 1491 return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f65c9ddaee41..e65f2c856e06 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -177,11 +177,11 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
177 return 1; 177 return 1;
178} 178}
179 179
180unsigned dnet_addr_type(__le16 addr) 180unsigned int dnet_addr_type(__le16 addr)
181{ 181{
182 struct flowidn fld = { .daddr = addr }; 182 struct flowidn fld = { .daddr = addr };
183 struct dn_fib_res res; 183 struct dn_fib_res res;
184 unsigned ret = RTN_UNICAST; 184 unsigned int ret = RTN_UNICAST;
185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); 185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
186 186
187 res.r = NULL; 187 res.r = NULL;
@@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
204 frh->src_len = r->src_len; 204 frh->src_len = r->src_len;
205 frh->tos = 0; 205 frh->tos = 0;
206 206
207 if (r->dst_len) 207 if ((r->dst_len &&
208 NLA_PUT_LE16(skb, FRA_DST, r->dst); 208 nla_put_le16(skb, FRA_DST, r->dst)) ||
209 if (r->src_len) 209 (r->src_len &&
210 NLA_PUT_LE16(skb, FRA_SRC, r->src); 210 nla_put_le16(skb, FRA_SRC, r->src)))
211 211 goto nla_put_failure;
212 return 0; 212 return 0;
213 213
214nla_put_failure: 214nla_put_failure:
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index a9a62f225a6b..650f3380c98a 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -836,8 +836,8 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
836 if (!create) 836 if (!create)
837 return NULL; 837 return NULL;
838 838
839 if (in_interrupt() && net_ratelimit()) { 839 if (in_interrupt()) {
840 printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); 840 net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
841 return NULL; 841 return NULL;
842 } 842 }
843 843
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1531135130db..44b890936fc0 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -57,8 +57,7 @@ nlmsg_failure:
57 if (skb) 57 if (skb)
58 kfree_skb(skb); 58 kfree_skb(skb);
59 *errp = -ENOMEM; 59 *errp = -ENOMEM;
60 if (net_ratelimit()) 60 net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
61 printk(KERN_ERR "dn_rtmsg: error creating netlink message\n");
62 return NULL; 61 return NULL;
63} 62}
64 63
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 02e75d11cfbb..a55eeccaa72f 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -351,20 +351,14 @@ static ctl_table dn_table[] = {
351 { } 351 { }
352}; 352};
353 353
354static struct ctl_path dn_path[] = {
355 { .procname = "net", },
356 { .procname = "decnet", },
357 { }
358};
359
360void dn_register_sysctl(void) 354void dn_register_sysctl(void)
361{ 355{
362 dn_table_header = register_sysctl_paths(dn_path, dn_table); 356 dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
363} 357}
364 358
365void dn_unregister_sysctl(void) 359void dn_unregister_sysctl(void)
366{ 360{
367 unregister_sysctl_table(dn_table_header); 361 unregister_net_sysctl_table(dn_table_header);
368} 362}
369 363
370#else /* CONFIG_SYSCTL */ 364#else /* CONFIG_SYSCTL */
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index fa000d26dc60..d9507dd05818 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@ MODULE_DESCRIPTION("DNS Resolver");
38MODULE_AUTHOR("Wang Lei"); 38MODULE_AUTHOR("Wang Lei");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41unsigned dns_resolver_debug; 41unsigned int dns_resolver_debug;
42module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); 42module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
43MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); 43MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
44 44
@@ -249,9 +249,6 @@ static int __init init_dns_resolver(void)
249 struct key *keyring; 249 struct key *keyring;
250 int ret; 250 int ret;
251 251
252 printk(KERN_NOTICE "Registering the %s key type\n",
253 key_type_dns_resolver.name);
254
255 /* create an override credential set with a special thread keyring in 252 /* create an override credential set with a special thread keyring in
256 * which DNS requests are cached 253 * which DNS requests are cached
257 * 254 *
@@ -281,6 +278,7 @@ static int __init init_dns_resolver(void)
281 278
282 /* instruct request_key() to use this special keyring as a cache for 279 /* instruct request_key() to use this special keyring as a cache for
283 * the results it looks up */ 280 * the results it looks up */
281 set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
284 cred->thread_keyring = keyring; 282 cred->thread_keyring = keyring;
285 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; 283 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
286 dns_resolver_cache = cred; 284 dns_resolver_cache = cred;
@@ -300,8 +298,6 @@ static void __exit exit_dns_resolver(void)
300 key_revoke(dns_resolver_cache->thread_keyring); 298 key_revoke(dns_resolver_cache->thread_keyring);
301 unregister_key_type(&key_type_dns_resolver); 299 unregister_key_type(&key_type_dns_resolver);
302 put_cred(dns_resolver_cache); 300 put_cred(dns_resolver_cache);
303 printk(KERN_NOTICE "Unregistered %s key type\n",
304 key_type_dns_resolver.name);
305} 301}
306 302
307module_init(init_dns_resolver) 303module_init(init_dns_resolver)
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 189ca9e9b785..17c7886b5b3a 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -31,7 +31,7 @@ extern const struct cred *dns_resolver_cache;
31/* 31/*
32 * debug tracing 32 * debug tracing
33 */ 33 */
34extern unsigned dns_resolver_debug; 34extern unsigned int dns_resolver_debug;
35 35
36#define kdebug(FMT, ...) \ 36#define kdebug(FMT, ...) \
37do { \ 37do { \
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 56cf9b8e1c7c..e32083d5d8f8 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -66,7 +66,7 @@ static int dsa_slave_open(struct net_device *dev)
66 if (!(master->flags & IFF_UP)) 66 if (!(master->flags & IFF_UP))
67 return -ENETDOWN; 67 return -ENETDOWN;
68 68
69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { 69 if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
70 err = dev_uc_add(master, dev->dev_addr); 70 err = dev_uc_add(master, dev->dev_addr);
71 if (err < 0) 71 if (err < 0)
72 goto out; 72 goto out;
@@ -89,7 +89,7 @@ clear_allmulti:
89 if (dev->flags & IFF_ALLMULTI) 89 if (dev->flags & IFF_ALLMULTI)
90 dev_set_allmulti(master, -1); 90 dev_set_allmulti(master, -1);
91del_unicast: 91del_unicast:
92 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 92 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
93 dev_uc_del(master, dev->dev_addr); 93 dev_uc_del(master, dev->dev_addr);
94out: 94out:
95 return err; 95 return err;
@@ -107,7 +107,7 @@ static int dsa_slave_close(struct net_device *dev)
107 if (dev->flags & IFF_PROMISC) 107 if (dev->flags & IFF_PROMISC)
108 dev_set_promiscuity(master, -1); 108 dev_set_promiscuity(master, -1);
109 109
110 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 110 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
111 dev_uc_del(master, dev->dev_addr); 111 dev_uc_del(master, dev->dev_addr);
112 112
113 return 0; 113 return 0;
@@ -146,13 +146,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
146 if (!(dev->flags & IFF_UP)) 146 if (!(dev->flags & IFF_UP))
147 goto out; 147 goto out;
148 148
149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) { 149 if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
150 err = dev_uc_add(master, addr->sa_data); 150 err = dev_uc_add(master, addr->sa_data);
151 if (err < 0) 151 if (err < 0)
152 return err; 152 return err;
153 } 153 }
154 154
155 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 155 if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
156 dev_uc_del(master, dev->dev_addr); 156 dev_uc_del(master, dev->dev_addr);
157 157
158out: 158out:
diff --git a/net/econet/Kconfig b/net/econet/Kconfig
deleted file mode 100644
index 39a2d2975e0e..000000000000
--- a/net/econet/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
1#
2# Acorn Econet/AUN protocols
3#
4
5config ECONET
6 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
7 depends on EXPERIMENTAL && INET
8 ---help---
9 Econet is a fairly old and slow networking protocol mainly used by
10 Acorn computers to access file and print servers. It uses native
11 Econet network cards. AUN is an implementation of the higher level
12 parts of Econet that runs over ordinary Ethernet connections, on
13 top of the UDP packet protocol, which in turn runs on top of the
14 Internet protocol IP.
15
16 If you say Y here, you can choose with the next two options whether
17 to send Econet/AUN traffic over a UDP Ethernet connection or over
18 a native Econet network card.
19
20 To compile this driver as a module, choose M here: the module
21 will be called econet.
22
23config ECONET_AUNUDP
24 bool "AUN over UDP"
25 depends on ECONET
26 help
27 Say Y here if you want to send Econet/AUN traffic over a UDP
28 connection (UDP is a packet based protocol that runs on top of the
29 Internet protocol IP) using an ordinary Ethernet network card.
30
31config ECONET_NATIVE
32 bool "Native Econet"
33 depends on ECONET
34 help
35 Say Y here if you have a native Econet network card installed in
36 your computer.
diff --git a/net/econet/Makefile b/net/econet/Makefile
deleted file mode 100644
index 05fae8be2fed..000000000000
--- a/net/econet/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for Econet support code.
3#
4
5obj-$(CONFIG_ECONET) += econet.o
6
7econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
deleted file mode 100644
index 7e717cb35ad1..000000000000
--- a/net/econet/af_econet.c
+++ /dev/null
@@ -1,1173 +0,0 @@
1/*
2 * An implementation of the Acorn Econet and AUN protocols.
3 * Philip Blundell <philb@gnu.org>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#define pr_fmt(fmt) fmt
13
14#include <linux/module.h>
15
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/inetdevice.h>
28#include <linux/route.h>
29#include <linux/inet.h>
30#include <linux/etherdevice.h>
31#include <linux/if_arp.h>
32#include <linux/wireless.h>
33#include <linux/skbuff.h>
34#include <linux/udp.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37#include <net/sock.h>
38#include <net/inet_common.h>
39#include <linux/stat.h>
40#include <linux/init.h>
41#include <linux/if_ec.h>
42#include <net/udp.h>
43#include <net/ip.h>
44#include <linux/spinlock.h>
45#include <linux/rcupdate.h>
46#include <linux/bitops.h>
47#include <linux/mutex.h>
48
49#include <linux/uaccess.h>
50#include <asm/system.h>
51
52static const struct proto_ops econet_ops;
53static struct hlist_head econet_sklist;
54static DEFINE_SPINLOCK(econet_lock);
55static DEFINE_MUTEX(econet_mutex);
56
57/* Since there are only 256 possible network numbers (or fewer, depends
58 how you count) it makes sense to use a simple lookup table. */
59static struct net_device *net2dev_map[256];
60
61#define EC_PORT_IP 0xd2
62
63#ifdef CONFIG_ECONET_AUNUDP
64static DEFINE_SPINLOCK(aun_queue_lock);
65static struct socket *udpsock;
66#define AUN_PORT 0x8000
67
68struct aunhdr {
69 unsigned char code; /* AUN magic protocol byte */
70 unsigned char port;
71 unsigned char cb;
72 unsigned char pad;
73 unsigned long handle;
74};
75
76static unsigned long aun_seq;
77
78/* Queue of packets waiting to be transmitted. */
79static struct sk_buff_head aun_queue;
80static struct timer_list ab_cleanup_timer;
81
82#endif /* CONFIG_ECONET_AUNUDP */
83
84/* Per-packet information */
85struct ec_cb {
86 struct sockaddr_ec sec;
87 unsigned long cookie; /* Supplied by user. */
88#ifdef CONFIG_ECONET_AUNUDP
89 int done;
90 unsigned long seq; /* Sequencing */
91 unsigned long timeout; /* Timeout */
92 unsigned long start; /* jiffies */
93#endif
94#ifdef CONFIG_ECONET_NATIVE
95 void (*sent)(struct sk_buff *, int result);
96#endif
97};
98
99static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
100{
101 spin_lock_bh(&econet_lock);
102 sk_del_node_init(sk);
103 spin_unlock_bh(&econet_lock);
104}
105
106static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
107{
108 spin_lock_bh(&econet_lock);
109 sk_add_node(sk, list);
110 spin_unlock_bh(&econet_lock);
111}
112
113/*
114 * Pull a packet from our receive queue and hand it to the user.
115 * If necessary we block.
116 */
117
118static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
119 struct msghdr *msg, size_t len, int flags)
120{
121 struct sock *sk = sock->sk;
122 struct sk_buff *skb;
123 size_t copied;
124 int err;
125
126 msg->msg_namelen = sizeof(struct sockaddr_ec);
127
128 mutex_lock(&econet_mutex);
129
130 /*
131 * Call the generic datagram receiver. This handles all sorts
132 * of horrible races and re-entrancy so we can forget about it
133 * in the protocol layers.
134 *
135 * Now it will return ENETDOWN, if device have just gone down,
136 * but then it will block.
137 */
138
139 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
140
141 /*
142 * An error occurred so return it. Because skb_recv_datagram()
143 * handles the blocking we don't see and worry about blocking
144 * retries.
145 */
146
147 if (skb == NULL)
148 goto out;
149
150 /*
151 * You lose any data beyond the buffer you gave. If it worries a
152 * user program they can ask the device for its MTU anyway.
153 */
154
155 copied = skb->len;
156 if (copied > len) {
157 copied = len;
158 msg->msg_flags |= MSG_TRUNC;
159 }
160
161 /* We can't use skb_copy_datagram here */
162 err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
163 if (err)
164 goto out_free;
165 sk->sk_stamp = skb->tstamp;
166
167 if (msg->msg_name)
168 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
169
170 /*
171 * Free or return the buffer as appropriate. Again this
172 * hides all the races and re-entrancy issues from us.
173 */
174 err = copied;
175
176out_free:
177 skb_free_datagram(sk, skb);
178out:
179 mutex_unlock(&econet_mutex);
180 return err;
181}
182
183/*
184 * Bind an Econet socket.
185 */
186
187static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
188 int addr_len)
189{
190 struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
191 struct sock *sk;
192 struct econet_sock *eo;
193
194 /*
195 * Check legality
196 */
197
198 if (addr_len < sizeof(struct sockaddr_ec) ||
199 sec->sec_family != AF_ECONET)
200 return -EINVAL;
201
202 mutex_lock(&econet_mutex);
203
204 sk = sock->sk;
205 eo = ec_sk(sk);
206
207 eo->cb = sec->cb;
208 eo->port = sec->port;
209 eo->station = sec->addr.station;
210 eo->net = sec->addr.net;
211
212 mutex_unlock(&econet_mutex);
213
214 return 0;
215}
216
217#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
218/*
219 * Queue a transmit result for the user to be told about.
220 */
221
222static void tx_result(struct sock *sk, unsigned long cookie, int result)
223{
224 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
225 struct ec_cb *eb;
226 struct sockaddr_ec *sec;
227
228 if (skb == NULL) {
229 pr_debug("econet: memory squeeze, transmit result dropped\n");
230 return;
231 }
232
233 eb = (struct ec_cb *)&skb->cb;
234 sec = (struct sockaddr_ec *)&eb->sec;
235 memset(sec, 0, sizeof(struct sockaddr_ec));
236 sec->cookie = cookie;
237 sec->type = ECTYPE_TRANSMIT_STATUS | result;
238 sec->sec_family = AF_ECONET;
239
240 if (sock_queue_rcv_skb(sk, skb) < 0)
241 kfree_skb(skb);
242}
243#endif
244
#ifdef CONFIG_ECONET_NATIVE
/*
 *	Called by the Econet hardware driver when a packet transmit
 *	has completed.  Pulls the cookie stashed in the skb control
 *	block and reports the result to the owning socket.
 */

static void ec_tx_done(struct sk_buff *skb, int result)
{
	struct ec_cb *eb = (struct ec_cb *)&skb->cb;

	tx_result(skb->sk, eb->cookie, result);
}
#endif
257
/*
 *	Send a packet.  We have to work out which device it's going out on
 *	and hence whether to use real Econet or the UDP emulation.
 *
 *	Returns the number of bytes accepted on success, or a negative
 *	errno.  A destination address is mandatory (there is no connected
 *	mode), so a NULL or undersized msg_name is rejected with -EINVAL.
 */

static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
	struct net_device *dev;
	struct ec_addr addr;
	int err;
	unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
	struct msghdr udpmsg;
	struct iovec iov[2];
	struct aunhdr ah;
	struct sockaddr_in udpdest;
	__kernel_size_t size;
	mm_segment_t oldfs;
	char *userbuf;
#endif

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
		return -EINVAL;

	/*
	 *	Get and verify the address.
	 */

	mutex_lock(&econet_mutex);

	/* No destination means no way to route: fail early. */
	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
		mutex_unlock(&econet_mutex);
		return -EINVAL;
	}
	addr.station = saddr->addr.station;
	addr.net = saddr->addr.net;
	port = saddr->port;
	cb = saddr->cb;

	/* Look for a device with the right network number. */
	dev = net2dev_map[addr.net];

	/* If not directly reachable, use some default */
	if (dev == NULL) {
		dev = net2dev_map[0];
		/* No interfaces at all? */
		if (dev == NULL) {
			mutex_unlock(&econet_mutex);
			return -ENETDOWN;
		}
	}

	if (dev->type == ARPHRD_ECONET) {
		/* Real hardware Econet.  We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
		unsigned short proto = 0;
		int hlen, tlen;
		int res;

		/* 15 bytes of slack on top of the payload — presumably
		 * the link-level framing allowance; TODO confirm against
		 * the Econet frame format. */
		if (len + 15 > dev->mtu) {
			mutex_unlock(&econet_mutex);
			return -EMSGSIZE;
		}

		dev_hold(dev);

		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		skb = sock_alloc_send_skb(sk, len + hlen + tlen,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out_unlock;

		skb_reserve(skb, hlen);
		skb_reset_network_header(skb);

		/* Per-packet metadata lives in the skb control block;
		 * ec_tx_done() reads it back on transmit completion. */
		eb = (struct ec_cb *)&skb->cb;

		eb->cookie = saddr->cookie;
		eb->sec = *saddr;
		eb->sent = ec_tx_done;

		err = -EINVAL;
		res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
		if (res < 0)
			goto out_free;
		if (res > 0) {
			struct ec_framehdr *fh;
			/* Poke in our control byte and
			   port number.  Hack, hack.  */
			fh = (struct ec_framehdr *)skb->data;
			fh->cb = cb;
			fh->port = port;
			if (sock->type != SOCK_DGRAM) {
				skb_reset_tail_pointer(skb);
				skb->len = 0;
			}
		}

		/* Copy the data. Returns -EFAULT on error */
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		skb->protocol = proto;
		skb->dev = dev;
		skb->priority = sk->sk_priority;
		if (err)
			goto out_free;

		err = -ENETDOWN;
		if (!(dev->flags & IFF_UP))
			goto out_free;

		/*
		 *	Now send it
		 */

		dev_queue_xmit(skb);
		dev_put(dev);
		mutex_unlock(&econet_mutex);
		return len;

out_free:
		kfree_skb(skb);
out_unlock:
		if (dev)
			dev_put(dev);
#else
		err = -EPROTOTYPE;
#endif
		mutex_unlock(&econet_mutex);

		return err;
	}

#ifdef CONFIG_ECONET_AUNUDP
	/* AUN virtual Econet. */

	if (udpsock == NULL) {
		mutex_unlock(&econet_mutex);
		return -ENETDOWN;		/* No socket - can't send */
	}

	if (len > 32768) {
		err = -E2BIG;
		goto error;
	}

	/* Make up a UDP datagram and hand it off to some higher intellect. */

	memset(&udpdest, 0, sizeof(udpdest));
	udpdest.sin_family = AF_INET;
	udpdest.sin_port = htons(AUN_PORT);

	/* At the moment we use the stupid Acorn scheme of Econet address
	   y.x maps to IP a.b.c.x.  This should be replaced with something
	   more flexible and more aware of subnet masks.  */
	{
		struct in_device *idev;
		unsigned long network = 0;

		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev) {
			if (idev->ifa_list)
				network = ntohl(idev->ifa_list->ifa_address) &
					0xffffff00;		/* !!! */
		}
		rcu_read_unlock();
		udpdest.sin_addr.s_addr = htonl(network | addr.station);
	}

	memset(&ah, 0, sizeof(ah));
	ah.port = port;
	ah.cb = cb & 0x7f;
	ah.code = 2;		/* magic */

	/* tack our header on the front of the iovec */
	size = sizeof(struct aunhdr);
	iov[0].iov_base = (void *)&ah;
	iov[0].iov_len = size;

	/* Stage the user data through a kernel bounce buffer so the copy
	 * from user space happens before we flip to KERNEL_DS below. */
	userbuf = vmalloc(len);
	if (userbuf == NULL) {
		err = -ENOMEM;
		goto error;
	}

	iov[1].iov_base = userbuf;
	iov[1].iov_len = len;
	err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
	if (err)
		goto error_free_buf;

	/* Get a skbuff (no data, just holds our cb information) */
	skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error_free_buf;

	eb = (struct ec_cb *)&skb->cb;

	/* Remember enough to match the remote AUN ack (seq/handle) and to
	 * let ab_cleanup() expire the entry after the 5 second timeout. */
	eb->cookie = saddr->cookie;
	eb->timeout = 5 * HZ;
	eb->start = jiffies;
	ah.handle = aun_seq;
	eb->seq = (aun_seq++);
	eb->sec = *saddr;

	skb_queue_tail(&aun_queue, skb);

	udpmsg.msg_name = (void *)&udpdest;
	udpmsg.msg_namelen = sizeof(udpdest);
	udpmsg.msg_iov = &iov[0];
	udpmsg.msg_iovlen = 2;
	udpmsg.msg_control = NULL;
	udpmsg.msg_controllen = 0;
	udpmsg.msg_flags = 0;

	/* NOTE(review): the total length passed to sock_sendmsg() is
	 * `size` (header only) although two iovecs are supplied —
	 * verify the payload is actually transmitted. */
	oldfs = get_fs();
	set_fs(KERNEL_DS);		/* More privs :-) */
	err = sock_sendmsg(udpsock, &udpmsg, size);
	set_fs(oldfs);

error_free_buf:
	vfree(userbuf);
error:
#else
	err = -EPROTOTYPE;
#endif
	mutex_unlock(&econet_mutex);

	return err;
}
500
501/*
502 * Look up the address of a socket.
503 */
504
505static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
506 int *uaddr_len, int peer)
507{
508 struct sock *sk;
509 struct econet_sock *eo;
510 struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
511
512 if (peer)
513 return -EOPNOTSUPP;
514
515 memset(sec, 0, sizeof(*sec));
516 mutex_lock(&econet_mutex);
517
518 sk = sock->sk;
519 eo = ec_sk(sk);
520
521 sec->sec_family = AF_ECONET;
522 sec->port = eo->port;
523 sec->addr.station = eo->station;
524 sec->addr.net = eo->net;
525
526 mutex_unlock(&econet_mutex);
527
528 *uaddr_len = sizeof(*sec);
529 return 0;
530}
531
532static void econet_destroy_timer(unsigned long data)
533{
534 struct sock *sk = (struct sock *)data;
535
536 if (!sk_has_allocations(sk)) {
537 sk_free(sk);
538 return;
539 }
540
541 sk->sk_timer.expires = jiffies + 10 * HZ;
542 add_timer(&sk->sk_timer);
543 pr_debug("econet: socket destroy delayed\n");
544}
545
/*
 *	Close an econet socket.
 *
 *	Detaches the sock from the global list, purges its receive
 *	queue and frees it — either immediately, or (if buffers are
 *	still outstanding) later via econet_destroy_timer().
 */

static int econet_release(struct socket *sock)
{
	struct sock *sk;

	mutex_lock(&econet_mutex);

	sk = sock->sk;
	if (!sk)
		goto out_unlock;

	econet_remove_socket(&econet_sklist, sk);

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sk->sk_state_change(sk);	/* It is useless. Just for sanity. */

	sock_orphan(sk);

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	/* Buffers still in flight: defer the free to the timer, which
	 * re-arms itself until sk_has_allocations() reports drained. */
	if (sk_has_allocations(sk)) {
		sk->sk_timer.data = (unsigned long)sk;
		sk->sk_timer.expires = jiffies + HZ;
		sk->sk_timer.function = econet_destroy_timer;
		add_timer(&sk->sk_timer);

		goto out_unlock;
	}

	sk_free(sk);

out_unlock:
	mutex_unlock(&econet_mutex);
	return 0;
}
589
/* Protocol descriptor: the per-socket private area is struct econet_sock. */
static struct proto econet_proto = {
	.name = "ECONET",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct econet_sock),
};
595
596/*
597 * Create an Econet socket
598 */
599
600static int econet_create(struct net *net, struct socket *sock, int protocol,
601 int kern)
602{
603 struct sock *sk;
604 struct econet_sock *eo;
605 int err;
606
607 if (!net_eq(net, &init_net))
608 return -EAFNOSUPPORT;
609
610 /* Econet only provides datagram services. */
611 if (sock->type != SOCK_DGRAM)
612 return -ESOCKTNOSUPPORT;
613
614 sock->state = SS_UNCONNECTED;
615
616 err = -ENOBUFS;
617 sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
618 if (sk == NULL)
619 goto out;
620
621 sk->sk_reuse = 1;
622 sock->ops = &econet_ops;
623 sock_init_data(sock, sk);
624
625 eo = ec_sk(sk);
626 sock_reset_flag(sk, SOCK_ZAPPED);
627 sk->sk_family = PF_ECONET;
628 eo->num = protocol;
629
630 econet_insert_socket(&econet_sklist, sk);
631 return 0;
632out:
633 return err;
634}
635
636/*
637 * Handle Econet specific ioctls
638 */
639
640static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
641{
642 struct ifreq ifr;
643 struct ec_device *edev;
644 struct net_device *dev;
645 struct sockaddr_ec *sec;
646 int err;
647
648 /*
649 * Fetch the caller's info block into kernel space
650 */
651
652 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
653 return -EFAULT;
654
655 dev = dev_get_by_name(&init_net, ifr.ifr_name);
656 if (dev == NULL)
657 return -ENODEV;
658
659 sec = (struct sockaddr_ec *)&ifr.ifr_addr;
660
661 mutex_lock(&econet_mutex);
662
663 err = 0;
664 switch (cmd) {
665 case SIOCSIFADDR:
666 if (!capable(CAP_NET_ADMIN)) {
667 err = -EPERM;
668 break;
669 }
670
671 edev = dev->ec_ptr;
672 if (edev == NULL) {
673 /* Magic up a new one. */
674 edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
675 if (edev == NULL) {
676 err = -ENOMEM;
677 break;
678 }
679 dev->ec_ptr = edev;
680 } else
681 net2dev_map[edev->net] = NULL;
682 edev->station = sec->addr.station;
683 edev->net = sec->addr.net;
684 net2dev_map[sec->addr.net] = dev;
685 if (!net2dev_map[0])
686 net2dev_map[0] = dev;
687 break;
688
689 case SIOCGIFADDR:
690 edev = dev->ec_ptr;
691 if (edev == NULL) {
692 err = -ENODEV;
693 break;
694 }
695 memset(sec, 0, sizeof(struct sockaddr_ec));
696 sec->addr.station = edev->station;
697 sec->addr.net = edev->net;
698 sec->sec_family = AF_ECONET;
699 dev_put(dev);
700 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
701 err = -EFAULT;
702 break;
703
704 default:
705 err = -EINVAL;
706 break;
707 }
708
709 mutex_unlock(&econet_mutex);
710
711 dev_put(dev);
712
713 return err;
714}
715
716/*
717 * Handle generic ioctls
718 */
719
720static int econet_ioctl(struct socket *sock, unsigned int cmd,
721 unsigned long arg)
722{
723 struct sock *sk = sock->sk;
724 void __user *argp = (void __user *)arg;
725
726 switch (cmd) {
727 case SIOCGSTAMP:
728 return sock_get_timestamp(sk, argp);
729
730 case SIOCGSTAMPNS:
731 return sock_get_timestampns(sk, argp);
732
733 case SIOCSIFADDR:
734 case SIOCGIFADDR:
735 return ec_dev_ioctl(sock, cmd, argp);
736
737 }
738
739 return -ENOIOCTLCMD;
740}
741
/* PF_ECONET address-family registration hook (see econet_proto_init()). */
static const struct net_proto_family econet_family_ops = {
	.family = PF_ECONET,
	.create = econet_create,
	.owner = THIS_MODULE,
};
747
/* Socket operations for PF_ECONET.  Econet is datagram-only, so every
 * connection-oriented entry point is a sock_no_* stub. */
static const struct proto_ops econet_ops = {
	.family = PF_ECONET,
	.owner = THIS_MODULE,
	.release = econet_release,
	.bind = econet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = econet_getname,
	.poll = datagram_poll,
	.ioctl = econet_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = econet_sendmsg,
	.recvmsg = econet_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
768
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
/*
 *	Find the listening socket, if any, for the given data.
 *
 *	A bound value of 0 for port/station/net acts as a wildcard.
 *	On a match the socket is returned with an extra reference held
 *	(the caller must sock_put() it); NULL when nobody listens.
 */

static struct sock *ec_listening_socket(unsigned char port, unsigned char
					station, unsigned char net)
{
	struct sock *sk;
	struct hlist_node *node;

	spin_lock(&econet_lock);
	sk_for_each(sk, node, &econet_sklist) {
		struct econet_sock *opt = ec_sk(sk);
		if ((opt->port == port || opt->port == 0) &&
		    (opt->station == station || opt->station == 0) &&
		    (opt->net == net || opt->net == 0)) {
			sock_hold(sk);	/* reference handed to the caller */
			goto found;
		}
	}
	sk = NULL;
found:
	spin_unlock(&econet_lock);
	return sk;
}
795
796/*
797 * Queue a received packet for a socket.
798 */
799
800static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
801 unsigned char stn, unsigned char net,
802 unsigned char cb, unsigned char port)
803{
804 struct ec_cb *eb = (struct ec_cb *)&skb->cb;
805 struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
806
807 memset(sec, 0, sizeof(struct sockaddr_ec));
808 sec->sec_family = AF_ECONET;
809 sec->type = ECTYPE_PACKET_RECEIVED;
810 sec->port = port;
811 sec->cb = cb;
812 sec->addr.net = net;
813 sec->addr.station = stn;
814
815 return sock_queue_rcv_skb(sk, skb);
816}
817#endif
818
819#ifdef CONFIG_ECONET_AUNUDP
820/*
821 * Send an AUN protocol response.
822 */
823
824static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
825{
826 struct sockaddr_in sin = {
827 .sin_family = AF_INET,
828 .sin_port = htons(AUN_PORT),
829 .sin_addr = {.s_addr = addr}
830 };
831 struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
832 struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
833 struct msghdr udpmsg;
834
835 udpmsg.msg_name = (void *)&sin;
836 udpmsg.msg_namelen = sizeof(sin);
837 udpmsg.msg_control = NULL;
838 udpmsg.msg_controllen = 0;
839 udpmsg.msg_flags = 0;
840
841 kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
842}
843
844
/*
 *	Handle incoming AUN packets.  Work out if anybody wants them,
 *	and send positive or negative acknowledgements as appropriate.
 */

static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
{
	struct iphdr *ip = ip_hdr(skb);
	/* Acorn addressing: the low byte of the source IP is the station. */
	unsigned char stn = ntohl(ip->saddr) & 0xff;
	struct dst_entry *dst = skb_dst(skb);
	struct ec_device *edev = NULL;
	struct sock *sk = NULL;
	struct sk_buff *newskb;

	if (dst)
		edev = dst->dev->ec_ptr;

	if (!edev)
		goto bad;

	sk = ec_listening_socket(ah->port, stn, edev->net);
	if (sk == NULL)
		goto bad;		/* Nobody wants it */

	/* Allocation size is the payload rounded up to a multiple of 16. */
	newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
			   GFP_ATOMIC);
	if (newskb == NULL) {
		pr_debug("AUN: memory squeeze, dropping packet\n");
		/* Send nack and hope sender tries again */
		goto bad;
	}

	memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
	       len - sizeof(struct aunhdr));

	if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
		/* Socket is bankrupt. */
		kfree_skb(newskb);
		goto bad;
	}

	/* Positive acknowledgement: AUN code 3. */
	aun_send_response(ip->saddr, ah->handle, 3, 0);
	sock_put(sk);
	return;

bad:
	/* Negative acknowledgement: AUN code 4 (not listening). */
	aun_send_response(ip->saddr, ah->handle, 4, 0);
	if (sk)
		sock_put(sk);
}
895
/*
 *	Handle incoming AUN transmit acknowledgements.  If the sequence
 *	number matches something in our backlog then kill it and tell
 *	the user.  If the remote took too long to reply then we may have
 *	dropped the packet already.
 */

static void aun_tx_ack(unsigned long seq, int result)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ec_cb *eb;

	/* Scan the pending-transmit queue for a matching sequence number;
	 * the lock also serialises against expiry in ab_cleanup(). */
	spin_lock_irqsave(&aun_queue_lock, flags);
	skb_queue_walk(&aun_queue, skb) {
		eb = (struct ec_cb *)&skb->cb;
		if (eb->seq == seq)
			goto foundit;
	}
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	pr_debug("AUN: unknown sequence %ld\n", seq);
	return;

foundit:
	/* Report the result to the owning socket, then drop the entry. */
	tx_result(skb->sk, eb->cookie, result);
	skb_unlink(skb, &aun_queue);
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	kfree_skb(skb);
}
925
/*
 *	Deal with received AUN frames - sort out what type of thing it is
 *	and hand it to the right function.
 *
 *	Installed as the kernel UDP socket's sk_data_ready callback
 *	(see aun_udp_initialise()).
 */

static void aun_data_available(struct sock *sk, int slen)
{
	int err;
	struct sk_buff *skb;
	unsigned char *data;
	struct aunhdr *ah;
	size_t len;

	/* NOTE(review): for errors other than -EAGAIN this loops and
	 * logs until a datagram does arrive; presumably such errors are
	 * transient here — confirm before relying on it. */
	while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			pr_err("AUN: no data available?!\n");
			return;
		}
		pr_debug("AUN: recvfrom() error %d\n", -err);
	}

	/* The AUN header sits immediately after the UDP header. */
	data = skb_transport_header(skb) + sizeof(struct udphdr);
	ah = (struct aunhdr *)data;
	len = skb->len - sizeof(struct udphdr);

	switch (ah->code) {
	case 2:
		/* Data frame. */
		aun_incoming(skb, ah, len);
		break;
	case 3:
		/* Positive transmit acknowledgement. */
		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
		break;
	case 4:
		/* Negative acknowledgement: remote not listening. */
		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
		break;
	default:
		pr_debug("AUN: unknown packet type: %d\n", data[0]);
	}

	skb_free_datagram(sk, skb);
}
967
968/*
969 * Called by the timer to manage the AUN transmit queue. If a packet
970 * was sent to a dead or nonexistent host then we will never get an
971 * acknowledgement back. After a few seconds we need to spot this and
972 * drop the packet.
973 */
974
975static void ab_cleanup(unsigned long h)
976{
977 struct sk_buff *skb, *n;
978 unsigned long flags;
979
980 spin_lock_irqsave(&aun_queue_lock, flags);
981 skb_queue_walk_safe(&aun_queue, skb, n) {
982 struct ec_cb *eb = (struct ec_cb *)&skb->cb;
983 if ((jiffies - eb->start) > eb->timeout) {
984 tx_result(skb->sk, eb->cookie,
985 ECTYPE_TRANSMIT_NOT_PRESENT);
986 skb_unlink(skb, &aun_queue);
987 kfree_skb(skb);
988 }
989 }
990 spin_unlock_irqrestore(&aun_queue_lock, flags);
991
992 mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
993}
994
/*
 * Set up the AUN-over-UDP emulation: the pending-transmit queue, the
 * reaper timer and the kernel UDP socket bound to AUN_PORT.  Returns
 * 0 on success or a negative errno.
 */
static int __init aun_udp_initialise(void)
{
	int error;
	struct sockaddr_in sin;

	skb_queue_head_init(&aun_queue);
	/* Periodic reaper for unacknowledged transmits (ab_cleanup). */
	setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
	ab_cleanup_timer.expires = jiffies + (HZ * 2);
	add_timer(&ab_cleanup_timer);

	memset(&sin, 0, sizeof(sin));
	sin.sin_port = htons(AUN_PORT);

	/* We can count ourselves lucky Acorn machines are too dim to
	   speak IPv6. :-) */
	error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
	if (error < 0) {
		pr_err("AUN: socket error %d\n", -error);
		return error;
	}

	udpsock->sk->sk_reuse = 1;
	udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
						    from interrupts */

	error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
				   sizeof(sin));
	if (error < 0) {
		pr_err("AUN: bind error %d\n", -error);
		goto release;
	}

	/* Hook our receive handler into the socket's data-ready path. */
	udpsock->sk->sk_data_ready = aun_data_available;

	return 0;

release:
	sock_release(udpsock);
	udpsock = NULL;
	return error;
}
#endif
1037
#ifdef CONFIG_ECONET_NATIVE

/*
 *	Receive an Econet frame from a device.
 *
 *	IP-in-Econet frames are re-injected into the IP stack; anything
 *	else is matched against listening PF_ECONET sockets.
 */

static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct ec_framehdr *hdr;
	struct sock *sk = NULL;
	struct ec_device *edev = dev->ec_ptr;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	if (!edev)
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return NET_RX_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
		goto drop;

	hdr = (struct ec_framehdr *)skb->data;

	/* First check for encapsulated IP */
	if (hdr->port == EC_PORT_IP) {
		skb->protocol = htons(ETH_P_IP);
		skb_pull(skb, sizeof(struct ec_framehdr));
		netif_rx(skb);
		return NET_RX_SUCCESS;
	}

	sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
	if (!sk)
		goto drop;

	/* NOTE(review): ec_queue_packet()'s parameters are (stn, net),
	 * but this passes (edev->net, hdr->src_stn) — compare
	 * aun_incoming(), which passes (stn, edev->net).  Looks swapped;
	 * confirm the intended address semantics before changing. */
	if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
			    hdr->port))
		goto drop;
	sock_put(sk);
	return NET_RX_SUCCESS;

drop:
	if (sk)
		sock_put(sk);
	kfree_skb(skb);
	return NET_RX_DROP;
}
1093
/* RX hook for native Econet frames (ETH_P_ECONET). */
static struct packet_type econet_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_ECONET),
	.func = econet_rcv,
};

/* Register the native-hardware packet handler; called once at init. */
static void econet_hw_initialise(void)
{
	dev_add_pack(&econet_packet_type);
}

#endif
1105
1106static int econet_notifier(struct notifier_block *this, unsigned long msg,
1107 void *data)
1108{
1109 struct net_device *dev = data;
1110 struct ec_device *edev;
1111
1112 if (!net_eq(dev_net(dev), &init_net))
1113 return NOTIFY_DONE;
1114
1115 switch (msg) {
1116 case NETDEV_UNREGISTER:
1117 /* A device has gone down - kill any data we hold for it. */
1118 edev = dev->ec_ptr;
1119 if (edev) {
1120 if (net2dev_map[0] == dev)
1121 net2dev_map[0] = NULL;
1122 net2dev_map[edev->net] = NULL;
1123 kfree(edev);
1124 dev->ec_ptr = NULL;
1125 }
1126 break;
1127 }
1128
1129 return NOTIFY_DONE;
1130}
1131
/* Watches for device unregistration so stale Econet device state can
 * be torn down (handler: econet_notifier above). */
static struct notifier_block econet_netdev_notifier = {
	.notifier_call = econet_notifier,
};
1135
/*
 * Module unload: tear down the AUN emulation, notifier, native packet
 * hook, socket family and protocol — the reverse of initialisation.
 */
static void __exit econet_proto_exit(void)
{
#ifdef CONFIG_ECONET_AUNUDP
	del_timer(&ab_cleanup_timer);
	/* udpsock may be NULL if aun_udp_initialise() failed. */
	if (udpsock)
		sock_release(udpsock);
#endif
	unregister_netdevice_notifier(&econet_netdev_notifier);
#ifdef CONFIG_ECONET_NATIVE
	dev_remove_pack(&econet_packet_type);
#endif
	sock_unregister(econet_family_ops.family);
	proto_unregister(&econet_proto);
}
1150
1151static int __init econet_proto_init(void)
1152{
1153 int err = proto_register(&econet_proto, 0);
1154
1155 if (err != 0)
1156 goto out;
1157 sock_register(&econet_family_ops);
1158#ifdef CONFIG_ECONET_AUNUDP
1159 aun_udp_initialise();
1160#endif
1161#ifdef CONFIG_ECONET_NATIVE
1162 econet_hw_initialise();
1163#endif
1164 register_netdevice_notifier(&econet_netdev_notifier);
1165out:
1166 return err;
1167}
1168
/* Module entry/exit points and metadata. */
module_init(econet_proto_init);
module_exit(econet_proto_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ECONET);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a2468363978e..36e58800a9e3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -59,7 +59,6 @@
59#include <net/ip.h> 59#include <net/ip.h>
60#include <net/dsa.h> 60#include <net/dsa.h>
61#include <asm/uaccess.h> 61#include <asm/uaccess.h>
62#include <asm/system.h>
63 62
64__setup("ether=", netdev_boot_setup); 63__setup("ether=", netdev_boot_setup);
65 64
@@ -78,7 +77,7 @@ __setup("ether=", netdev_boot_setup);
78 */ 77 */
79int eth_header(struct sk_buff *skb, struct net_device *dev, 78int eth_header(struct sk_buff *skb, struct net_device *dev,
80 unsigned short type, 79 unsigned short type,
81 const void *daddr, const void *saddr, unsigned len) 80 const void *daddr, const void *saddr, unsigned int len)
82{ 81{
83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 82 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
84 83
@@ -165,7 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
165 eth = eth_hdr(skb); 164 eth = eth_hdr(skb);
166 165
167 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { 166 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
168 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) 167 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
169 skb->pkt_type = PACKET_BROADCAST; 168 skb->pkt_type = PACKET_BROADCAST;
170 else 169 else
171 skb->pkt_type = PACKET_MULTICAST; 170 skb->pkt_type = PACKET_MULTICAST;
@@ -180,7 +179,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
180 */ 179 */
181 180
182 else if (1 /*dev->flags&IFF_PROMISC */ ) { 181 else if (1 /*dev->flags&IFF_PROMISC */ ) {
183 if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr))) 182 if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
183 dev->dev_addr)))
184 skb->pkt_type = PACKET_OTHERHOST; 184 skb->pkt_type = PACKET_OTHERHOST;
185 } 185 }
186 186
@@ -288,6 +288,8 @@ int eth_mac_addr(struct net_device *dev, void *p)
288 if (!is_valid_ether_addr(addr->sa_data)) 288 if (!is_valid_ether_addr(addr->sa_data))
289 return -EADDRNOTAVAIL; 289 return -EADDRNOTAVAIL;
290 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 290 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
291 /* if device marked as NET_ADDR_RANDOM, reset it */
292 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
291 return 0; 293 return 0;
292} 294}
293EXPORT_SYMBOL(eth_mac_addr); 295EXPORT_SYMBOL(eth_mac_addr);
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index e4ecc1eef98c..32eb4179e8fa 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,6 +55,7 @@
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
58#include <net/af_ieee802154.h> 59#include <net/af_ieee802154.h>
59#include <net/ieee802154.h> 60#include <net/ieee802154.h>
60#include <net/ieee802154_netdev.h> 61#include <net/ieee802154_netdev.h>
@@ -195,7 +196,7 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
195static void 196static void
196lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) 197lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
197{ 198{
198 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN); 199 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
199 /* second bit-flip (Universe/Local) is done according RFC2464 */ 200 /* second bit-flip (Universe/Local) is done according RFC2464 */
200 ipaddr->s6_addr[8] ^= 0x02; 201 ipaddr->s6_addr[8] ^= 0x02;
201} 202}
@@ -220,7 +221,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
220 221
221 if (lladdr) 222 if (lladdr)
222 lowpan_raw_dump_inline(__func__, "linklocal address", 223 lowpan_raw_dump_inline(__func__, "linklocal address",
223 lladdr, IEEE802154_ALEN); 224 lladdr, IEEE802154_ADDR_LEN);
224 if (prefcount > 0) 225 if (prefcount > 0)
225 memcpy(ipaddr, prefix, prefcount); 226 memcpy(ipaddr, prefix, prefcount);
226 227
@@ -370,7 +371,7 @@ err:
370static int lowpan_header_create(struct sk_buff *skb, 371static int lowpan_header_create(struct sk_buff *skb,
371 struct net_device *dev, 372 struct net_device *dev,
372 unsigned short type, const void *_daddr, 373 unsigned short type, const void *_daddr,
373 const void *_saddr, unsigned len) 374 const void *_saddr, unsigned int len)
374{ 375{
375 u8 tmp, iphc0, iphc1, *hc06_ptr; 376 u8 tmp, iphc0, iphc1, *hc06_ptr;
376 struct ipv6hdr *hdr; 377 struct ipv6hdr *hdr;
@@ -649,6 +650,53 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
649 kfree(entry); 650 kfree(entry);
650} 651}
651 652
653static struct lowpan_fragment *
654lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
655{
656 struct lowpan_fragment *frame;
657
658 frame = kzalloc(sizeof(struct lowpan_fragment),
659 GFP_ATOMIC);
660 if (!frame)
661 goto frame_err;
662
663 INIT_LIST_HEAD(&frame->list);
664
665 frame->length = (iphc0 & 7) | (len << 3);
666 frame->tag = tag;
667
668 /* allocate buffer for frame assembling */
669 frame->skb = alloc_skb(frame->length +
670 sizeof(struct ipv6hdr), GFP_ATOMIC);
671
672 if (!frame->skb)
673 goto skb_err;
674
675 frame->skb->priority = skb->priority;
676 frame->skb->dev = skb->dev;
677
678 /* reserve headroom for uncompressed ipv6 header */
679 skb_reserve(frame->skb, sizeof(struct ipv6hdr));
680 skb_put(frame->skb, frame->length);
681
682 init_timer(&frame->timer);
683 /* time out is the same as for ipv6 - 60 sec */
684 frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
685 frame->timer.data = (unsigned long)frame;
686 frame->timer.function = lowpan_fragment_timer_expired;
687
688 add_timer(&frame->timer);
689
690 list_add_tail(&frame->list, &lowpan_fragments);
691
692 return frame;
693
694skb_err:
695 kfree(frame);
696frame_err:
697 return NULL;
698}
699
652static int 700static int
653lowpan_process_data(struct sk_buff *skb) 701lowpan_process_data(struct sk_buff *skb)
654{ 702{
@@ -691,41 +739,9 @@ lowpan_process_data(struct sk_buff *skb)
691 739
692 /* alloc new frame structure */ 740 /* alloc new frame structure */
693 if (!found) { 741 if (!found) {
694 frame = kzalloc(sizeof(struct lowpan_fragment), 742 frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
695 GFP_ATOMIC);
696 if (!frame) 743 if (!frame)
697 goto unlock_and_drop; 744 goto unlock_and_drop;
698
699 INIT_LIST_HEAD(&frame->list);
700
701 frame->length = (iphc0 & 7) | (len << 3);
702 frame->tag = tag;
703
704 /* allocate buffer for frame assembling */
705 frame->skb = alloc_skb(frame->length +
706 sizeof(struct ipv6hdr), GFP_ATOMIC);
707
708 if (!frame->skb) {
709 kfree(frame);
710 goto unlock_and_drop;
711 }
712
713 frame->skb->priority = skb->priority;
714 frame->skb->dev = skb->dev;
715
716 /* reserve headroom for uncompressed ipv6 header */
717 skb_reserve(frame->skb, sizeof(struct ipv6hdr));
718 skb_put(frame->skb, frame->length);
719
720 init_timer(&frame->timer);
721 /* time out is the same as for ipv6 - 60 sec */
722 frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
723 frame->timer.data = (unsigned long)frame;
724 frame->timer.function = lowpan_fragment_timer_expired;
725
726 add_timer(&frame->timer);
727
728 list_add_tail(&frame->list, &lowpan_fragments);
729 } 745 }
730 746
731 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) 747 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
@@ -924,19 +940,6 @@ drop:
924 return -EINVAL; 940 return -EINVAL;
925} 941}
926 942
927static int lowpan_set_address(struct net_device *dev, void *p)
928{
929 struct sockaddr *sa = p;
930
931 if (netif_running(dev))
932 return -EBUSY;
933
934 /* TODO: validate addr */
935 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
936
937 return 0;
938}
939
940static int lowpan_get_mac_header_length(struct sk_buff *skb) 943static int lowpan_get_mac_header_length(struct sk_buff *skb)
941{ 944{
942 /* 945 /*
@@ -1056,13 +1059,37 @@ static void lowpan_dev_free(struct net_device *dev)
1056 free_netdev(dev); 1059 free_netdev(dev);
1057} 1060}
1058 1061
1062static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
1063{
1064 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1065 return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
1066}
1067
1068static u16 lowpan_get_pan_id(const struct net_device *dev)
1069{
1070 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1071 return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
1072}
1073
1074static u16 lowpan_get_short_addr(const struct net_device *dev)
1075{
1076 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1077 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
1078}
1079
1059static struct header_ops lowpan_header_ops = { 1080static struct header_ops lowpan_header_ops = {
1060 .create = lowpan_header_create, 1081 .create = lowpan_header_create,
1061}; 1082};
1062 1083
1063static const struct net_device_ops lowpan_netdev_ops = { 1084static const struct net_device_ops lowpan_netdev_ops = {
1064 .ndo_start_xmit = lowpan_xmit, 1085 .ndo_start_xmit = lowpan_xmit,
1065 .ndo_set_mac_address = lowpan_set_address, 1086 .ndo_set_mac_address = eth_mac_addr,
1087};
1088
1089static struct ieee802154_mlme_ops lowpan_mlme = {
1090 .get_pan_id = lowpan_get_pan_id,
1091 .get_phy = lowpan_get_phy,
1092 .get_short_addr = lowpan_get_short_addr,
1066}; 1093};
1067 1094
1068static void lowpan_setup(struct net_device *dev) 1095static void lowpan_setup(struct net_device *dev)
@@ -1082,6 +1109,7 @@ static void lowpan_setup(struct net_device *dev)
1082 1109
1083 dev->netdev_ops = &lowpan_netdev_ops; 1110 dev->netdev_ops = &lowpan_netdev_ops;
1084 dev->header_ops = &lowpan_header_ops; 1111 dev->header_ops = &lowpan_header_ops;
1112 dev->ml_priv = &lowpan_mlme;
1085 dev->destructor = lowpan_dev_free; 1113 dev->destructor = lowpan_dev_free;
1086} 1114}
1087 1115
@@ -1155,6 +1183,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1155 list_add_tail(&entry->list, &lowpan_devices); 1183 list_add_tail(&entry->list, &lowpan_devices);
1156 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); 1184 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
1157 1185
1186 spin_lock_init(&flist_lock);
1187
1158 register_netdevice(dev); 1188 register_netdevice(dev);
1159 1189
1160 return 0; 1190 return 0;
@@ -1164,11 +1194,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
1164{ 1194{
1165 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); 1195 struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
1166 struct net_device *real_dev = lowpan_dev->real_dev; 1196 struct net_device *real_dev = lowpan_dev->real_dev;
1167 struct lowpan_dev_record *entry; 1197 struct lowpan_dev_record *entry, *tmp;
1168 struct lowpan_dev_record *tmp; 1198 struct lowpan_fragment *frame, *tframe;
1169 1199
1170 ASSERT_RTNL(); 1200 ASSERT_RTNL();
1171 1201
1202 spin_lock(&flist_lock);
1203 list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
1204 del_timer(&frame->timer);
1205 list_del(&frame->list);
1206 dev_kfree_skb(frame->skb);
1207 kfree(frame);
1208 }
1209 spin_unlock(&flist_lock);
1210
1172 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1211 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1173 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { 1212 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1174 if (entry->ldev == dev) { 1213 if (entry->ldev == dev) {
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index aeff3f310482..8c2251fb0a3f 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -53,9 +53,6 @@
53#ifndef __6LOWPAN_H__ 53#ifndef __6LOWPAN_H__
54#define __6LOWPAN_H__ 54#define __6LOWPAN_H__
55 55
56/* need to know address length to manipulate with it */
57#define IEEE802154_ALEN 8
58
59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ 56#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */ 57#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
61#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ 58#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1b09eaabaac1..6fbb2ad7bb6d 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -44,8 +44,8 @@ struct dgram_sock {
44 struct ieee802154_addr src_addr; 44 struct ieee802154_addr src_addr;
45 struct ieee802154_addr dst_addr; 45 struct ieee802154_addr dst_addr;
46 46
47 unsigned bound:1; 47 unsigned int bound:1;
48 unsigned want_ack:1; 48 unsigned int want_ack:1;
49}; 49};
50 50
51static inline struct dgram_sock *dgram_sk(const struct sock *sk) 51static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -206,7 +206,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
206 struct msghdr *msg, size_t size) 206 struct msghdr *msg, size_t size)
207{ 207{
208 struct net_device *dev; 208 struct net_device *dev;
209 unsigned mtu; 209 unsigned int mtu;
210 struct sk_buff *skb; 210 struct sk_buff *skb;
211 struct dgram_sock *ro = dgram_sk(sk); 211 struct dgram_sock *ro = dgram_sk(sk);
212 int hlen, tlen; 212 int hlen, tlen;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index adaf46214905..ca92587720f4 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
63 if (!msg) 63 if (!msg)
64 return -ENOBUFS; 64 return -ENOBUFS;
65 65
66 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 66 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
67 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 67 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
68 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 68 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
69 dev->dev_addr); 69 dev->dev_addr) ||
70 70 nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
71 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, 71 addr->hwaddr) ||
72 addr->hwaddr); 72 nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
73 73 goto nla_put_failure;
74 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
75 74
76 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 75 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
77 76
@@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
92 if (!msg) 91 if (!msg)
93 return -ENOBUFS; 92 return -ENOBUFS;
94 93
95 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 94 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
96 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 95 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
97 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 96 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
98 dev->dev_addr); 97 dev->dev_addr) ||
99 98 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
100 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); 99 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
101 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 100 goto nla_put_failure;
102
103 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 101 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
104 102
105nla_put_failure: 103nla_put_failure:
@@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
119 if (!msg) 117 if (!msg)
120 return -ENOBUFS; 118 return -ENOBUFS;
121 119
122 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 120 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
123 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 121 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
124 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 122 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
125 dev->dev_addr); 123 dev->dev_addr))
126 124 goto nla_put_failure;
127 if (addr->addr_type == IEEE802154_ADDR_LONG) 125 if (addr->addr_type == IEEE802154_ADDR_LONG) {
128 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, 126 if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
129 addr->hwaddr); 127 addr->hwaddr))
130 else 128 goto nla_put_failure;
131 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, 129 } else {
132 addr->short_addr); 130 if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
133 131 addr->short_addr))
134 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); 132 goto nla_put_failure;
135 133 }
134 if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
135 goto nla_put_failure;
136 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 136 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
137 137
138nla_put_failure: 138nla_put_failure:
@@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
151 if (!msg) 151 if (!msg)
152 return -ENOBUFS; 152 return -ENOBUFS;
153 153
154 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 154 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
155 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 155 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
156 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 156 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
157 dev->dev_addr); 157 dev->dev_addr) ||
158 158 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
159 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 159 goto nla_put_failure;
160
161 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 160 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
162 161
163nla_put_failure: 162nla_put_failure:
@@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
177 if (!msg) 176 if (!msg)
178 return -ENOBUFS; 177 return -ENOBUFS;
179 178
180 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 179 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
181 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 180 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
182 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 181 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
183 dev->dev_addr); 182 dev->dev_addr) ||
184 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); 183 nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
185 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); 184 nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
186 185 goto nla_put_failure;
187 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 186 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
188 187
189nla_put_failure: 188nla_put_failure:
@@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
204 if (!msg) 203 if (!msg)
205 return -ENOBUFS; 204 return -ENOBUFS;
206 205
207 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 206 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
208 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 207 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
209 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 208 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
210 dev->dev_addr); 209 dev->dev_addr) ||
211 210 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
212 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 211 nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
213 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); 212 nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
214 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); 213 nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
215 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); 214 (edl &&
216 215 nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
217 if (edl) 216 goto nla_put_failure;
218 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
219
220 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 217 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
221 218
222nla_put_failure: 219nla_put_failure:
@@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
235 if (!msg) 232 if (!msg)
236 return -ENOBUFS; 233 return -ENOBUFS;
237 234
238 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 235 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
239 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 236 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
240 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 237 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
241 dev->dev_addr); 238 dev->dev_addr) ||
242 239 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
243 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 240 goto nla_put_failure;
244
245 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 241 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
246 242
247nla_put_failure: 243nla_put_failure:
@@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
266 phy = ieee802154_mlme_ops(dev)->get_phy(dev); 262 phy = ieee802154_mlme_ops(dev)->get_phy(dev);
267 BUG_ON(!phy); 263 BUG_ON(!phy);
268 264
269 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 265 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
270 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 266 nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
271 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 267 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
272 268 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
273 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 269 dev->dev_addr) ||
274 dev->dev_addr); 270 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
275 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, 271 ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
276 ieee802154_mlme_ops(dev)->get_short_addr(dev)); 272 nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
277 NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, 273 ieee802154_mlme_ops(dev)->get_pan_id(dev)))
278 ieee802154_mlme_ops(dev)->get_pan_id(dev)); 274 goto nla_put_failure;
279 wpan_phy_put(phy); 275 wpan_phy_put(phy);
280 return genlmsg_end(msg, hdr); 276 return genlmsg_end(msg, hdr);
281 277
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index c64a38d57aa3..eed291626da6 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
53 goto out; 53 goto out;
54 54
55 mutex_lock(&phy->pib_lock); 55 mutex_lock(&phy->pib_lock);
56 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 56 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
57 57 nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
58 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page); 58 nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
59 NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel); 59 goto nla_put_failure;
60 for (i = 0; i < 32; i++) { 60 for (i = 0; i < 32; i++) {
61 if (phy->channels_supported[i]) 61 if (phy->channels_supported[i])
62 buf[pages++] = phy->channels_supported[i] | (i << 27); 62 buf[pages++] = phy->channels_supported[i] | (i << 27);
63 } 63 }
64 if (pages) 64 if (pages &&
65 NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, 65 nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
66 pages * sizeof(uint32_t), buf); 66 pages * sizeof(uint32_t), buf))
67 67 goto nla_put_failure;
68 mutex_unlock(&phy->pib_lock); 68 mutex_unlock(&phy->pib_lock);
69 kfree(buf); 69 kfree(buf);
70 return genlmsg_end(msg, hdr); 70 return genlmsg_end(msg, hdr);
@@ -179,6 +179,7 @@ static int ieee802154_add_iface(struct sk_buff *skb,
179 const char *devname; 179 const char *devname;
180 int rc = -ENOBUFS; 180 int rc = -ENOBUFS;
181 struct net_device *dev; 181 struct net_device *dev;
182 int type = __IEEE802154_DEV_INVALID;
182 183
183 pr_debug("%s\n", __func__); 184 pr_debug("%s\n", __func__);
184 185
@@ -221,7 +222,13 @@ static int ieee802154_add_iface(struct sk_buff *skb,
221 goto nla_put_failure; 222 goto nla_put_failure;
222 } 223 }
223 224
224 dev = phy->add_iface(phy, devname); 225 if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
226 type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
227 if (type >= __IEEE802154_DEV_MAX)
228 return -EINVAL;
229 }
230
231 dev = phy->add_iface(phy, devname, type);
225 if (IS_ERR(dev)) { 232 if (IS_ERR(dev)) {
226 rc = PTR_ERR(dev); 233 rc = PTR_ERR(dev);
227 goto nla_put_failure; 234 goto nla_put_failure;
@@ -245,9 +252,9 @@ static int ieee802154_add_iface(struct sk_buff *skb,
245 goto dev_unregister; 252 goto dev_unregister;
246 } 253 }
247 254
248 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 255 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
249 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 256 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
250 257 goto nla_put_failure;
251 dev_put(dev); 258 dev_put(dev);
252 259
253 wpan_phy_put(phy); 260 wpan_phy_put(phy);
@@ -333,10 +340,9 @@ static int ieee802154_del_iface(struct sk_buff *skb,
333 340
334 rtnl_unlock(); 341 rtnl_unlock();
335 342
336 343 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
337 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 344 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
338 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name); 345 goto nla_put_failure;
339
340 wpan_phy_put(phy); 346 wpan_phy_put(phy);
341 347
342 return ieee802154_nl_reply(msg, info); 348 return ieee802154_nl_reply(msg, info);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index f96bae8fd330..50e823927d49 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -106,7 +106,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
106 size_t size) 106 size_t size)
107{ 107{
108 struct net_device *dev; 108 struct net_device *dev;
109 unsigned mtu; 109 unsigned int mtu;
110 struct sk_buff *skb; 110 struct sk_buff *skb;
111 int hlen, tlen; 111 int hlen, tlen;
112 int err; 112 int err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index d183262943d9..20f1cb5c8aba 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -262,8 +262,8 @@ config ARPD
262 bool "IP: ARP daemon support" 262 bool "IP: ARP daemon support"
263 ---help--- 263 ---help---
264 The kernel maintains an internal cache which maps IP addresses to 264 The kernel maintains an internal cache which maps IP addresses to
265 hardware addresses on the local network, so that Ethernet/Token Ring/ 265 hardware addresses on the local network, so that Ethernet
266 etc. frames are sent to the proper address on the physical networking 266 frames are sent to the proper address on the physical networking
267 layer. Normally, kernel uses the ARP protocol to resolve these 267 layer. Normally, kernel uses the ARP protocol to resolve these
268 mappings. 268 mappings.
269 269
@@ -312,7 +312,7 @@ config SYN_COOKIES
312 312
313config INET_AH 313config INET_AH
314 tristate "IP: AH transformation" 314 tristate "IP: AH transformation"
315 select XFRM 315 select XFRM_ALGO
316 select CRYPTO 316 select CRYPTO
317 select CRYPTO_HMAC 317 select CRYPTO_HMAC
318 select CRYPTO_MD5 318 select CRYPTO_MD5
@@ -324,7 +324,7 @@ config INET_AH
324 324
325config INET_ESP 325config INET_ESP
326 tristate "IP: ESP transformation" 326 tristate "IP: ESP transformation"
327 select XFRM 327 select XFRM_ALGO
328 select CRYPTO 328 select CRYPTO
329 select CRYPTO_AUTHENC 329 select CRYPTO_AUTHENC
330 select CRYPTO_HMAC 330 select CRYPTO_HMAC
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f7b5670744f0..c8f7aee587d1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -65,6 +65,8 @@
65 * 2 of the License, or (at your option) any later version. 65 * 2 of the License, or (at your option) any later version.
66 */ 66 */
67 67
68#define pr_fmt(fmt) "IPv4: " fmt
69
68#include <linux/err.h> 70#include <linux/err.h>
69#include <linux/errno.h> 71#include <linux/errno.h>
70#include <linux/types.h> 72#include <linux/types.h>
@@ -89,7 +91,6 @@
89#include <linux/slab.h> 91#include <linux/slab.h>
90 92
91#include <asm/uaccess.h> 93#include <asm/uaccess.h>
92#include <asm/system.h>
93 94
94#include <linux/inet.h> 95#include <linux/inet.h>
95#include <linux/igmp.h> 96#include <linux/igmp.h>
@@ -349,7 +350,7 @@ lookup_protocol:
349 err = 0; 350 err = 0;
350 sk->sk_no_check = answer_no_check; 351 sk->sk_no_check = answer_no_check;
351 if (INET_PROTOSW_REUSE & answer_flags) 352 if (INET_PROTOSW_REUSE & answer_flags)
352 sk->sk_reuse = 1; 353 sk->sk_reuse = SK_CAN_REUSE;
353 354
354 inet = inet_sk(sk); 355 inet = inet_sk(sk);
355 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 356 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -381,6 +382,7 @@ lookup_protocol:
381 inet->mc_all = 1; 382 inet->mc_all = 1;
382 inet->mc_index = 0; 383 inet->mc_index = 0;
383 inet->mc_list = NULL; 384 inet->mc_list = NULL;
385 inet->rcv_tos = 0;
384 386
385 sk_refcnt_debug_inc(sk); 387 sk_refcnt_debug_inc(sk);
386 388
@@ -539,7 +541,7 @@ out:
539} 541}
540EXPORT_SYMBOL(inet_bind); 542EXPORT_SYMBOL(inet_bind);
541 543
542int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, 544int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
543 int addr_len, int flags) 545 int addr_len, int flags)
544{ 546{
545 struct sock *sk = sock->sk; 547 struct sock *sk = sock->sk;
@@ -1084,13 +1086,11 @@ out:
1084 return; 1086 return;
1085 1087
1086out_permanent: 1088out_permanent:
1087 printk(KERN_ERR "Attempt to override permanent protocol %d.\n", 1089 pr_err("Attempt to override permanent protocol %d\n", protocol);
1088 protocol);
1089 goto out; 1090 goto out;
1090 1091
1091out_illegal: 1092out_illegal:
1092 printk(KERN_ERR 1093 pr_err("Ignoring attempt to register invalid socket type %d\n",
1093 "Ignoring attempt to register invalid socket type %d.\n",
1094 p->type); 1094 p->type);
1095 goto out; 1095 goto out;
1096} 1096}
@@ -1099,8 +1099,7 @@ EXPORT_SYMBOL(inet_register_protosw);
1099void inet_unregister_protosw(struct inet_protosw *p) 1099void inet_unregister_protosw(struct inet_protosw *p)
1100{ 1100{
1101 if (INET_PROTOSW_PERMANENT & p->flags) { 1101 if (INET_PROTOSW_PERMANENT & p->flags) {
1102 printk(KERN_ERR 1102 pr_err("Attempt to unregister permanent protocol %d\n",
1103 "Attempt to unregister permanent protocol %d.\n",
1104 p->protocol); 1103 p->protocol);
1105 } else { 1104 } else {
1106 spin_lock_bh(&inetsw_lock); 1105 spin_lock_bh(&inetsw_lock);
@@ -1149,8 +1148,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1149 return 0; 1148 return 0;
1150 1149
1151 if (sysctl_ip_dynaddr > 1) { 1150 if (sysctl_ip_dynaddr > 1) {
1152 printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", 1151 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1153 __func__, &old_saddr, &new_saddr); 1152 __func__, &old_saddr, &new_saddr);
1154 } 1153 }
1155 1154
1156 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; 1155 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
@@ -1679,14 +1678,14 @@ static int __init inet_init(void)
1679 */ 1678 */
1680 1679
1681 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0) 1680 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1682 printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n"); 1681 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1683 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0) 1682 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
1684 printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n"); 1683 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1685 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0) 1684 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
1686 printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n"); 1685 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1687#ifdef CONFIG_IP_MULTICAST 1686#ifdef CONFIG_IP_MULTICAST
1688 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0) 1687 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1689 printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n"); 1688 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1690#endif 1689#endif
1691 1690
1692 /* Register the socket-side information for inet_create. */ 1691 /* Register the socket-side information for inet_create. */
@@ -1733,14 +1732,14 @@ static int __init inet_init(void)
1733 */ 1732 */
1734#if defined(CONFIG_IP_MROUTE) 1733#if defined(CONFIG_IP_MROUTE)
1735 if (ip_mr_init()) 1734 if (ip_mr_init())
1736 printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n"); 1735 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1737#endif 1736#endif
1738 /* 1737 /*
1739 * Initialise per-cpu ipv4 mibs 1738 * Initialise per-cpu ipv4 mibs
1740 */ 1739 */
1741 1740
1742 if (init_ipv4_mibs()) 1741 if (init_ipv4_mibs())
1743 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); 1742 pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
1744 1743
1745 ipv4_proc_init(); 1744 ipv4_proc_init();
1746 1745
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 36d14406261e..e8f2617ecd47 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) "IPsec: " fmt
2
1#include <crypto/hash.h> 3#include <crypto/hash.h>
2#include <linux/err.h> 4#include <linux/err.h>
3#include <linux/module.h> 5#include <linux/module.h>
@@ -75,7 +77,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
75 77
76static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr) 78static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
77{ 79{
78 unsigned char * optptr = (unsigned char*)(iph+1); 80 unsigned char *optptr = (unsigned char *)(iph+1);
79 int l = iph->ihl*4 - sizeof(struct iphdr); 81 int l = iph->ihl*4 - sizeof(struct iphdr);
80 int optlen; 82 int optlen;
81 83
@@ -404,8 +406,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
404 ah->spi, IPPROTO_AH, AF_INET); 406 ah->spi, IPPROTO_AH, AF_INET);
405 if (!x) 407 if (!x)
406 return; 408 return;
407 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 409 pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
408 ntohl(ah->spi), ntohl(iph->daddr)); 410 ntohl(ah->spi), ntohl(iph->daddr));
409 xfrm_state_put(x); 411 xfrm_state_put(x);
410} 412}
411 413
@@ -445,9 +447,10 @@ static int ah_init_state(struct xfrm_state *x)
445 447
446 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 448 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
447 crypto_ahash_digestsize(ahash)) { 449 crypto_ahash_digestsize(ahash)) {
448 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 450 pr_info("%s: %s digestsize %u != %hu\n",
449 x->aalg->alg_name, crypto_ahash_digestsize(ahash), 451 __func__, x->aalg->alg_name,
450 aalg_desc->uinfo.auth.icv_fullbits/8); 452 crypto_ahash_digestsize(ahash),
453 aalg_desc->uinfo.auth.icv_fullbits / 8);
451 goto error; 454 goto error;
452 } 455 }
453 456
@@ -510,11 +513,11 @@ static const struct net_protocol ah4_protocol = {
510static int __init ah4_init(void) 513static int __init ah4_init(void)
511{ 514{
512 if (xfrm_register_type(&ah_type, AF_INET) < 0) { 515 if (xfrm_register_type(&ah_type, AF_INET) < 0) {
513 printk(KERN_INFO "ip ah init: can't add xfrm type\n"); 516 pr_info("%s: can't add xfrm type\n", __func__);
514 return -EAGAIN; 517 return -EAGAIN;
515 } 518 }
516 if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) { 519 if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
517 printk(KERN_INFO "ip ah init: can't add protocol\n"); 520 pr_info("%s: can't add protocol\n", __func__);
518 xfrm_unregister_type(&ah_type, AF_INET); 521 xfrm_unregister_type(&ah_type, AF_INET);
519 return -EAGAIN; 522 return -EAGAIN;
520 } 523 }
@@ -524,9 +527,9 @@ static int __init ah4_init(void)
524static void __exit ah4_fini(void) 527static void __exit ah4_fini(void)
525{ 528{
526 if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0) 529 if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
527 printk(KERN_INFO "ip ah close: can't remove protocol\n"); 530 pr_info("%s: can't remove protocol\n", __func__);
528 if (xfrm_unregister_type(&ah_type, AF_INET) < 0) 531 if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
529 printk(KERN_INFO "ip ah close: can't remove xfrm type\n"); 532 pr_info("%s: can't remove xfrm type\n", __func__);
530} 533}
531 534
532module_init(ah4_init); 535module_init(ah4_init);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 63e49890ad31..cda37be02f8d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -73,6 +73,8 @@
73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support. 73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
74 */ 74 */
75 75
76#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
77
76#include <linux/module.h> 78#include <linux/module.h>
77#include <linux/types.h> 79#include <linux/types.h>
78#include <linux/string.h> 80#include <linux/string.h>
@@ -89,7 +91,6 @@
89#include <linux/etherdevice.h> 91#include <linux/etherdevice.h>
90#include <linux/fddidevice.h> 92#include <linux/fddidevice.h>
91#include <linux/if_arp.h> 93#include <linux/if_arp.h>
92#include <linux/trdevice.h>
93#include <linux/skbuff.h> 94#include <linux/skbuff.h>
94#include <linux/proc_fs.h> 95#include <linux/proc_fs.h>
95#include <linux/seq_file.h> 96#include <linux/seq_file.h>
@@ -113,7 +114,6 @@
113#include <net/ax25.h> 114#include <net/ax25.h>
114#include <net/netrom.h> 115#include <net/netrom.h>
115 116
116#include <asm/system.h>
117#include <linux/uaccess.h> 117#include <linux/uaccess.h>
118 118
119#include <linux/netfilter_arp.h> 119#include <linux/netfilter_arp.h>
@@ -194,9 +194,6 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
194 case ARPHRD_IEEE802: 194 case ARPHRD_IEEE802:
195 ip_eth_mc_map(addr, haddr); 195 ip_eth_mc_map(addr, haddr);
196 return 0; 196 return 0;
197 case ARPHRD_IEEE802_TR:
198 ip_tr_mc_map(addr, haddr);
199 return 0;
200 case ARPHRD_INFINIBAND: 197 case ARPHRD_INFINIBAND:
201 ip_ib_mc_map(addr, dev->broadcast, haddr); 198 ip_ib_mc_map(addr, dev->broadcast, haddr);
202 return 0; 199 return 0;
@@ -365,8 +362,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
365 probes -= neigh->parms->ucast_probes; 362 probes -= neigh->parms->ucast_probes;
366 if (probes < 0) { 363 if (probes < 0) {
367 if (!(neigh->nud_state & NUD_VALID)) 364 if (!(neigh->nud_state & NUD_VALID))
368 printk(KERN_DEBUG 365 pr_debug("trying to ucast probe in NUD_INVALID\n");
369 "trying to ucast probe in NUD_INVALID\n");
370 dst_ha = neigh->ha; 366 dst_ha = neigh->ha;
371 read_lock_bh(&neigh->lock); 367 read_lock_bh(&neigh->lock);
372 } else { 368 } else {
@@ -453,7 +449,7 @@ static int arp_set_predefined(int addr_hint, unsigned char *haddr,
453{ 449{
454 switch (addr_hint) { 450 switch (addr_hint) {
455 case RTN_LOCAL: 451 case RTN_LOCAL:
456 printk(KERN_DEBUG "ARP: arp called for own IP address\n"); 452 pr_debug("arp called for own IP address\n");
457 memcpy(haddr, dev->dev_addr, dev->addr_len); 453 memcpy(haddr, dev->dev_addr, dev->addr_len);
458 return 1; 454 return 1;
459 case RTN_MULTICAST: 455 case RTN_MULTICAST:
@@ -474,7 +470,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
474 struct neighbour *n; 470 struct neighbour *n;
475 471
476 if (!skb_dst(skb)) { 472 if (!skb_dst(skb)) {
477 printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); 473 pr_debug("arp_find is called with dst==NULL\n");
478 kfree_skb(skb); 474 kfree_skb(skb);
479 return 1; 475 return 1;
480 } 476 }
@@ -649,12 +645,6 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
649 arp->ar_pro = htons(ETH_P_IP); 645 arp->ar_pro = htons(ETH_P_IP);
650 break; 646 break;
651#endif 647#endif
652#if IS_ENABLED(CONFIG_TR)
653 case ARPHRD_IEEE802_TR:
654 arp->ar_hrd = htons(ARPHRD_IEEE802);
655 arp->ar_pro = htons(ETH_P_IP);
656 break;
657#endif
658 } 648 }
659 649
660 arp->ar_hln = dev->addr_len; 650 arp->ar_hln = dev->addr_len;
@@ -752,11 +742,10 @@ static int arp_process(struct sk_buff *skb)
752 goto out; 742 goto out;
753 break; 743 break;
754 case ARPHRD_ETHER: 744 case ARPHRD_ETHER:
755 case ARPHRD_IEEE802_TR:
756 case ARPHRD_FDDI: 745 case ARPHRD_FDDI:
757 case ARPHRD_IEEE802: 746 case ARPHRD_IEEE802:
758 /* 747 /*
759 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 748 * ETHERNET, and Fibre Channel (which are IEEE 802
760 * devices, according to RFC 2625) devices will accept ARP 749 * devices, according to RFC 2625) devices will accept ARP
761 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2). 750 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
762 * This is the case also of FDDI, where the RFC 1390 says that 751 * This is the case also of FDDI, where the RFC 1390 says that
@@ -889,7 +878,7 @@ static int arp_process(struct sk_buff *skb)
889 878
890 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); 879 n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
891 880
892 if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) { 881 if (IN_DEV_ARP_ACCEPT(in_dev)) {
893 /* Unsolicited ARP is not accepted by default. 882 /* Unsolicited ARP is not accepted by default.
894 It is possible, that this option should be enabled for some 883 It is possible, that this option should be enabled for some
895 devices (strip is candidate) 884 devices (strip is candidate)
@@ -1060,7 +1049,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1060 neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev); 1049 neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
1061 err = PTR_ERR(neigh); 1050 err = PTR_ERR(neigh);
1062 if (!IS_ERR(neigh)) { 1051 if (!IS_ERR(neigh)) {
1063 unsigned state = NUD_STALE; 1052 unsigned int state = NUD_STALE;
1064 if (r->arp_flags & ATF_PERM) 1053 if (r->arp_flags & ATF_PERM)
1065 state = NUD_PERMANENT; 1054 state = NUD_PERMANENT;
1066 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ? 1055 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
@@ -1072,7 +1061,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1072 return err; 1061 return err;
1073} 1062}
1074 1063
1075static unsigned arp_state_to_flags(struct neighbour *neigh) 1064static unsigned int arp_state_to_flags(struct neighbour *neigh)
1076{ 1065{
1077 if (neigh->nud_state&NUD_PERMANENT) 1066 if (neigh->nud_state&NUD_PERMANENT)
1078 return ATF_PERM | ATF_COM; 1067 return ATF_PERM | ATF_COM;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 86f3b885b4f3..c48adc565e92 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1857,11 +1857,6 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1857 return CIPSO_V4_HDR_LEN + ret_val; 1857 return CIPSO_V4_HDR_LEN + ret_val;
1858} 1858}
1859 1859
1860static void opt_kfree_rcu(struct rcu_head *head)
1861{
1862 kfree(container_of(head, struct ip_options_rcu, rcu));
1863}
1864
1865/** 1860/**
1866 * cipso_v4_sock_setattr - Add a CIPSO option to a socket 1861 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
1867 * @sk: the socket 1862 * @sk: the socket
@@ -1938,7 +1933,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1938 } 1933 }
1939 rcu_assign_pointer(sk_inet->inet_opt, opt); 1934 rcu_assign_pointer(sk_inet->inet_opt, opt);
1940 if (old) 1935 if (old)
1941 call_rcu(&old->rcu, opt_kfree_rcu); 1936 kfree_rcu(old, rcu);
1942 1937
1943 return 0; 1938 return 0;
1944 1939
@@ -2005,7 +2000,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
2005 req_inet = inet_rsk(req); 2000 req_inet = inet_rsk(req);
2006 opt = xchg(&req_inet->opt, opt); 2001 opt = xchg(&req_inet->opt, opt);
2007 if (opt) 2002 if (opt)
2008 call_rcu(&opt->rcu, opt_kfree_rcu); 2003 kfree_rcu(opt, rcu);
2009 2004
2010 return 0; 2005 return 0;
2011 2006
@@ -2075,7 +2070,7 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2075 * remove the entire option struct */ 2070 * remove the entire option struct */
2076 *opt_ptr = NULL; 2071 *opt_ptr = NULL;
2077 hdr_delta = opt->opt.optlen; 2072 hdr_delta = opt->opt.optlen;
2078 call_rcu(&opt->rcu, opt_kfree_rcu); 2073 kfree_rcu(opt, rcu);
2079 } 2074 }
2080 2075
2081 return hdr_delta; 2076 return hdr_delta;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e41c40f48cfe..10e15a144e95 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -27,7 +27,6 @@
27 27
28 28
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <linux/bitops.h> 30#include <linux/bitops.h>
32#include <linux/capability.h> 31#include <linux/capability.h>
33#include <linux/module.h> 32#include <linux/module.h>
@@ -218,8 +217,7 @@ void in_dev_finish_destroy(struct in_device *idev)
218 WARN_ON(idev->ifa_list); 217 WARN_ON(idev->ifa_list);
219 WARN_ON(idev->mc_list); 218 WARN_ON(idev->mc_list);
220#ifdef NET_REFCNT_DEBUG 219#ifdef NET_REFCNT_DEBUG
221 printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n", 220 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
222 idev, dev ? dev->name : "NIL");
223#endif 221#endif
224 dev_put(dev); 222 dev_put(dev);
225 if (!idev->dead) 223 if (!idev->dead)
@@ -1079,6 +1077,7 @@ __be32 inet_confirm_addr(struct in_device *in_dev,
1079 1077
1080 return addr; 1078 return addr;
1081} 1079}
1080EXPORT_SYMBOL(inet_confirm_addr);
1082 1081
1083/* 1082/*
1084 * Device notifier 1083 * Device notifier
@@ -1125,7 +1124,7 @@ skip:
1125 } 1124 }
1126} 1125}
1127 1126
1128static inline bool inetdev_valid_mtu(unsigned mtu) 1127static inline bool inetdev_valid_mtu(unsigned int mtu)
1129{ 1128{
1130 return mtu >= 68; 1129 return mtu >= 68;
1131} 1130}
@@ -1174,7 +1173,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1174 1173
1175 switch (event) { 1174 switch (event) {
1176 case NETDEV_REGISTER: 1175 case NETDEV_REGISTER:
1177 printk(KERN_DEBUG "inetdev_event: bug\n"); 1176 pr_debug("%s: bug\n", __func__);
1178 RCU_INIT_POINTER(dev->ip_ptr, NULL); 1177 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1179 break; 1178 break;
1180 case NETDEV_UP: 1179 case NETDEV_UP:
@@ -1266,17 +1265,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1266 ifm->ifa_scope = ifa->ifa_scope; 1265 ifm->ifa_scope = ifa->ifa_scope;
1267 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 1266 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1268 1267
1269 if (ifa->ifa_address) 1268 if ((ifa->ifa_address &&
1270 NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address); 1269 nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1271 1270 (ifa->ifa_local &&
1272 if (ifa->ifa_local) 1271 nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
1273 NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local); 1272 (ifa->ifa_broadcast &&
1274 1273 nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1275 if (ifa->ifa_broadcast) 1274 (ifa->ifa_label[0] &&
1276 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); 1275 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
1277 1276 goto nla_put_failure;
1278 if (ifa->ifa_label[0])
1279 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
1280 1277
1281 return nlmsg_end(skb, nlh); 1278 return nlmsg_end(skb, nlh);
1282 1279
@@ -1587,7 +1584,6 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
1587static struct devinet_sysctl_table { 1584static struct devinet_sysctl_table {
1588 struct ctl_table_header *sysctl_header; 1585 struct ctl_table_header *sysctl_header;
1589 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX]; 1586 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
1590 char *dev_name;
1591} devinet_sysctl = { 1587} devinet_sysctl = {
1592 .devinet_vars = { 1588 .devinet_vars = {
1593 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", 1589 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -1629,16 +1625,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
1629{ 1625{
1630 int i; 1626 int i;
1631 struct devinet_sysctl_table *t; 1627 struct devinet_sysctl_table *t;
1632 1628 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
1633#define DEVINET_CTL_PATH_DEV 3
1634
1635 struct ctl_path devinet_ctl_path[] = {
1636 { .procname = "net", },
1637 { .procname = "ipv4", },
1638 { .procname = "conf", },
1639 { /* to be set */ },
1640 { },
1641 };
1642 1629
1643 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL); 1630 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
1644 if (!t) 1631 if (!t)
@@ -1650,27 +1637,15 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
1650 t->devinet_vars[i].extra2 = net; 1637 t->devinet_vars[i].extra2 = net;
1651 } 1638 }
1652 1639
1653 /* 1640 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
1654 * Make a copy of dev_name, because '.procname' is regarded as const
1655 * by sysctl and we wouldn't want anyone to change it under our feet
1656 * (see SIOCSIFNAME).
1657 */
1658 t->dev_name = kstrdup(dev_name, GFP_KERNEL);
1659 if (!t->dev_name)
1660 goto free;
1661
1662 devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name;
1663 1641
1664 t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path, 1642 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
1665 t->devinet_vars);
1666 if (!t->sysctl_header) 1643 if (!t->sysctl_header)
1667 goto free_procname; 1644 goto free;
1668 1645
1669 p->sysctl = t; 1646 p->sysctl = t;
1670 return 0; 1647 return 0;
1671 1648
1672free_procname:
1673 kfree(t->dev_name);
1674free: 1649free:
1675 kfree(t); 1650 kfree(t);
1676out: 1651out:
@@ -1686,7 +1661,6 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
1686 1661
1687 cnf->sysctl = NULL; 1662 cnf->sysctl = NULL;
1688 unregister_net_sysctl_table(t->sysctl_header); 1663 unregister_net_sysctl_table(t->sysctl_header);
1689 kfree(t->dev_name);
1690 kfree(t); 1664 kfree(t);
1691} 1665}
1692 1666
@@ -1716,12 +1690,6 @@ static struct ctl_table ctl_forward_entry[] = {
1716 }, 1690 },
1717 { }, 1691 { },
1718}; 1692};
1719
1720static __net_initdata struct ctl_path net_ipv4_path[] = {
1721 { .procname = "net", },
1722 { .procname = "ipv4", },
1723 { },
1724};
1725#endif 1693#endif
1726 1694
1727static __net_init int devinet_init_net(struct net *net) 1695static __net_init int devinet_init_net(struct net *net)
@@ -1767,7 +1735,7 @@ static __net_init int devinet_init_net(struct net *net)
1767 goto err_reg_dflt; 1735 goto err_reg_dflt;
1768 1736
1769 err = -ENOMEM; 1737 err = -ENOMEM;
1770 forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl); 1738 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
1771 if (forw_hdr == NULL) 1739 if (forw_hdr == NULL)
1772 goto err_reg_ctl; 1740 goto err_reg_ctl;
1773 net->ipv4.forw_hdr = forw_hdr; 1741 net->ipv4.forw_hdr = forw_hdr;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index a5b413416da3..cb982a61536f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) "IPsec: " fmt
2
1#include <crypto/aead.h> 3#include <crypto/aead.h>
2#include <crypto/authenc.h> 4#include <crypto/authenc.h>
3#include <linux/err.h> 5#include <linux/err.h>
@@ -457,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
457 struct esp_data *esp = x->data; 459 struct esp_data *esp = x->data;
458 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); 460 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
459 u32 align = max_t(u32, blksize, esp->padlen); 461 u32 align = max_t(u32, blksize, esp->padlen);
460 u32 rem; 462 unsigned int net_adj;
461
462 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
463 rem = mtu & (align - 1);
464 mtu &= ~(align - 1);
465 463
466 switch (x->props.mode) { 464 switch (x->props.mode) {
467 case XFRM_MODE_TUNNEL:
468 break;
469 default:
470 case XFRM_MODE_TRANSPORT: 465 case XFRM_MODE_TRANSPORT:
471 /* The worst case */
472 mtu -= blksize - 4;
473 mtu += min_t(u32, blksize - 4, rem);
474 break;
475 case XFRM_MODE_BEET: 466 case XFRM_MODE_BEET:
476 /* The worst case. */ 467 net_adj = sizeof(struct iphdr);
477 mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem); 468 break;
469 case XFRM_MODE_TUNNEL:
470 net_adj = 0;
478 break; 471 break;
472 default:
473 BUG();
479 } 474 }
480 475
481 return mtu - 2; 476 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
477 net_adj) & ~(align - 1)) + (net_adj - 2);
482} 478}
483 479
484static void esp4_err(struct sk_buff *skb, u32 info) 480static void esp4_err(struct sk_buff *skb, u32 info)
@@ -706,11 +702,11 @@ static const struct net_protocol esp4_protocol = {
706static int __init esp4_init(void) 702static int __init esp4_init(void)
707{ 703{
708 if (xfrm_register_type(&esp_type, AF_INET) < 0) { 704 if (xfrm_register_type(&esp_type, AF_INET) < 0) {
709 printk(KERN_INFO "ip esp init: can't add xfrm type\n"); 705 pr_info("%s: can't add xfrm type\n", __func__);
710 return -EAGAIN; 706 return -EAGAIN;
711 } 707 }
712 if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) { 708 if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
713 printk(KERN_INFO "ip esp init: can't add protocol\n"); 709 pr_info("%s: can't add protocol\n", __func__);
714 xfrm_unregister_type(&esp_type, AF_INET); 710 xfrm_unregister_type(&esp_type, AF_INET);
715 return -EAGAIN; 711 return -EAGAIN;
716 } 712 }
@@ -720,9 +716,9 @@ static int __init esp4_init(void)
720static void __exit esp4_fini(void) 716static void __exit esp4_fini(void)
721{ 717{
722 if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0) 718 if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
723 printk(KERN_INFO "ip esp close: can't remove protocol\n"); 719 pr_info("%s: can't remove protocol\n", __func__);
724 if (xfrm_unregister_type(&esp_type, AF_INET) < 0) 720 if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
725 printk(KERN_INFO "ip esp close: can't remove xfrm type\n"); 721 pr_info("%s: can't remove xfrm type\n", __func__);
726} 722}
727 723
728module_init(esp4_init); 724module_init(esp4_init);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 92fc5f69f5da..3854411fa37c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include <asm/system.h>
19#include <linux/bitops.h> 18#include <linux/bitops.h>
20#include <linux/capability.h> 19#include <linux/capability.h>
21#include <linux/types.h> 20#include <linux/types.h>
@@ -137,13 +136,13 @@ static void fib_flush(struct net *net)
137 * Find address type as if only "dev" was present in the system. If 136 * Find address type as if only "dev" was present in the system. If
138 * on_dev is NULL then all interfaces are taken into consideration. 137 * on_dev is NULL then all interfaces are taken into consideration.
139 */ 138 */
140static inline unsigned __inet_dev_addr_type(struct net *net, 139static inline unsigned int __inet_dev_addr_type(struct net *net,
141 const struct net_device *dev, 140 const struct net_device *dev,
142 __be32 addr) 141 __be32 addr)
143{ 142{
144 struct flowi4 fl4 = { .daddr = addr }; 143 struct flowi4 fl4 = { .daddr = addr };
145 struct fib_result res; 144 struct fib_result res;
146 unsigned ret = RTN_BROADCAST; 145 unsigned int ret = RTN_BROADCAST;
147 struct fib_table *local_table; 146 struct fib_table *local_table;
148 147
149 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) 148 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
@@ -695,7 +694,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
695 if (ifa->ifa_flags & IFA_F_SECONDARY) { 694 if (ifa->ifa_flags & IFA_F_SECONDARY) {
696 prim = inet_ifa_byprefix(in_dev, prefix, mask); 695 prim = inet_ifa_byprefix(in_dev, prefix, mask);
697 if (prim == NULL) { 696 if (prim == NULL) {
698 printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n"); 697 pr_warn("%s: bug: prim == NULL\n", __func__);
699 return; 698 return;
700 } 699 }
701 } 700 }
@@ -741,7 +740,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
741#define BRD_OK 2 740#define BRD_OK 2
742#define BRD0_OK 4 741#define BRD0_OK 4
743#define BRD1_OK 8 742#define BRD1_OK 8
744 unsigned ok = 0; 743 unsigned int ok = 0;
745 int subnet = 0; /* Primary network */ 744 int subnet = 0; /* Primary network */
746 int gone = 1; /* Address is missing */ 745 int gone = 1; /* Address is missing */
747 int same_prefsrc = 0; /* Another primary with same IP */ 746 int same_prefsrc = 0; /* Another primary with same IP */
@@ -749,11 +748,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
749 if (ifa->ifa_flags & IFA_F_SECONDARY) { 748 if (ifa->ifa_flags & IFA_F_SECONDARY) {
750 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 749 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
751 if (prim == NULL) { 750 if (prim == NULL) {
752 printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); 751 pr_warn("%s: bug: prim == NULL\n", __func__);
753 return; 752 return;
754 } 753 }
755 if (iprim && iprim != prim) { 754 if (iprim && iprim != prim) {
756 printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n"); 755 pr_warn("%s: bug: iprim != prim\n", __func__);
757 return; 756 return;
758 } 757 }
759 } else if (!ipv4_is_zeronet(any) && 758 } else if (!ipv4_is_zeronet(any) &&
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 799fc790b3cf..2d043f71ef70 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
221 frh->src_len = rule4->src_len; 221 frh->src_len = rule4->src_len;
222 frh->tos = rule4->tos; 222 frh->tos = rule4->tos;
223 223
224 if (rule4->dst_len) 224 if ((rule4->dst_len &&
225 NLA_PUT_BE32(skb, FRA_DST, rule4->dst); 225 nla_put_be32(skb, FRA_DST, rule4->dst)) ||
226 226 (rule4->src_len &&
227 if (rule4->src_len) 227 nla_put_be32(skb, FRA_SRC, rule4->src)))
228 NLA_PUT_BE32(skb, FRA_SRC, rule4->src); 228 goto nla_put_failure;
229
230#ifdef CONFIG_IP_ROUTE_CLASSID 229#ifdef CONFIG_IP_ROUTE_CLASSID
231 if (rule4->tclassid) 230 if (rule4->tclassid &&
232 NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); 231 nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
232 goto nla_put_failure;
233#endif 233#endif
234 return 0; 234 return 0;
235 235
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 80106d89d548..e5b7182fa099 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <asm/system.h>
18#include <linux/bitops.h> 17#include <linux/bitops.h>
19#include <linux/types.h> 18#include <linux/types.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -146,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
146{ 145{
147 struct fib_info *fi = container_of(head, struct fib_info, rcu); 146 struct fib_info *fi = container_of(head, struct fib_info, rcu);
148 147
148 change_nexthops(fi) {
149 if (nexthop_nh->nh_dev)
150 dev_put(nexthop_nh->nh_dev);
151 } endfor_nexthops(fi);
152
153 release_net(fi->fib_net);
149 if (fi->fib_metrics != (u32 *) dst_default_metrics) 154 if (fi->fib_metrics != (u32 *) dst_default_metrics)
150 kfree(fi->fib_metrics); 155 kfree(fi->fib_metrics);
151 kfree(fi); 156 kfree(fi);
@@ -154,16 +159,10 @@ static void free_fib_info_rcu(struct rcu_head *head)
154void free_fib_info(struct fib_info *fi) 159void free_fib_info(struct fib_info *fi)
155{ 160{
156 if (fi->fib_dead == 0) { 161 if (fi->fib_dead == 0) {
157 pr_warning("Freeing alive fib_info %p\n", fi); 162 pr_warn("Freeing alive fib_info %p\n", fi);
158 return; 163 return;
159 } 164 }
160 change_nexthops(fi) {
161 if (nexthop_nh->nh_dev)
162 dev_put(nexthop_nh->nh_dev);
163 nexthop_nh->nh_dev = NULL;
164 } endfor_nexthops(fi);
165 fib_info_cnt--; 165 fib_info_cnt--;
166 release_net(fi->fib_net);
167 call_rcu(&fi->rcu, free_fib_info_rcu); 166 call_rcu(&fi->rcu, free_fib_info_rcu);
168} 167}
169 168
@@ -932,33 +931,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
932 rtm->rtm_table = tb_id; 931 rtm->rtm_table = tb_id;
933 else 932 else
934 rtm->rtm_table = RT_TABLE_COMPAT; 933 rtm->rtm_table = RT_TABLE_COMPAT;
935 NLA_PUT_U32(skb, RTA_TABLE, tb_id); 934 if (nla_put_u32(skb, RTA_TABLE, tb_id))
935 goto nla_put_failure;
936 rtm->rtm_type = type; 936 rtm->rtm_type = type;
937 rtm->rtm_flags = fi->fib_flags; 937 rtm->rtm_flags = fi->fib_flags;
938 rtm->rtm_scope = fi->fib_scope; 938 rtm->rtm_scope = fi->fib_scope;
939 rtm->rtm_protocol = fi->fib_protocol; 939 rtm->rtm_protocol = fi->fib_protocol;
940 940
941 if (rtm->rtm_dst_len) 941 if (rtm->rtm_dst_len &&
942 NLA_PUT_BE32(skb, RTA_DST, dst); 942 nla_put_be32(skb, RTA_DST, dst))
943 943 goto nla_put_failure;
944 if (fi->fib_priority) 944 if (fi->fib_priority &&
945 NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority); 945 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
946 946 goto nla_put_failure;
947 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 947 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
948 goto nla_put_failure; 948 goto nla_put_failure;
949 949
950 if (fi->fib_prefsrc) 950 if (fi->fib_prefsrc &&
951 NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc); 951 nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
952 952 goto nla_put_failure;
953 if (fi->fib_nhs == 1) { 953 if (fi->fib_nhs == 1) {
954 if (fi->fib_nh->nh_gw) 954 if (fi->fib_nh->nh_gw &&
955 NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw); 955 nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
956 956 goto nla_put_failure;
957 if (fi->fib_nh->nh_oif) 957 if (fi->fib_nh->nh_oif &&
958 NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); 958 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
959 goto nla_put_failure;
959#ifdef CONFIG_IP_ROUTE_CLASSID 960#ifdef CONFIG_IP_ROUTE_CLASSID
960 if (fi->fib_nh[0].nh_tclassid) 961 if (fi->fib_nh[0].nh_tclassid &&
961 NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid); 962 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
963 goto nla_put_failure;
962#endif 964#endif
963 } 965 }
964#ifdef CONFIG_IP_ROUTE_MULTIPATH 966#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -979,11 +981,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
979 rtnh->rtnh_hops = nh->nh_weight - 1; 981 rtnh->rtnh_hops = nh->nh_weight - 1;
980 rtnh->rtnh_ifindex = nh->nh_oif; 982 rtnh->rtnh_ifindex = nh->nh_oif;
981 983
982 if (nh->nh_gw) 984 if (nh->nh_gw &&
983 NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw); 985 nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
986 goto nla_put_failure;
984#ifdef CONFIG_IP_ROUTE_CLASSID 987#ifdef CONFIG_IP_ROUTE_CLASSID
985 if (nh->nh_tclassid) 988 if (nh->nh_tclassid &&
986 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); 989 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
990 goto nla_put_failure;
987#endif 991#endif
988 /* length of rtnetlink header + attributes */ 992 /* length of rtnetlink header + attributes */
989 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; 993 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b555a5521e0..30b88d7b4bd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -51,7 +51,6 @@
51#define VERSION "0.409" 51#define VERSION "0.409"
52 52
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
54#include <asm/system.h>
55#include <linux/bitops.h> 54#include <linux/bitops.h>
56#include <linux/types.h> 55#include <linux/types.h>
57#include <linux/kernel.h> 56#include <linux/kernel.h>
@@ -1170,9 +1169,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1170 } 1169 }
1171 1170
1172 if (tp && tp->pos + tp->bits > 32) 1171 if (tp && tp->pos + tp->bits > 32)
1173 pr_warning("fib_trie" 1172 pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1174 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", 1173 tp, tp->pos, tp->bits, key, plen);
1175 tp, tp->pos, tp->bits, key, plen);
1176 1174
1177 /* Rebalance the trie */ 1175 /* Rebalance the trie */
1178 1176
@@ -1372,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
1372 1370
1373 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1374 continue; 1372 continue;
1373 if (fi->fib_dead)
1374 continue;
1375 if (fa->fa_info->fib_scope < flp->flowi4_scope) 1375 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1376 continue; 1376 continue;
1377 fib_alias_accessed(fa); 1377 fib_alias_accessed(fa);
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 8cb1ebb7cd74..42a491055c76 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/kmod.h> 17#include <linux/kmod.h>
@@ -118,10 +120,10 @@ static const struct net_protocol net_gre_protocol = {
118 120
119static int __init gre_init(void) 121static int __init gre_init(void)
120{ 122{
121 pr_info("GRE over IPv4 demultiplexor driver"); 123 pr_info("GRE over IPv4 demultiplexor driver\n");
122 124
123 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { 125 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
124 pr_err("gre: can't add protocol\n"); 126 pr_err("can't add protocol\n");
125 return -EAGAIN; 127 return -EAGAIN;
126 } 128 }
127 129
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ab188ae12fd9..c75efbdc71cb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -62,6 +62,8 @@
62 * 62 *
63 */ 63 */
64 64
65#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66
65#include <linux/module.h> 67#include <linux/module.h>
66#include <linux/types.h> 68#include <linux/types.h>
67#include <linux/jiffies.h> 69#include <linux/jiffies.h>
@@ -89,7 +91,6 @@
89#include <linux/errno.h> 91#include <linux/errno.h>
90#include <linux/timer.h> 92#include <linux/timer.h>
91#include <linux/init.h> 93#include <linux/init.h>
92#include <asm/system.h>
93#include <asm/uaccess.h> 94#include <asm/uaccess.h>
94#include <net/checksum.h> 95#include <net/checksum.h>
95#include <net/xfrm.h> 96#include <net/xfrm.h>
@@ -670,7 +671,7 @@ static void icmp_unreach(struct sk_buff *skb)
670 break; 671 break;
671 case ICMP_FRAG_NEEDED: 672 case ICMP_FRAG_NEEDED:
672 if (ipv4_config.no_pmtu_disc) { 673 if (ipv4_config.no_pmtu_disc) {
673 LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", 674 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
674 &iph->daddr); 675 &iph->daddr);
675 } else { 676 } else {
676 info = ip_rt_frag_needed(net, iph, 677 info = ip_rt_frag_needed(net, iph,
@@ -681,7 +682,7 @@ static void icmp_unreach(struct sk_buff *skb)
681 } 682 }
682 break; 683 break;
683 case ICMP_SR_FAILED: 684 case ICMP_SR_FAILED:
684 LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", 685 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"),
685 &iph->daddr); 686 &iph->daddr);
686 break; 687 break;
687 default: 688 default:
@@ -712,14 +713,10 @@ static void icmp_unreach(struct sk_buff *skb)
712 713
713 if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && 714 if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
714 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { 715 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
715 if (net_ratelimit()) 716 net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
716 printk(KERN_WARNING "%pI4 sent an invalid ICMP " 717 &ip_hdr(skb)->saddr,
717 "type %u, code %u " 718 icmph->type, icmph->code,
718 "error to a broadcast: %pI4 on %s\n", 719 &iph->daddr, skb->dev->name);
719 &ip_hdr(skb)->saddr,
720 icmph->type, icmph->code,
721 &iph->daddr,
722 skb->dev->name);
723 goto out; 720 goto out;
724 } 721 }
725 722
@@ -908,8 +905,7 @@ out_err:
908static void icmp_address(struct sk_buff *skb) 905static void icmp_address(struct sk_buff *skb)
909{ 906{
910#if 0 907#if 0
911 if (net_ratelimit()) 908 net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
912 printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
913#endif 909#endif
914} 910}
915 911
@@ -945,10 +941,10 @@ static void icmp_address_reply(struct sk_buff *skb)
945 inet_ifa_match(ip_hdr(skb)->saddr, ifa)) 941 inet_ifa_match(ip_hdr(skb)->saddr, ifa))
946 break; 942 break;
947 } 943 }
948 if (!ifa && net_ratelimit()) { 944 if (!ifa)
949 printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", 945 net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
950 mp, dev->name, &ip_hdr(skb)->saddr); 946 mp,
951 } 947 dev->name, &ip_hdr(skb)->saddr);
952 } 948 }
953} 949}
954 950
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 450e5d21ed2a..6699f23e6f55 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -73,7 +73,6 @@
73#include <linux/module.h> 73#include <linux/module.h>
74#include <linux/slab.h> 74#include <linux/slab.h>
75#include <asm/uaccess.h> 75#include <asm/uaccess.h>
76#include <asm/system.h>
77#include <linux/types.h> 76#include <linux/types.h>
78#include <linux/kernel.h> 77#include <linux/kernel.h>
79#include <linux/jiffies.h> 78#include <linux/jiffies.h>
@@ -345,10 +344,10 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
345 pip->protocol = IPPROTO_IGMP; 344 pip->protocol = IPPROTO_IGMP;
346 pip->tot_len = 0; /* filled in later */ 345 pip->tot_len = 0; /* filled in later */
347 ip_select_ident(pip, &rt->dst, NULL); 346 ip_select_ident(pip, &rt->dst, NULL);
348 ((u8*)&pip[1])[0] = IPOPT_RA; 347 ((u8 *)&pip[1])[0] = IPOPT_RA;
349 ((u8*)&pip[1])[1] = 4; 348 ((u8 *)&pip[1])[1] = 4;
350 ((u8*)&pip[1])[2] = 0; 349 ((u8 *)&pip[1])[2] = 0;
351 ((u8*)&pip[1])[3] = 0; 350 ((u8 *)&pip[1])[3] = 0;
352 351
353 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; 352 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
354 skb_put(skb, sizeof(*pig)); 353 skb_put(skb, sizeof(*pig));
@@ -689,10 +688,10 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
689 iph->saddr = fl4.saddr; 688 iph->saddr = fl4.saddr;
690 iph->protocol = IPPROTO_IGMP; 689 iph->protocol = IPPROTO_IGMP;
691 ip_select_ident(iph, &rt->dst, NULL); 690 ip_select_ident(iph, &rt->dst, NULL);
692 ((u8*)&iph[1])[0] = IPOPT_RA; 691 ((u8 *)&iph[1])[0] = IPOPT_RA;
693 ((u8*)&iph[1])[1] = 4; 692 ((u8 *)&iph[1])[1] = 4;
694 ((u8*)&iph[1])[2] = 0; 693 ((u8 *)&iph[1])[2] = 0;
695 ((u8*)&iph[1])[3] = 0; 694 ((u8 *)&iph[1])[3] = 0;
696 695
697 ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 696 ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
698 ih->type = type; 697 ih->type = type;
@@ -775,7 +774,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
775 if (psf->sf_count[MCAST_INCLUDE] || 774 if (psf->sf_count[MCAST_INCLUDE] ||
776 pmc->sfcount[MCAST_EXCLUDE] != 775 pmc->sfcount[MCAST_EXCLUDE] !=
777 psf->sf_count[MCAST_EXCLUDE]) 776 psf->sf_count[MCAST_EXCLUDE])
778 continue; 777 break;
779 if (srcs[i] == psf->sf_inaddr) { 778 if (srcs[i] == psf->sf_inaddr) {
780 scount++; 779 scount++;
781 break; 780 break;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19d66cefd7d3..f9ee7417f6a0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -42,7 +42,8 @@ EXPORT_SYMBOL(sysctl_local_reserved_ports);
42 42
43void inet_get_local_port_range(int *low, int *high) 43void inet_get_local_port_range(int *low, int *high)
44{ 44{
45 unsigned seq; 45 unsigned int seq;
46
46 do { 47 do {
47 seq = read_seqbegin(&sysctl_local_ports.lock); 48 seq = read_seqbegin(&sysctl_local_ports.lock);
48 49
@@ -53,7 +54,7 @@ void inet_get_local_port_range(int *low, int *high)
53EXPORT_SYMBOL(inet_get_local_port_range); 54EXPORT_SYMBOL(inet_get_local_port_range);
54 55
55int inet_csk_bind_conflict(const struct sock *sk, 56int inet_csk_bind_conflict(const struct sock *sk,
56 const struct inet_bind_bucket *tb) 57 const struct inet_bind_bucket *tb, bool relax)
57{ 58{
58 struct sock *sk2; 59 struct sock *sk2;
59 struct hlist_node *node; 60 struct hlist_node *node;
@@ -79,6 +80,14 @@ int inet_csk_bind_conflict(const struct sock *sk,
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 80 sk2_rcv_saddr == sk_rcv_saddr(sk))
80 break; 81 break;
81 } 82 }
83 if (!relax && reuse && sk2->sk_reuse &&
84 sk2->sk_state != TCP_LISTEN) {
85 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
86
87 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
88 sk2_rcv_saddr == sk_rcv_saddr(sk))
89 break;
90 }
82 } 91 }
83 } 92 }
84 return node != NULL; 93 return node != NULL;
@@ -122,12 +131,13 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 131 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 132 smallest_size = tb->num_owners;
124 smallest_rover = rover; 133 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 134 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
135 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
126 snum = smallest_rover; 136 snum = smallest_rover;
127 goto tb_found; 137 goto tb_found;
128 } 138 }
129 } 139 }
130 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 140 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
131 snum = rover; 141 snum = rover;
132 goto tb_found; 142 goto tb_found;
133 } 143 }
@@ -172,18 +182,22 @@ have_snum:
172 goto tb_not_found; 182 goto tb_not_found;
173tb_found: 183tb_found:
174 if (!hlist_empty(&tb->owners)) { 184 if (!hlist_empty(&tb->owners)) {
185 if (sk->sk_reuse == SK_FORCE_REUSE)
186 goto success;
187
175 if (tb->fastreuse > 0 && 188 if (tb->fastreuse > 0 &&
176 sk->sk_reuse && sk->sk_state != TCP_LISTEN && 189 sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
177 smallest_size == -1) { 190 smallest_size == -1) {
178 goto success; 191 goto success;
179 } else { 192 } else {
180 ret = 1; 193 ret = 1;
181 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 194 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
182 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && 195 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
183 smallest_size != -1 && --attempts >= 0) { 196 smallest_size != -1 && --attempts >= 0) {
184 spin_unlock(&head->lock); 197 spin_unlock(&head->lock);
185 goto again; 198 goto again;
186 } 199 }
200
187 goto fail_unlock; 201 goto fail_unlock;
188 } 202 }
189 } 203 }
@@ -363,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
363 377
364 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 378 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
365 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 379 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
366 sk->sk_protocol, inet_sk_flowi_flags(sk), 380 sk->sk_protocol,
381 inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
367 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 382 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
368 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 383 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
369 security_req_classify_flow(req, flowi4_to_flowi(fl4)); 384 security_req_classify_flow(req, flowi4_to_flowi(fl4));
@@ -514,7 +529,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
514 529
515 /* Normally all the openreqs are young and become mature 530 /* Normally all the openreqs are young and become mature
516 * (i.e. converted to established socket) for first timeout. 531 * (i.e. converted to established socket) for first timeout.
517 * If synack was not acknowledged for 3 seconds, it means 532 * If synack was not acknowledged for 1 second, it means
518 * one of the following things: synack was lost, ack was lost, 533 * one of the following things: synack was lost, ack was lost,
519 * rtt is high or nobody planned to ack (i.e. synflood). 534 * rtt is high or nobody planned to ack (i.e. synflood).
520 * When server is a bit loaded, queue is populated with old 535 * When server is a bit loaded, queue is populated with old
@@ -555,8 +570,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
555 syn_ack_recalc(req, thresh, max_retries, 570 syn_ack_recalc(req, thresh, max_retries,
556 queue->rskq_defer_accept, 571 queue->rskq_defer_accept,
557 &expire, &resend); 572 &expire, &resend);
558 if (req->rsk_ops->syn_ack_timeout) 573 req->rsk_ops->syn_ack_timeout(parent, req);
559 req->rsk_ops->syn_ack_timeout(parent, req);
560 if (!expire && 574 if (!expire &&
561 (!resend || 575 (!resend ||
562 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 576 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index fcf281819cd4..46d1e7199a8c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
141 goto rtattr_failure; 141 goto rtattr_failure;
142 142
143 if (icsk == NULL) { 143 if (icsk == NULL) {
144 r->idiag_rqueue = r->idiag_wqueue = 0; 144 handler->idiag_get_info(sk, r, NULL);
145 goto out; 145 goto out;
146 } 146 }
147 147
@@ -960,9 +960,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
960 inet_diag_bc_audit(nla_data(attr), nla_len(attr))) 960 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
961 return -EINVAL; 961 return -EINVAL;
962 } 962 }
963 963 {
964 return netlink_dump_start(sock_diag_nlsk, skb, nlh, 964 struct netlink_dump_control c = {
965 inet_diag_dump_compat, NULL, 0); 965 .dump = inet_diag_dump_compat,
966 };
967 return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
968 }
966 } 969 }
967 970
968 return inet_diag_get_exact_compat(skb, nlh); 971 return inet_diag_get_exact_compat(skb, nlh);
@@ -985,20 +988,23 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
985 inet_diag_bc_audit(nla_data(attr), nla_len(attr))) 988 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
986 return -EINVAL; 989 return -EINVAL;
987 } 990 }
988 991 {
989 return netlink_dump_start(sock_diag_nlsk, skb, h, 992 struct netlink_dump_control c = {
990 inet_diag_dump, NULL, 0); 993 .dump = inet_diag_dump,
994 };
995 return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
996 }
991 } 997 }
992 998
993 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); 999 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
994} 1000}
995 1001
996static struct sock_diag_handler inet_diag_handler = { 1002static const struct sock_diag_handler inet_diag_handler = {
997 .family = AF_INET, 1003 .family = AF_INET,
998 .dump = inet_diag_handler_dump, 1004 .dump = inet_diag_handler_dump,
999}; 1005};
1000 1006
1001static struct sock_diag_handler inet6_diag_handler = { 1007static const struct sock_diag_handler inet6_diag_handler = {
1002 .family = AF_INET6, 1008 .family = AF_INET6,
1003 .dump = inet_diag_handler_dump, 1009 .dump = inet_diag_handler_dump,
1004}; 1010};
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 984ec656b03b..7880af970208 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -217,7 +217,7 @@ begin:
217} 217}
218EXPORT_SYMBOL_GPL(__inet_lookup_listener); 218EXPORT_SYMBOL_GPL(__inet_lookup_listener);
219 219
220struct sock * __inet_lookup_established(struct net *net, 220struct sock *__inet_lookup_established(struct net *net,
221 struct inet_hashinfo *hashinfo, 221 struct inet_hashinfo *hashinfo,
222 const __be32 saddr, const __be16 sport, 222 const __be32 saddr, const __be16 sport,
223 const __be32 daddr, const u16 hnum, 223 const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 89168c6351ff..2784db3155fb 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -89,8 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
89 89
90#ifdef SOCK_REFCNT_DEBUG 90#ifdef SOCK_REFCNT_DEBUG
91 if (atomic_read(&tw->tw_refcnt) != 1) { 91 if (atomic_read(&tw->tw_refcnt) != 1) {
92 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", 92 pr_debug("%s timewait_sock %p refcnt=%d\n",
93 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); 93 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
94 } 94 }
95#endif 95#endif
96 while (refcnt) { 96 while (refcnt) {
@@ -263,7 +263,7 @@ rescan:
263void inet_twdr_hangman(unsigned long data) 263void inet_twdr_hangman(unsigned long data)
264{ 264{
265 struct inet_timewait_death_row *twdr; 265 struct inet_timewait_death_row *twdr;
266 int unsigned need_timer; 266 unsigned int need_timer;
267 267
268 twdr = (struct inet_timewait_death_row *)data; 268 twdr = (struct inet_timewait_death_row *)data;
269 spin_lock(&twdr->death_lock); 269 spin_lock(&twdr->death_lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6c7168..e5c44fc586ab 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -41,7 +41,7 @@
41 41
42static int ip_forward_finish(struct sk_buff *skb) 42static int ip_forward_finish(struct sk_buff *skb)
43{ 43{
44 struct ip_options * opt = &(IPCB(skb)->opt); 44 struct ip_options *opt = &(IPCB(skb)->opt);
45 45
46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
47 47
@@ -55,7 +55,7 @@ int ip_forward(struct sk_buff *skb)
55{ 55{
56 struct iphdr *iph; /* Our header */ 56 struct iphdr *iph; /* Our header */
57 struct rtable *rt; /* Route we use */ 57 struct rtable *rt; /* Route we use */
58 struct ip_options * opt = &(IPCB(skb)->opt); 58 struct ip_options *opt = &(IPCB(skb)->opt);
59 59
60 if (skb_warn_if_lro(skb)) 60 if (skb_warn_if_lro(skb))
61 goto drop; 61 goto drop;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1f23a57aa9e6..9dbd3dd6022d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -20,6 +20,8 @@
20 * Patrick McHardy : LRU queue of frag heads for evictor. 20 * Patrick McHardy : LRU queue of frag heads for evictor.
21 */ 21 */
22 22
23#define pr_fmt(fmt) "IPv4: " fmt
24
23#include <linux/compiler.h> 25#include <linux/compiler.h>
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/types.h> 27#include <linux/types.h>
@@ -146,17 +148,17 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q)
146 return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); 148 return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
147} 149}
148 150
149static int ip4_frag_match(struct inet_frag_queue *q, void *a) 151static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
150{ 152{
151 struct ipq *qp; 153 struct ipq *qp;
152 struct ip4_create_arg *arg = a; 154 struct ip4_create_arg *arg = a;
153 155
154 qp = container_of(q, struct ipq, q); 156 qp = container_of(q, struct ipq, q);
155 return qp->id == arg->iph->id && 157 return qp->id == arg->iph->id &&
156 qp->saddr == arg->iph->saddr && 158 qp->saddr == arg->iph->saddr &&
157 qp->daddr == arg->iph->daddr && 159 qp->daddr == arg->iph->daddr &&
158 qp->protocol == arg->iph->protocol && 160 qp->protocol == arg->iph->protocol &&
159 qp->user == arg->user; 161 qp->user == arg->user;
160} 162}
161 163
162/* Memory Tracking Functions. */ 164/* Memory Tracking Functions. */
@@ -299,7 +301,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
299 return container_of(q, struct ipq, q); 301 return container_of(q, struct ipq, q);
300 302
301out_nomem: 303out_nomem:
302 LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n"); 304 LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
303 return NULL; 305 return NULL;
304} 306}
305 307
@@ -543,6 +545,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
543 int len; 545 int len;
544 int ihlen; 546 int ihlen;
545 int err; 547 int err;
548 int sum_truesize;
546 u8 ecn; 549 u8 ecn;
547 550
548 ipq_kill(qp); 551 ipq_kill(qp);
@@ -567,7 +570,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
567 skb_morph(head, qp->q.fragments); 570 skb_morph(head, qp->q.fragments);
568 head->next = qp->q.fragments->next; 571 head->next = qp->q.fragments->next;
569 572
570 kfree_skb(qp->q.fragments); 573 consume_skb(qp->q.fragments);
571 qp->q.fragments = head; 574 qp->q.fragments = head;
572 } 575 }
573 576
@@ -609,19 +612,32 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
609 atomic_add(clone->truesize, &qp->q.net->mem); 612 atomic_add(clone->truesize, &qp->q.net->mem);
610 } 613 }
611 614
612 skb_shinfo(head)->frag_list = head->next;
613 skb_push(head, head->data - skb_network_header(head)); 615 skb_push(head, head->data - skb_network_header(head));
614 616
615 for (fp=head->next; fp; fp = fp->next) { 617 sum_truesize = head->truesize;
616 head->data_len += fp->len; 618 for (fp = head->next; fp;) {
617 head->len += fp->len; 619 bool headstolen;
620 int delta;
621 struct sk_buff *next = fp->next;
622
623 sum_truesize += fp->truesize;
618 if (head->ip_summed != fp->ip_summed) 624 if (head->ip_summed != fp->ip_summed)
619 head->ip_summed = CHECKSUM_NONE; 625 head->ip_summed = CHECKSUM_NONE;
620 else if (head->ip_summed == CHECKSUM_COMPLETE) 626 else if (head->ip_summed == CHECKSUM_COMPLETE)
621 head->csum = csum_add(head->csum, fp->csum); 627 head->csum = csum_add(head->csum, fp->csum);
622 head->truesize += fp->truesize; 628
629 if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
630 kfree_skb_partial(fp, headstolen);
631 } else {
632 if (!skb_shinfo(head)->frag_list)
633 skb_shinfo(head)->frag_list = fp;
634 head->data_len += fp->len;
635 head->len += fp->len;
636 head->truesize += fp->truesize;
637 }
638 fp = next;
623 } 639 }
624 atomic_sub(head->truesize, &qp->q.net->mem); 640 atomic_sub(sum_truesize, &qp->q.net->mem);
625 641
626 head->next = NULL; 642 head->next = NULL;
627 head->dev = dev; 643 head->dev = dev;
@@ -637,14 +653,12 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
637 return 0; 653 return 0;
638 654
639out_nomem: 655out_nomem:
640 LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " 656 LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
641 "queue %p\n", qp); 657 qp);
642 err = -ENOMEM; 658 err = -ENOMEM;
643 goto out_fail; 659 goto out_fail;
644out_oversize: 660out_oversize:
645 if (net_ratelimit()) 661 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
646 printk(KERN_INFO "Oversized IP packet from %pI4.\n",
647 &qp->saddr);
648out_fail: 662out_fail:
649 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 663 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
650 return err; 664 return err;
@@ -781,7 +795,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
781 table[2].data = &net->ipv4.frags.timeout; 795 table[2].data = &net->ipv4.frags.timeout;
782 } 796 }
783 797
784 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); 798 hdr = register_net_sysctl(net, "net/ipv4", table);
785 if (hdr == NULL) 799 if (hdr == NULL)
786 goto err_reg; 800 goto err_reg;
787 801
@@ -806,7 +820,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
806 820
807static void ip4_frags_ctl_register(void) 821static void ip4_frags_ctl_register(void)
808{ 822{
809 register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); 823 register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
810} 824}
811#else 825#else
812static inline int ip4_frags_ns_ctl_register(struct net *net) 826static inline int ip4_frags_ns_ctl_register(struct net *net)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38673d2860e2..f49047b79609 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/capability.h> 15#include <linux/capability.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/types.h> 17#include <linux/types.h>
@@ -167,37 +169,56 @@ struct ipgre_net {
167 169
168/* often modified stats are per cpu, other are shared (netdev->stats) */ 170/* often modified stats are per cpu, other are shared (netdev->stats) */
169struct pcpu_tstats { 171struct pcpu_tstats {
170 unsigned long rx_packets; 172 u64 rx_packets;
171 unsigned long rx_bytes; 173 u64 rx_bytes;
172 unsigned long tx_packets; 174 u64 tx_packets;
173 unsigned long tx_bytes; 175 u64 tx_bytes;
174} __attribute__((aligned(4*sizeof(unsigned long)))); 176 struct u64_stats_sync syncp;
177};
175 178
176static struct net_device_stats *ipgre_get_stats(struct net_device *dev) 179static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
180 struct rtnl_link_stats64 *tot)
177{ 181{
178 struct pcpu_tstats sum = { 0 };
179 int i; 182 int i;
180 183
181 for_each_possible_cpu(i) { 184 for_each_possible_cpu(i) {
182 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 185 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
183 186 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
184 sum.rx_packets += tstats->rx_packets; 187 unsigned int start;
185 sum.rx_bytes += tstats->rx_bytes; 188
186 sum.tx_packets += tstats->tx_packets; 189 do {
187 sum.tx_bytes += tstats->tx_bytes; 190 start = u64_stats_fetch_begin_bh(&tstats->syncp);
191 rx_packets = tstats->rx_packets;
192 tx_packets = tstats->tx_packets;
193 rx_bytes = tstats->rx_bytes;
194 tx_bytes = tstats->tx_bytes;
195 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
196
197 tot->rx_packets += rx_packets;
198 tot->tx_packets += tx_packets;
199 tot->rx_bytes += rx_bytes;
200 tot->tx_bytes += tx_bytes;
188 } 201 }
189 dev->stats.rx_packets = sum.rx_packets; 202
190 dev->stats.rx_bytes = sum.rx_bytes; 203 tot->multicast = dev->stats.multicast;
191 dev->stats.tx_packets = sum.tx_packets; 204 tot->rx_crc_errors = dev->stats.rx_crc_errors;
192 dev->stats.tx_bytes = sum.tx_bytes; 205 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
193 return &dev->stats; 206 tot->rx_length_errors = dev->stats.rx_length_errors;
207 tot->rx_errors = dev->stats.rx_errors;
208 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
209 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
210 tot->tx_dropped = dev->stats.tx_dropped;
211 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
212 tot->tx_errors = dev->stats.tx_errors;
213
214 return tot;
194} 215}
195 216
196/* Given src, dst and key, find appropriate for input tunnel. */ 217/* Given src, dst and key, find appropriate for input tunnel. */
197 218
198static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, 219static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
199 __be32 remote, __be32 local, 220 __be32 remote, __be32 local,
200 __be32 key, __be16 gre_proto) 221 __be32 key, __be16 gre_proto)
201{ 222{
202 struct net *net = dev_net(dev); 223 struct net *net = dev_net(dev);
203 int link = dev->ifindex; 224 int link = dev->ifindex;
@@ -462,7 +483,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
462 */ 483 */
463 484
464 const struct iphdr *iph = (const struct iphdr *)skb->data; 485 const struct iphdr *iph = (const struct iphdr *)skb->data;
465 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 486 __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
466 int grehlen = (iph->ihl<<2) + 4; 487 int grehlen = (iph->ihl<<2) + 4;
467 const int type = icmp_hdr(skb)->type; 488 const int type = icmp_hdr(skb)->type;
468 const int code = icmp_hdr(skb)->code; 489 const int code = icmp_hdr(skb)->code;
@@ -572,7 +593,7 @@ static int ipgre_rcv(struct sk_buff *skb)
572 593
573 iph = ip_hdr(skb); 594 iph = ip_hdr(skb);
574 h = skb->data; 595 h = skb->data;
575 flags = *(__be16*)h; 596 flags = *(__be16 *)h;
576 597
577 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { 598 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
578 /* - Version must be 0. 599 /* - Version must be 0.
@@ -596,11 +617,11 @@ static int ipgre_rcv(struct sk_buff *skb)
596 offset += 4; 617 offset += 4;
597 } 618 }
598 if (flags&GRE_KEY) { 619 if (flags&GRE_KEY) {
599 key = *(__be32*)(h + offset); 620 key = *(__be32 *)(h + offset);
600 offset += 4; 621 offset += 4;
601 } 622 }
602 if (flags&GRE_SEQ) { 623 if (flags&GRE_SEQ) {
603 seqno = ntohl(*(__be32*)(h + offset)); 624 seqno = ntohl(*(__be32 *)(h + offset));
604 offset += 4; 625 offset += 4;
605 } 626 }
606 } 627 }
@@ -670,8 +691,10 @@ static int ipgre_rcv(struct sk_buff *skb)
670 } 691 }
671 692
672 tstats = this_cpu_ptr(tunnel->dev->tstats); 693 tstats = this_cpu_ptr(tunnel->dev->tstats);
694 u64_stats_update_begin(&tstats->syncp);
673 tstats->rx_packets++; 695 tstats->rx_packets++;
674 tstats->rx_bytes += skb->len; 696 tstats->rx_bytes += skb->len;
697 u64_stats_update_end(&tstats->syncp);
675 698
676 __skb_tunnel_rx(skb, tunnel->dev); 699 __skb_tunnel_rx(skb, tunnel->dev);
677 700
@@ -730,15 +753,16 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
730 753
731 if (skb->protocol == htons(ETH_P_IP)) { 754 if (skb->protocol == htons(ETH_P_IP)) {
732 rt = skb_rtable(skb); 755 rt = skb_rtable(skb);
733 if ((dst = rt->rt_gateway) == 0) 756 dst = rt->rt_gateway;
734 goto tx_error_icmp;
735 } 757 }
736#if IS_ENABLED(CONFIG_IPV6) 758#if IS_ENABLED(CONFIG_IPV6)
737 else if (skb->protocol == htons(ETH_P_IPV6)) { 759 else if (skb->protocol == htons(ETH_P_IPV6)) {
738 struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
739 const struct in6_addr *addr6; 760 const struct in6_addr *addr6;
761 struct neighbour *neigh;
762 bool do_tx_error_icmp;
740 int addr_type; 763 int addr_type;
741 764
765 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
742 if (neigh == NULL) 766 if (neigh == NULL)
743 goto tx_error; 767 goto tx_error;
744 768
@@ -751,9 +775,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
751 } 775 }
752 776
753 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 777 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
778 do_tx_error_icmp = true;
779 else {
780 do_tx_error_icmp = false;
781 dst = addr6->s6_addr32[3];
782 }
783 neigh_release(neigh);
784 if (do_tx_error_icmp)
754 goto tx_error_icmp; 785 goto tx_error_icmp;
755
756 dst = addr6->s6_addr32[3];
757 } 786 }
758#endif 787#endif
759 else 788 else
@@ -892,7 +921,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
892 htons(ETH_P_TEB) : skb->protocol; 921 htons(ETH_P_TEB) : skb->protocol;
893 922
894 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { 923 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
895 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4); 924 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
896 925
897 if (tunnel->parms.o_flags&GRE_SEQ) { 926 if (tunnel->parms.o_flags&GRE_SEQ) {
898 ++tunnel->o_seqno; 927 ++tunnel->o_seqno;
@@ -905,7 +934,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
905 } 934 }
906 if (tunnel->parms.o_flags&GRE_CSUM) { 935 if (tunnel->parms.o_flags&GRE_CSUM) {
907 *ptr = 0; 936 *ptr = 0;
908 *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); 937 *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
909 } 938 }
910 } 939 }
911 940
@@ -914,9 +943,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
914 __IPTUNNEL_XMIT(tstats, &dev->stats); 943 __IPTUNNEL_XMIT(tstats, &dev->stats);
915 return NETDEV_TX_OK; 944 return NETDEV_TX_OK;
916 945
946#if IS_ENABLED(CONFIG_IPV6)
917tx_error_icmp: 947tx_error_icmp:
918 dst_link_failure(skb); 948 dst_link_failure(skb);
919 949#endif
920tx_error: 950tx_error:
921 dev->stats.tx_errors++; 951 dev->stats.tx_errors++;
922 dev_kfree_skb(skb); 952 dev_kfree_skb(skb);
@@ -1160,7 +1190,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1160{ 1190{
1161 struct ip_tunnel *t = netdev_priv(dev); 1191 struct ip_tunnel *t = netdev_priv(dev);
1162 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1192 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1163 __be16 *p = (__be16*)(iph+1); 1193 __be16 *p = (__be16 *)(iph+1);
1164 1194
1165 memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); 1195 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1166 p[0] = t->parms.o_flags; 1196 p[0] = t->parms.o_flags;
@@ -1244,7 +1274,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
1244 .ndo_start_xmit = ipgre_tunnel_xmit, 1274 .ndo_start_xmit = ipgre_tunnel_xmit,
1245 .ndo_do_ioctl = ipgre_tunnel_ioctl, 1275 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1246 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1276 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1247 .ndo_get_stats = ipgre_get_stats, 1277 .ndo_get_stats64 = ipgre_get_stats64,
1248}; 1278};
1249 1279
1250static void ipgre_dev_free(struct net_device *dev) 1280static void ipgre_dev_free(struct net_device *dev)
@@ -1498,7 +1528,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
1498 .ndo_set_mac_address = eth_mac_addr, 1528 .ndo_set_mac_address = eth_mac_addr,
1499 .ndo_validate_addr = eth_validate_addr, 1529 .ndo_validate_addr = eth_validate_addr,
1500 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1530 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1501 .ndo_get_stats = ipgre_get_stats, 1531 .ndo_get_stats64 = ipgre_get_stats64,
1502}; 1532};
1503 1533
1504static void ipgre_tap_setup(struct net_device *dev) 1534static void ipgre_tap_setup(struct net_device *dev)
@@ -1529,7 +1559,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla
1529 return -EEXIST; 1559 return -EEXIST;
1530 1560
1531 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1561 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1532 random_ether_addr(dev->dev_addr); 1562 eth_hw_addr_random(dev);
1533 1563
1534 mtu = ipgre_tunnel_bind_dev(dev); 1564 mtu = ipgre_tunnel_bind_dev(dev);
1535 if (!tb[IFLA_MTU]) 1565 if (!tb[IFLA_MTU])
@@ -1645,17 +1675,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1645 struct ip_tunnel *t = netdev_priv(dev); 1675 struct ip_tunnel *t = netdev_priv(dev);
1646 struct ip_tunnel_parm *p = &t->parms; 1676 struct ip_tunnel_parm *p = &t->parms;
1647 1677
1648 NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link); 1678 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1649 NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags); 1679 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1650 NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags); 1680 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1651 NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key); 1681 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1652 NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key); 1682 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1653 NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr); 1683 nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1654 NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr); 1684 nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1655 NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl); 1685 nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1656 NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos); 1686 nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1657 NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))); 1687 nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1658 1688 !!(p->iph.frag_off & htons(IP_DF))))
1689 goto nla_put_failure;
1659 return 0; 1690 return 0;
1660 1691
1661nla_put_failure: 1692nla_put_failure:
@@ -1709,7 +1740,7 @@ static int __init ipgre_init(void)
1709{ 1740{
1710 int err; 1741 int err;
1711 1742
1712 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1743 pr_info("GRE over IPv4 tunneling driver\n");
1713 1744
1714 err = register_pernet_device(&ipgre_net_ops); 1745 err = register_pernet_device(&ipgre_net_ops);
1715 if (err < 0) 1746 if (err < 0)
@@ -1717,7 +1748,7 @@ static int __init ipgre_init(void)
1717 1748
1718 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); 1749 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1719 if (err < 0) { 1750 if (err < 0) {
1720 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1751 pr_info("%s: can't add protocol\n", __func__);
1721 goto add_proto_failed; 1752 goto add_proto_failed;
1722 } 1753 }
1723 1754
@@ -1746,7 +1777,7 @@ static void __exit ipgre_fini(void)
1746 rtnl_link_unregister(&ipgre_tap_ops); 1777 rtnl_link_unregister(&ipgre_tap_ops);
1747 rtnl_link_unregister(&ipgre_link_ops); 1778 rtnl_link_unregister(&ipgre_link_ops);
1748 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) 1779 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1749 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1780 pr_info("%s: can't remove protocol\n", __func__);
1750 unregister_pernet_device(&ipgre_net_ops); 1781 unregister_pernet_device(&ipgre_net_ops);
1751} 1782}
1752 1783
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 073a9b01c40c..8590144ca330 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -113,7 +113,8 @@
113 * 2 of the License, or (at your option) any later version. 113 * 2 of the License, or (at your option) any later version.
114 */ 114 */
115 115
116#include <asm/system.h> 116#define pr_fmt(fmt) "IPv4: " fmt
117
117#include <linux/module.h> 118#include <linux/module.h>
118#include <linux/types.h> 119#include <linux/types.h>
119#include <linux/kernel.h> 120#include <linux/kernel.h>
@@ -148,7 +149,7 @@
148/* 149/*
149 * Process Router Attention IP option (RFC 2113) 150 * Process Router Attention IP option (RFC 2113)
150 */ 151 */
151int ip_call_ra_chain(struct sk_buff *skb) 152bool ip_call_ra_chain(struct sk_buff *skb)
152{ 153{
153 struct ip_ra_chain *ra; 154 struct ip_ra_chain *ra;
154 u8 protocol = ip_hdr(skb)->protocol; 155 u8 protocol = ip_hdr(skb)->protocol;
@@ -167,7 +168,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
167 net_eq(sock_net(sk), dev_net(dev))) { 168 net_eq(sock_net(sk), dev_net(dev))) {
168 if (ip_is_fragment(ip_hdr(skb))) { 169 if (ip_is_fragment(ip_hdr(skb))) {
169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) 170 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
170 return 1; 171 return true;
171 } 172 }
172 if (last) { 173 if (last) {
173 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 174 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -180,9 +181,9 @@ int ip_call_ra_chain(struct sk_buff *skb)
180 181
181 if (last) { 182 if (last) {
182 raw_rcv(last, skb); 183 raw_rcv(last, skb);
183 return 1; 184 return true;
184 } 185 }
185 return 0; 186 return false;
186} 187}
187 188
188static int ip_local_deliver_finish(struct sk_buff *skb) 189static int ip_local_deliver_finish(struct sk_buff *skb)
@@ -209,9 +210,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
209 int ret; 210 int ret;
210 211
211 if (!net_eq(net, &init_net) && !ipprot->netns_ok) { 212 if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
212 if (net_ratelimit()) 213 net_info_ratelimited("%s: proto %d isn't netns-ready\n",
213 printk("%s: proto %d isn't netns-ready\n", 214 __func__, protocol);
214 __func__, protocol);
215 kfree_skb(skb); 215 kfree_skb(skb);
216 goto out; 216 goto out;
217 } 217 }
@@ -265,7 +265,7 @@ int ip_local_deliver(struct sk_buff *skb)
265 ip_local_deliver_finish); 265 ip_local_deliver_finish);
266} 266}
267 267
268static inline int ip_rcv_options(struct sk_buff *skb) 268static inline bool ip_rcv_options(struct sk_buff *skb)
269{ 269{
270 struct ip_options *opt; 270 struct ip_options *opt;
271 const struct iphdr *iph; 271 const struct iphdr *iph;
@@ -297,10 +297,10 @@ static inline int ip_rcv_options(struct sk_buff *skb)
297 297
298 if (in_dev) { 298 if (in_dev) {
299 if (!IN_DEV_SOURCE_ROUTE(in_dev)) { 299 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
300 if (IN_DEV_LOG_MARTIANS(in_dev) && 300 if (IN_DEV_LOG_MARTIANS(in_dev))
301 net_ratelimit()) 301 net_info_ratelimited("source route option %pI4 -> %pI4\n",
302 printk(KERN_INFO "source route option %pI4 -> %pI4\n", 302 &iph->saddr,
303 &iph->saddr, &iph->daddr); 303 &iph->daddr);
304 goto drop; 304 goto drop;
305 } 305 }
306 } 306 }
@@ -309,9 +309,9 @@ static inline int ip_rcv_options(struct sk_buff *skb)
309 goto drop; 309 goto drop;
310 } 310 }
311 311
312 return 0; 312 return false;
313drop: 313drop:
314 return -1; 314 return true;
315} 315}
316 316
317static int ip_rcv_finish(struct sk_buff *skb) 317static int ip_rcv_finish(struct sk_buff *skb)
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 42dd1a90edea..708b99494e23 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) "IPv4: " fmt
13
12#include <linux/capability.h> 14#include <linux/capability.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
@@ -208,10 +210,10 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
208 * Simple and stupid 8), but the most efficient way. 210 * Simple and stupid 8), but the most efficient way.
209 */ 211 */
210 212
211void ip_options_fragment(struct sk_buff * skb) 213void ip_options_fragment(struct sk_buff *skb)
212{ 214{
213 unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); 215 unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
214 struct ip_options * opt = &(IPCB(skb)->opt); 216 struct ip_options *opt = &(IPCB(skb)->opt);
215 int l = opt->optlen; 217 int l = opt->optlen;
216 int optlen; 218 int optlen;
217 219
@@ -246,13 +248,13 @@ void ip_options_fragment(struct sk_buff * skb)
246 */ 248 */
247 249
248int ip_options_compile(struct net *net, 250int ip_options_compile(struct net *net,
249 struct ip_options * opt, struct sk_buff * skb) 251 struct ip_options *opt, struct sk_buff *skb)
250{ 252{
251 int l; 253 int l;
252 unsigned char * iph; 254 unsigned char *iph;
253 unsigned char * optptr; 255 unsigned char *optptr;
254 int optlen; 256 int optlen;
255 unsigned char * pp_ptr = NULL; 257 unsigned char *pp_ptr = NULL;
256 struct rtable *rt = NULL; 258 struct rtable *rt = NULL;
257 259
258 if (skb != NULL) { 260 if (skb != NULL) {
@@ -411,7 +413,7 @@ int ip_options_compile(struct net *net,
411 opt->is_changed = 1; 413 opt->is_changed = 1;
412 } 414 }
413 } else { 415 } else {
414 unsigned overflow = optptr[3]>>4; 416 unsigned int overflow = optptr[3]>>4;
415 if (overflow == 15) { 417 if (overflow == 15) {
416 pp_ptr = optptr + 3; 418 pp_ptr = optptr + 3;
417 goto error; 419 goto error;
@@ -471,20 +473,20 @@ EXPORT_SYMBOL(ip_options_compile);
471 * Undo all the changes done by ip_options_compile(). 473 * Undo all the changes done by ip_options_compile().
472 */ 474 */
473 475
474void ip_options_undo(struct ip_options * opt) 476void ip_options_undo(struct ip_options *opt)
475{ 477{
476 if (opt->srr) { 478 if (opt->srr) {
477 unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); 479 unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr);
478 memmove(optptr+7, optptr+3, optptr[1]-7); 480 memmove(optptr+7, optptr+3, optptr[1]-7);
479 memcpy(optptr+3, &opt->faddr, 4); 481 memcpy(optptr+3, &opt->faddr, 4);
480 } 482 }
481 if (opt->rr_needaddr) { 483 if (opt->rr_needaddr) {
482 unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); 484 unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr);
483 optptr[2] -= 4; 485 optptr[2] -= 4;
484 memset(&optptr[optptr[2]-1], 0, 4); 486 memset(&optptr[optptr[2]-1], 0, 4);
485 } 487 }
486 if (opt->ts) { 488 if (opt->ts) {
487 unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); 489 unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr);
488 if (opt->ts_needtime) { 490 if (opt->ts_needtime) {
489 optptr[2] -= 4; 491 optptr[2] -= 4;
490 memset(&optptr[optptr[2]-1], 0, 4); 492 memset(&optptr[optptr[2]-1], 0, 4);
@@ -547,8 +549,8 @@ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
547 549
548void ip_forward_options(struct sk_buff *skb) 550void ip_forward_options(struct sk_buff *skb)
549{ 551{
550 struct ip_options * opt = &(IPCB(skb)->opt); 552 struct ip_options *opt = &(IPCB(skb)->opt);
551 unsigned char * optptr; 553 unsigned char *optptr;
552 struct rtable *rt = skb_rtable(skb); 554 struct rtable *rt = skb_rtable(skb);
553 unsigned char *raw = skb_network_header(skb); 555 unsigned char *raw = skb_network_header(skb);
554 556
@@ -576,8 +578,10 @@ void ip_forward_options(struct sk_buff *skb)
576 ip_hdr(skb)->daddr = opt->nexthop; 578 ip_hdr(skb)->daddr = opt->nexthop;
577 ip_rt_get_source(&optptr[srrptr-1], skb, rt); 579 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
578 optptr[2] = srrptr+4; 580 optptr[2] = srrptr+4;
579 } else if (net_ratelimit()) 581 } else {
580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); 582 net_crit_ratelimited("%s(): Argh! Destination lost!\n",
583 __func__);
584 }
581 if (opt->ts_needaddr) { 585 if (opt->ts_needaddr) {
582 optptr = raw + opt->ts; 586 optptr = raw + opt->ts;
583 ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); 587 ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ff302bde8890..451f97c42eb4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -43,7 +43,6 @@
43 */ 43 */
44 44
45#include <asm/uaccess.h> 45#include <asm/uaccess.h>
46#include <asm/system.h>
47#include <linux/module.h> 46#include <linux/module.h>
48#include <linux/types.h> 47#include <linux/types.h>
49#include <linux/kernel.h> 48#include <linux/kernel.h>
@@ -215,8 +214,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
215 } 214 }
216 rcu_read_unlock(); 215 rcu_read_unlock();
217 216
218 if (net_ratelimit()) 217 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
219 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n"); 218 __func__);
220 kfree_skb(skb); 219 kfree_skb(skb);
221 return -EINVAL; 220 return -EINVAL;
222} 221}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8aa87c19fa00..0d11f234d615 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -90,7 +90,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
90static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) 90static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
91{ 91{
92 unsigned char optbuf[sizeof(struct ip_options) + 40]; 92 unsigned char optbuf[sizeof(struct ip_options) + 40];
93 struct ip_options * opt = (struct ip_options *)optbuf; 93 struct ip_options *opt = (struct ip_options *)optbuf;
94 94
95 if (IPCB(skb)->opt.optlen == 0) 95 if (IPCB(skb)->opt.optlen == 0)
96 return; 96 return;
@@ -147,7 +147,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
147void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) 147void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
148{ 148{
149 struct inet_sock *inet = inet_sk(skb->sk); 149 struct inet_sock *inet = inet_sk(skb->sk);
150 unsigned flags = inet->cmsg_flags; 150 unsigned int flags = inet->cmsg_flags;
151 151
152 /* Ordered by supposed usage frequency */ 152 /* Ordered by supposed usage frequency */
153 if (flags & 1) 153 if (flags & 1)
@@ -445,11 +445,6 @@ out:
445} 445}
446 446
447 447
448static void opt_kfree_rcu(struct rcu_head *head)
449{
450 kfree(container_of(head, struct ip_options_rcu, rcu));
451}
452
453/* 448/*
454 * Socket option code for IP. This is the end of the line after any 449 * Socket option code for IP. This is the end of the line after any
455 * TCP,UDP etc options on an IP socket. 450 * TCP,UDP etc options on an IP socket.
@@ -469,6 +464,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
469 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 464 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
470 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | 465 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
471 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || 466 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
467 optname == IP_UNICAST_IF ||
472 optname == IP_MULTICAST_TTL || 468 optname == IP_MULTICAST_TTL ||
473 optname == IP_MULTICAST_ALL || 469 optname == IP_MULTICAST_ALL ||
474 optname == IP_MULTICAST_LOOP || 470 optname == IP_MULTICAST_LOOP ||
@@ -525,7 +521,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
525 } 521 }
526 rcu_assign_pointer(inet->inet_opt, opt); 522 rcu_assign_pointer(inet->inet_opt, opt);
527 if (old) 523 if (old)
528 call_rcu(&old->rcu, opt_kfree_rcu); 524 kfree_rcu(old, rcu);
529 break; 525 break;
530 } 526 }
531 case IP_PKTINFO: 527 case IP_PKTINFO:
@@ -628,6 +624,35 @@ static int do_ip_setsockopt(struct sock *sk, int level,
628 goto e_inval; 624 goto e_inval;
629 inet->mc_loop = !!val; 625 inet->mc_loop = !!val;
630 break; 626 break;
627 case IP_UNICAST_IF:
628 {
629 struct net_device *dev = NULL;
630 int ifindex;
631
632 if (optlen != sizeof(int))
633 goto e_inval;
634
635 ifindex = (__force int)ntohl((__force __be32)val);
636 if (ifindex == 0) {
637 inet->uc_index = 0;
638 err = 0;
639 break;
640 }
641
642 dev = dev_get_by_index(sock_net(sk), ifindex);
643 err = -EADDRNOTAVAIL;
644 if (!dev)
645 break;
646 dev_put(dev);
647
648 err = -EINVAL;
649 if (sk->sk_bound_dev_if)
650 break;
651
652 inet->uc_index = ifindex;
653 err = 0;
654 break;
655 }
631 case IP_MULTICAST_IF: 656 case IP_MULTICAST_IF:
632 { 657 {
633 struct ip_mreqn mreq; 658 struct ip_mreqn mreq;
@@ -648,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
648 break; 673 break;
649 } else { 674 } else {
650 memset(&mreq, 0, sizeof(mreq)); 675 memset(&mreq, 0, sizeof(mreq));
651 if (optlen >= sizeof(struct in_addr) && 676 if (optlen >= sizeof(struct ip_mreq)) {
652 copy_from_user(&mreq.imr_address, optval, 677 if (copy_from_user(&mreq, optval,
653 sizeof(struct in_addr))) 678 sizeof(struct ip_mreq)))
654 break; 679 break;
680 } else if (optlen >= sizeof(struct in_addr)) {
681 if (copy_from_user(&mreq.imr_address, optval,
682 sizeof(struct in_addr)))
683 break;
684 }
655 } 685 }
656 686
657 if (!mreq.imr_ifindex) { 687 if (!mreq.imr_ifindex) {
@@ -1069,7 +1099,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
1069 */ 1099 */
1070 1100
1071static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1101static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1072 char __user *optval, int __user *optlen, unsigned flags) 1102 char __user *optval, int __user *optlen, unsigned int flags)
1073{ 1103{
1074 struct inet_sock *inet = inet_sk(sk); 1104 struct inet_sock *inet = inet_sk(sk);
1075 int val; 1105 int val;
@@ -1178,6 +1208,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1178 case IP_MULTICAST_LOOP: 1208 case IP_MULTICAST_LOOP:
1179 val = inet->mc_loop; 1209 val = inet->mc_loop;
1180 break; 1210 break;
1211 case IP_UNICAST_IF:
1212 val = (__force int)htonl((__u32) inet->uc_index);
1213 break;
1181 case IP_MULTICAST_IF: 1214 case IP_MULTICAST_IF:
1182 { 1215 {
1183 struct in_addr addr; 1216 struct in_addr addr;
@@ -1256,6 +1289,10 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1256 int hlim = inet->mc_ttl; 1289 int hlim = inet->mc_ttl;
1257 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); 1290 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1258 } 1291 }
1292 if (inet->cmsg_flags & IP_CMSG_TOS) {
1293 int tos = inet->rcv_tos;
1294 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1295 }
1259 len -= msg.msg_controllen; 1296 len -= msg.msg_controllen;
1260 return put_user(len, optlen); 1297 return put_user(len, optlen);
1261 } 1298 }
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c857f6f49b03..63b64c45a826 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -156,11 +156,11 @@ static const struct net_protocol ipcomp4_protocol = {
156static int __init ipcomp4_init(void) 156static int __init ipcomp4_init(void)
157{ 157{
158 if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) { 158 if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) {
159 printk(KERN_INFO "ipcomp init: can't add xfrm type\n"); 159 pr_info("%s: can't add xfrm type\n", __func__);
160 return -EAGAIN; 160 return -EAGAIN;
161 } 161 }
162 if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) { 162 if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
163 printk(KERN_INFO "ipcomp init: can't add protocol\n"); 163 pr_info("%s: can't add protocol\n", __func__);
164 xfrm_unregister_type(&ipcomp_type, AF_INET); 164 xfrm_unregister_type(&ipcomp_type, AF_INET);
165 return -EAGAIN; 165 return -EAGAIN;
166 } 166 }
@@ -170,9 +170,9 @@ static int __init ipcomp4_init(void)
170static void __exit ipcomp4_fini(void) 170static void __exit ipcomp4_fini(void)
171{ 171{
172 if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) 172 if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
173 printk(KERN_INFO "ip ipcomp close: can't remove protocol\n"); 173 pr_info("%s: can't remove protocol\n", __func__);
174 if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0) 174 if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
175 printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n"); 175 pr_info("%s: can't remove xfrm type\n", __func__);
176} 176}
177 177
178module_init(ipcomp4_init); 178module_init(ipcomp4_init);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 6e412a60a91f..67e8a6b086ea 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -214,7 +214,7 @@ static int __init ic_open_devs(void)
214 if (!(dev->flags & IFF_LOOPBACK)) 214 if (!(dev->flags & IFF_LOOPBACK))
215 continue; 215 continue;
216 if (dev_change_flags(dev, dev->flags | IFF_UP) < 0) 216 if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
217 printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); 217 pr_err("IP-Config: Failed to open %s\n", dev->name);
218 } 218 }
219 219
220 for_each_netdev(&init_net, dev) { 220 for_each_netdev(&init_net, dev) {
@@ -223,7 +223,8 @@ static int __init ic_open_devs(void)
223 if (dev->mtu >= 364) 223 if (dev->mtu >= 364)
224 able |= IC_BOOTP; 224 able |= IC_BOOTP;
225 else 225 else
226 printk(KERN_WARNING "DHCP/BOOTP: Ignoring device %s, MTU %d too small", dev->name, dev->mtu); 226 pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small",
227 dev->name, dev->mtu);
227 if (!(dev->flags & IFF_NOARP)) 228 if (!(dev->flags & IFF_NOARP))
228 able |= IC_RARP; 229 able |= IC_RARP;
229 able &= ic_proto_enabled; 230 able &= ic_proto_enabled;
@@ -231,7 +232,8 @@ static int __init ic_open_devs(void)
231 continue; 232 continue;
232 oflags = dev->flags; 233 oflags = dev->flags;
233 if (dev_change_flags(dev, oflags | IFF_UP) < 0) { 234 if (dev_change_flags(dev, oflags | IFF_UP) < 0) {
234 printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); 235 pr_err("IP-Config: Failed to open %s\n",
236 dev->name);
235 continue; 237 continue;
236 } 238 }
237 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { 239 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
@@ -273,9 +275,10 @@ have_carrier:
273 275
274 if (!ic_first_dev) { 276 if (!ic_first_dev) {
275 if (user_dev_name[0]) 277 if (user_dev_name[0])
276 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); 278 pr_err("IP-Config: Device `%s' not found\n",
279 user_dev_name);
277 else 280 else
278 printk(KERN_ERR "IP-Config: No network devices available.\n"); 281 pr_err("IP-Config: No network devices available\n");
279 return -ENODEV; 282 return -ENODEV;
280 } 283 }
281 return 0; 284 return 0;
@@ -359,17 +362,20 @@ static int __init ic_setup_if(void)
359 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); 362 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
360 set_sockaddr(sin, ic_myaddr, 0); 363 set_sockaddr(sin, ic_myaddr, 0);
361 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) { 364 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
362 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err); 365 pr_err("IP-Config: Unable to set interface address (%d)\n",
366 err);
363 return -1; 367 return -1;
364 } 368 }
365 set_sockaddr(sin, ic_netmask, 0); 369 set_sockaddr(sin, ic_netmask, 0);
366 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) { 370 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
367 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err); 371 pr_err("IP-Config: Unable to set interface netmask (%d)\n",
372 err);
368 return -1; 373 return -1;
369 } 374 }
370 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0); 375 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
371 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) { 376 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
372 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err); 377 pr_err("IP-Config: Unable to set interface broadcast address (%d)\n",
378 err);
373 return -1; 379 return -1;
374 } 380 }
375 /* Handle the case where we need non-standard MTU on the boot link (a network 381 /* Handle the case where we need non-standard MTU on the boot link (a network
@@ -380,8 +386,8 @@ static int __init ic_setup_if(void)
380 strcpy(ir.ifr_name, ic_dev->name); 386 strcpy(ir.ifr_name, ic_dev->name);
381 ir.ifr_mtu = ic_dev_mtu; 387 ir.ifr_mtu = ic_dev_mtu;
382 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0) 388 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
383 printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n", 389 pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
384 ic_dev_mtu, err); 390 ic_dev_mtu, err);
385 } 391 }
386 return 0; 392 return 0;
387} 393}
@@ -396,7 +402,7 @@ static int __init ic_setup_routes(void)
396 402
397 memset(&rm, 0, sizeof(rm)); 403 memset(&rm, 0, sizeof(rm));
398 if ((ic_gateway ^ ic_myaddr) & ic_netmask) { 404 if ((ic_gateway ^ ic_myaddr) & ic_netmask) {
399 printk(KERN_ERR "IP-Config: Gateway not on directly connected network.\n"); 405 pr_err("IP-Config: Gateway not on directly connected network\n");
400 return -1; 406 return -1;
401 } 407 }
402 set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0); 408 set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0);
@@ -404,7 +410,8 @@ static int __init ic_setup_routes(void)
404 set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0); 410 set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0);
405 rm.rt_flags = RTF_UP | RTF_GATEWAY; 411 rm.rt_flags = RTF_UP | RTF_GATEWAY;
406 if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) { 412 if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) {
407 printk(KERN_ERR "IP-Config: Cannot add default route (%d).\n", err); 413 pr_err("IP-Config: Cannot add default route (%d)\n",
414 err);
408 return -1; 415 return -1;
409 } 416 }
410 } 417 }
@@ -437,8 +444,8 @@ static int __init ic_defaults(void)
437 else if (IN_CLASSC(ntohl(ic_myaddr))) 444 else if (IN_CLASSC(ntohl(ic_myaddr)))
438 ic_netmask = htonl(IN_CLASSC_NET); 445 ic_netmask = htonl(IN_CLASSC_NET);
439 else { 446 else {
440 printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n", 447 pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
441 &ic_myaddr); 448 &ic_myaddr);
442 return -1; 449 return -1;
443 } 450 }
444 printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); 451 printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask);
@@ -688,8 +695,8 @@ ic_dhcp_init_options(u8 *options)
688 e += len; 695 e += len;
689 } 696 }
690 if (*vendor_class_identifier) { 697 if (*vendor_class_identifier) {
691 printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", 698 pr_info("DHCP: sending class identifier \"%s\"\n",
692 vendor_class_identifier); 699 vendor_class_identifier);
693 *e++ = 60; /* Class-identifier */ 700 *e++ = 60; /* Class-identifier */
694 len = strlen(vendor_class_identifier); 701 len = strlen(vendor_class_identifier);
695 *e++ = len; 702 *e++ = len;
@@ -801,8 +808,6 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
801 b->op = BOOTP_REQUEST; 808 b->op = BOOTP_REQUEST;
802 if (dev->type < 256) /* check for false types */ 809 if (dev->type < 256) /* check for false types */
803 b->htype = dev->type; 810 b->htype = dev->type;
804 else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */
805 b->htype = ARPHRD_IEEE802;
806 else if (dev->type == ARPHRD_FDDI) 811 else if (dev->type == ARPHRD_FDDI)
807 b->htype = ARPHRD_ETHER; 812 b->htype = ARPHRD_ETHER;
808 else { 813 else {
@@ -948,9 +953,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
948 953
949 /* Fragments are not supported */ 954 /* Fragments are not supported */
950 if (ip_is_fragment(h)) { 955 if (ip_is_fragment(h)) {
951 if (net_ratelimit()) 956 net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n");
952 printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented "
953 "reply.\n");
954 goto drop; 957 goto drop;
955 } 958 }
956 959
@@ -998,17 +1001,14 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
998 /* Is it a reply to our BOOTP request? */ 1001 /* Is it a reply to our BOOTP request? */
999 if (b->op != BOOTP_REPLY || 1002 if (b->op != BOOTP_REPLY ||
1000 b->xid != d->xid) { 1003 b->xid != d->xid) {
1001 if (net_ratelimit()) 1004 net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
1002 printk(KERN_ERR "DHCP/BOOTP: Reply not for us, " 1005 b->op, b->xid);
1003 "op[%x] xid[%x]\n",
1004 b->op, b->xid);
1005 goto drop_unlock; 1006 goto drop_unlock;
1006 } 1007 }
1007 1008
1008 /* Is it a reply for the device we are configuring? */ 1009 /* Is it a reply for the device we are configuring? */
1009 if (b->xid != ic_dev_xid) { 1010 if (b->xid != ic_dev_xid) {
1010 if (net_ratelimit()) 1011 net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n");
1011 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
1012 goto drop_unlock; 1012 goto drop_unlock;
1013 } 1013 }
1014 1014
@@ -1146,17 +1146,17 @@ static int __init ic_dynamic(void)
1146 * are missing, and without DHCP/BOOTP/RARP we are unable to get it. 1146 * are missing, and without DHCP/BOOTP/RARP we are unable to get it.
1147 */ 1147 */
1148 if (!ic_proto_enabled) { 1148 if (!ic_proto_enabled) {
1149 printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); 1149 pr_err("IP-Config: Incomplete network configuration information\n");
1150 return -1; 1150 return -1;
1151 } 1151 }
1152 1152
1153#ifdef IPCONFIG_BOOTP 1153#ifdef IPCONFIG_BOOTP
1154 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP) 1154 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP)
1155 printk(KERN_ERR "DHCP/BOOTP: No suitable device found.\n"); 1155 pr_err("DHCP/BOOTP: No suitable device found\n");
1156#endif 1156#endif
1157#ifdef IPCONFIG_RARP 1157#ifdef IPCONFIG_RARP
1158 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP) 1158 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP)
1159 printk(KERN_ERR "RARP: No suitable device found.\n"); 1159 pr_err("RARP: No suitable device found\n");
1160#endif 1160#endif
1161 1161
1162 if (!ic_proto_have_if) 1162 if (!ic_proto_have_if)
@@ -1183,17 +1183,17 @@ static int __init ic_dynamic(void)
1183 * [Actually we could now, but the nothing else running note still 1183 * [Actually we could now, but the nothing else running note still
1184 * applies.. - AC] 1184 * applies.. - AC]
1185 */ 1185 */
1186 printk(KERN_NOTICE "Sending %s%s%s requests .", 1186 pr_notice("Sending %s%s%s requests .",
1187 do_bootp 1187 do_bootp
1188 ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "", 1188 ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "",
1189 (do_bootp && do_rarp) ? " and " : "", 1189 (do_bootp && do_rarp) ? " and " : "",
1190 do_rarp ? "RARP" : ""); 1190 do_rarp ? "RARP" : "");
1191 1191
1192 start_jiffies = jiffies; 1192 start_jiffies = jiffies;
1193 d = ic_first_dev; 1193 d = ic_first_dev;
1194 retries = CONF_SEND_RETRIES; 1194 retries = CONF_SEND_RETRIES;
1195 get_random_bytes(&timeout, sizeof(timeout)); 1195 get_random_bytes(&timeout, sizeof(timeout));
1196 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); 1196 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
1197 for (;;) { 1197 for (;;) {
1198 /* Track the device we are configuring */ 1198 /* Track the device we are configuring */
1199 ic_dev_xid = d->xid; 1199 ic_dev_xid = d->xid;
@@ -1216,13 +1216,13 @@ static int __init ic_dynamic(void)
1216 (ic_proto_enabled & IC_USE_DHCP) && 1216 (ic_proto_enabled & IC_USE_DHCP) &&
1217 ic_dhcp_msgtype != DHCPACK) { 1217 ic_dhcp_msgtype != DHCPACK) {
1218 ic_got_reply = 0; 1218 ic_got_reply = 0;
1219 printk(KERN_CONT ","); 1219 pr_cont(",");
1220 continue; 1220 continue;
1221 } 1221 }
1222#endif /* IPCONFIG_DHCP */ 1222#endif /* IPCONFIG_DHCP */
1223 1223
1224 if (ic_got_reply) { 1224 if (ic_got_reply) {
1225 printk(KERN_CONT " OK\n"); 1225 pr_cont(" OK\n");
1226 break; 1226 break;
1227 } 1227 }
1228 1228
@@ -1230,7 +1230,7 @@ static int __init ic_dynamic(void)
1230 continue; 1230 continue;
1231 1231
1232 if (! --retries) { 1232 if (! --retries) {
1233 printk(KERN_CONT " timed out!\n"); 1233 pr_cont(" timed out!\n");
1234 break; 1234 break;
1235 } 1235 }
1236 1236
@@ -1240,7 +1240,7 @@ static int __init ic_dynamic(void)
1240 if (timeout > CONF_TIMEOUT_MAX) 1240 if (timeout > CONF_TIMEOUT_MAX)
1241 timeout = CONF_TIMEOUT_MAX; 1241 timeout = CONF_TIMEOUT_MAX;
1242 1242
1243 printk(KERN_CONT "."); 1243 pr_cont(".");
1244 } 1244 }
1245 1245
1246#ifdef IPCONFIG_BOOTP 1246#ifdef IPCONFIG_BOOTP
@@ -1260,8 +1260,8 @@ static int __init ic_dynamic(void)
1260 printk("IP-Config: Got %s answer from %pI4, ", 1260 printk("IP-Config: Got %s answer from %pI4, ",
1261 ((ic_got_reply & IC_RARP) ? "RARP" 1261 ((ic_got_reply & IC_RARP) ? "RARP"
1262 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1262 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1263 &ic_servaddr); 1263 &ic_servaddr);
1264 printk(KERN_CONT "my address is %pI4\n", &ic_myaddr); 1264 pr_cont("my address is %pI4\n", &ic_myaddr);
1265 1265
1266 return 0; 1266 return 0;
1267} 1267}
@@ -1437,24 +1437,22 @@ static int __init ip_auto_config(void)
1437 */ 1437 */
1438#ifdef CONFIG_ROOT_NFS 1438#ifdef CONFIG_ROOT_NFS
1439 if (ROOT_DEV == Root_NFS) { 1439 if (ROOT_DEV == Root_NFS) {
1440 printk(KERN_ERR 1440 pr_err("IP-Config: Retrying forever (NFS root)...\n");
1441 "IP-Config: Retrying forever (NFS root)...\n");
1442 goto try_try_again; 1441 goto try_try_again;
1443 } 1442 }
1444#endif 1443#endif
1445 1444
1446 if (--retries) { 1445 if (--retries) {
1447 printk(KERN_ERR 1446 pr_err("IP-Config: Reopening network devices...\n");
1448 "IP-Config: Reopening network devices...\n");
1449 goto try_try_again; 1447 goto try_try_again;
1450 } 1448 }
1451 1449
1452 /* Oh, well. At least we tried. */ 1450 /* Oh, well. At least we tried. */
1453 printk(KERN_ERR "IP-Config: Auto-configuration of network failed.\n"); 1451 pr_err("IP-Config: Auto-configuration of network failed\n");
1454 return -1; 1452 return -1;
1455 } 1453 }
1456#else /* !DYNAMIC */ 1454#else /* !DYNAMIC */
1457 printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); 1455 pr_err("IP-Config: Incomplete network configuration information\n");
1458 ic_close_devs(); 1456 ic_close_devs();
1459 return -1; 1457 return -1;
1460#endif /* IPCONFIG_DYNAMIC */ 1458#endif /* IPCONFIG_DYNAMIC */
@@ -1492,19 +1490,16 @@ static int __init ip_auto_config(void)
1492 /* 1490 /*
1493 * Clue in the operator. 1491 * Clue in the operator.
1494 */ 1492 */
1495 printk("IP-Config: Complete:\n"); 1493 pr_info("IP-Config: Complete:\n");
1496 printk(" device=%s", ic_dev->name); 1494 pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n",
1497 printk(KERN_CONT ", addr=%pI4", &ic_myaddr); 1495 ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway);
1498 printk(KERN_CONT ", mask=%pI4", &ic_netmask); 1496 pr_info(" host=%s, domain=%s, nis-domain=%s\n",
1499 printk(KERN_CONT ", gw=%pI4", &ic_gateway); 1497 utsname()->nodename, ic_domain, utsname()->domainname);
1500 printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s", 1498 pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s",
1501 utsname()->nodename, ic_domain, utsname()->domainname); 1499 &ic_servaddr, &root_server_addr, root_server_path);
1502 printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
1503 printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
1504 printk(KERN_CONT ", rootpath=%s", root_server_path);
1505 if (ic_dev_mtu) 1500 if (ic_dev_mtu)
1506 printk(KERN_CONT ", mtu=%d", ic_dev_mtu); 1501 pr_cont(", mtu=%d", ic_dev_mtu);
1507 printk(KERN_CONT "\n"); 1502 pr_cont("\n");
1508#endif /* !SILENT */ 1503#endif /* !SILENT */
1509 1504
1510 return 0; 1505 return 0;
@@ -1626,22 +1621,21 @@ static int __init ip_auto_config_setup(char *addrs)
1626 1621
1627 return 1; 1622 return 1;
1628} 1623}
1624__setup("ip=", ip_auto_config_setup);
1629 1625
1630static int __init nfsaddrs_config_setup(char *addrs) 1626static int __init nfsaddrs_config_setup(char *addrs)
1631{ 1627{
1632 return ip_auto_config_setup(addrs); 1628 return ip_auto_config_setup(addrs);
1633} 1629}
1630__setup("nfsaddrs=", nfsaddrs_config_setup);
1634 1631
1635static int __init vendor_class_identifier_setup(char *addrs) 1632static int __init vendor_class_identifier_setup(char *addrs)
1636{ 1633{
1637 if (strlcpy(vendor_class_identifier, addrs, 1634 if (strlcpy(vendor_class_identifier, addrs,
1638 sizeof(vendor_class_identifier)) 1635 sizeof(vendor_class_identifier))
1639 >= sizeof(vendor_class_identifier)) 1636 >= sizeof(vendor_class_identifier))
1640 printk(KERN_WARNING "DHCP: vendorclass too long, truncated to \"%s\"", 1637 pr_warn("DHCP: vendorclass too long, truncated to \"%s\"",
1641 vendor_class_identifier); 1638 vendor_class_identifier);
1642 return 1; 1639 return 1;
1643} 1640}
1644
1645__setup("ip=", ip_auto_config_setup);
1646__setup("nfsaddrs=", nfsaddrs_config_setup);
1647__setup("dhcpclass=", vendor_class_identifier_setup); 1641__setup("dhcpclass=", vendor_class_identifier_setup);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 22a199315309..2d0f99bf61b3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,33 +144,48 @@ static void ipip_dev_free(struct net_device *dev);
144 144
145/* often modified stats are per cpu, other are shared (netdev->stats) */ 145/* often modified stats are per cpu, other are shared (netdev->stats) */
146struct pcpu_tstats { 146struct pcpu_tstats {
147 unsigned long rx_packets; 147 u64 rx_packets;
148 unsigned long rx_bytes; 148 u64 rx_bytes;
149 unsigned long tx_packets; 149 u64 tx_packets;
150 unsigned long tx_bytes; 150 u64 tx_bytes;
151} __attribute__((aligned(4*sizeof(unsigned long)))); 151 struct u64_stats_sync syncp;
152};
152 153
153static struct net_device_stats *ipip_get_stats(struct net_device *dev) 154static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
155 struct rtnl_link_stats64 *tot)
154{ 156{
155 struct pcpu_tstats sum = { 0 };
156 int i; 157 int i;
157 158
158 for_each_possible_cpu(i) { 159 for_each_possible_cpu(i) {
159 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 160 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
160 161 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
161 sum.rx_packets += tstats->rx_packets; 162 unsigned int start;
162 sum.rx_bytes += tstats->rx_bytes; 163
163 sum.tx_packets += tstats->tx_packets; 164 do {
164 sum.tx_bytes += tstats->tx_bytes; 165 start = u64_stats_fetch_begin_bh(&tstats->syncp);
166 rx_packets = tstats->rx_packets;
167 tx_packets = tstats->tx_packets;
168 rx_bytes = tstats->rx_bytes;
169 tx_bytes = tstats->tx_bytes;
170 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
171
172 tot->rx_packets += rx_packets;
173 tot->tx_packets += tx_packets;
174 tot->rx_bytes += rx_bytes;
175 tot->tx_bytes += tx_bytes;
165 } 176 }
166 dev->stats.rx_packets = sum.rx_packets; 177
167 dev->stats.rx_bytes = sum.rx_bytes; 178 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
168 dev->stats.tx_packets = sum.tx_packets; 179 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
169 dev->stats.tx_bytes = sum.tx_bytes; 180 tot->tx_dropped = dev->stats.tx_dropped;
170 return &dev->stats; 181 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
182 tot->tx_errors = dev->stats.tx_errors;
183 tot->collisions = dev->stats.collisions;
184
185 return tot;
171} 186}
172 187
173static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, 188static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
174 __be32 remote, __be32 local) 189 __be32 remote, __be32 local)
175{ 190{
176 unsigned int h0 = HASH(remote); 191 unsigned int h0 = HASH(remote);
@@ -245,7 +260,7 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
245 rcu_assign_pointer(*tp, t); 260 rcu_assign_pointer(*tp, t);
246} 261}
247 262
248static struct ip_tunnel * ipip_tunnel_locate(struct net *net, 263static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
249 struct ip_tunnel_parm *parms, int create) 264 struct ip_tunnel_parm *parms, int create)
250{ 265{
251 __be32 remote = parms->iph.daddr; 266 __be32 remote = parms->iph.daddr;
@@ -404,8 +419,10 @@ static int ipip_rcv(struct sk_buff *skb)
404 skb->pkt_type = PACKET_HOST; 419 skb->pkt_type = PACKET_HOST;
405 420
406 tstats = this_cpu_ptr(tunnel->dev->tstats); 421 tstats = this_cpu_ptr(tunnel->dev->tstats);
422 u64_stats_update_begin(&tstats->syncp);
407 tstats->rx_packets++; 423 tstats->rx_packets++;
408 tstats->rx_bytes += skb->len; 424 tstats->rx_bytes += skb->len;
425 u64_stats_update_end(&tstats->syncp);
409 426
410 __skb_tunnel_rx(skb, tunnel->dev); 427 __skb_tunnel_rx(skb, tunnel->dev);
411 428
@@ -454,8 +471,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
454 dev->stats.tx_fifo_errors++; 471 dev->stats.tx_fifo_errors++;
455 goto tx_error; 472 goto tx_error;
456 } 473 }
457 if ((dst = rt->rt_gateway) == 0) 474 dst = rt->rt_gateway;
458 goto tx_error_icmp;
459 } 475 }
460 476
461 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 477 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -731,7 +747,7 @@ static const struct net_device_ops ipip_netdev_ops = {
731 .ndo_start_xmit = ipip_tunnel_xmit, 747 .ndo_start_xmit = ipip_tunnel_xmit,
732 .ndo_do_ioctl = ipip_tunnel_ioctl, 748 .ndo_do_ioctl = ipip_tunnel_ioctl,
733 .ndo_change_mtu = ipip_tunnel_change_mtu, 749 .ndo_change_mtu = ipip_tunnel_change_mtu,
734 .ndo_get_stats = ipip_get_stats, 750 .ndo_get_stats64 = ipip_get_stats64,
735}; 751};
736 752
737static void ipip_dev_free(struct net_device *dev) 753static void ipip_dev_free(struct net_device *dev)
@@ -893,7 +909,7 @@ static int __init ipip_init(void)
893 err = xfrm4_tunnel_register(&ipip_handler, AF_INET); 909 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
894 if (err < 0) { 910 if (err < 0) {
895 unregister_pernet_device(&ipip_net_ops); 911 unregister_pernet_device(&ipip_net_ops);
896 printk(KERN_INFO "ipip init: can't register tunnel\n"); 912 pr_info("%s: can't register tunnel\n", __func__);
897 } 913 }
898 return err; 914 return err;
899} 915}
@@ -901,7 +917,7 @@ static int __init ipip_init(void)
901static void __exit ipip_fini(void) 917static void __exit ipip_fini(void)
902{ 918{
903 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) 919 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
904 printk(KERN_INFO "ipip close: can't deregister tunnel\n"); 920 pr_info("%s: can't deregister tunnel\n", __func__);
905 921
906 unregister_pernet_device(&ipip_net_ops); 922 unregister_pernet_device(&ipip_net_ops);
907} 923}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7bc2db6db8d4..a9e519ad6db5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -26,7 +26,6 @@
26 * 26 *
27 */ 27 */
28 28
29#include <asm/system.h>
30#include <asm/uaccess.h> 29#include <asm/uaccess.h>
31#include <linux/types.h> 30#include <linux/types.h>
32#include <linux/capability.h> 31#include <linux/capability.h>
@@ -950,8 +949,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
950 ret = sock_queue_rcv_skb(mroute_sk, skb); 949 ret = sock_queue_rcv_skb(mroute_sk, skb);
951 rcu_read_unlock(); 950 rcu_read_unlock();
952 if (ret < 0) { 951 if (ret < 0) {
953 if (net_ratelimit()) 952 net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
954 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
955 kfree_skb(skb); 953 kfree_skb(skb);
956 } 954 }
957 955
@@ -2120,15 +2118,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2120 rtm->rtm_src_len = 32; 2118 rtm->rtm_src_len = 32;
2121 rtm->rtm_tos = 0; 2119 rtm->rtm_tos = 0;
2122 rtm->rtm_table = mrt->id; 2120 rtm->rtm_table = mrt->id;
2123 NLA_PUT_U32(skb, RTA_TABLE, mrt->id); 2121 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2122 goto nla_put_failure;
2124 rtm->rtm_type = RTN_MULTICAST; 2123 rtm->rtm_type = RTN_MULTICAST;
2125 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2124 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2126 rtm->rtm_protocol = RTPROT_UNSPEC; 2125 rtm->rtm_protocol = RTPROT_UNSPEC;
2127 rtm->rtm_flags = 0; 2126 rtm->rtm_flags = 0;
2128 2127
2129 NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); 2128 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
2130 NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); 2129 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
2131 2130 goto nla_put_failure;
2132 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2131 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
2133 goto nla_put_failure; 2132 goto nla_put_failure;
2134 2133
@@ -2538,7 +2537,7 @@ int __init ip_mr_init(void)
2538 goto reg_notif_fail; 2537 goto reg_notif_fail;
2539#ifdef CONFIG_IP_PIMSM_V2 2538#ifdef CONFIG_IP_PIMSM_V2
2540 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { 2539 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2541 printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n"); 2540 pr_err("%s: can't add PIM protocol\n", __func__);
2542 err = -EAGAIN; 2541 err = -EAGAIN;
2543 goto add_proto_fail; 2542 goto add_proto_fail;
2544 } 2543 }
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4f47e064e262..ed1b36783192 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
12#include <net/netfilter/nf_queue.h> 12#include <net/netfilter/nf_queue.h>
13 13
14/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 14/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
15int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) 15int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
16{ 16{
17 struct net *net = dev_net(skb_dst(skb)->dev); 17 struct net *net = dev_net(skb_dst(skb)->dev);
18 const struct iphdr *iph = ip_hdr(skb); 18 const struct iphdr *iph = ip_hdr(skb);
@@ -237,13 +237,3 @@ static void ipv4_netfilter_fini(void)
237 237
238module_init(ipv4_netfilter_init); 238module_init(ipv4_netfilter_init);
239module_exit(ipv4_netfilter_fini); 239module_exit(ipv4_netfilter_fini);
240
241#ifdef CONFIG_SYSCTL
242struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
243 { .procname = "net", },
244 { .procname = "ipv4", },
245 { .procname = "netfilter", },
246 { }
247};
248EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
249#endif /* CONFIG_SYSCTL */
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 74dfc9e5211f..fcc543cd987a 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -123,15 +123,6 @@ config IP_NF_TARGET_REJECT
123 123
124 To compile it as a module, choose M here. If unsure, say N. 124 To compile it as a module, choose M here. If unsure, say N.
125 125
126config IP_NF_TARGET_LOG
127 tristate "LOG target support"
128 default m if NETFILTER_ADVANCED=n
129 help
130 This option adds a `LOG' target, which allows you to create rules in
131 any iptables table which records the packet header to the syslog.
132
133 To compile it as a module, choose M here. If unsure, say N.
134
135config IP_NF_TARGET_ULOG 126config IP_NF_TARGET_ULOG
136 tristate "ULOG target support" 127 tristate "ULOG target support"
137 default m if NETFILTER_ADVANCED=n 128 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 213a462b739b..c20674dc9452 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
54# targets 54# targets
55obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 55obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o 56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
57obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
58obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o 57obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
59obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o 58obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
60obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o 59obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
@@ -67,6 +66,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
67 66
68# just filtering instance of ARP tables for now 67# just filtering instance of ARP tables for now
69obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o 68obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
70
71obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
72
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f68917f..97e61eadf580 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -221,9 +221,8 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
221static unsigned int 221static unsigned int
222arpt_error(struct sk_buff *skb, const struct xt_action_param *par) 222arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
223{ 223{
224 if (net_ratelimit()) 224 net_err_ratelimited("arp_tables: error: '%s'\n",
225 pr_err("arp_tables: error: '%s'\n", 225 (const char *)par->targinfo);
226 (const char *)par->targinfo);
227 226
228 return NF_DROP; 227 return NF_DROP;
229} 228}
@@ -303,7 +302,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
303 if (v < 0) { 302 if (v < 0) {
304 /* Pop from stack? */ 303 /* Pop from stack? */
305 if (v != XT_RETURN) { 304 if (v != XT_RETURN) {
306 verdict = (unsigned)(-v) - 1; 305 verdict = (unsigned int)(-v) - 1;
307 break; 306 break;
308 } 307 }
309 e = back; 308 e = back;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
deleted file mode 100644
index 94d45e1f8882..000000000000
--- a/net/ipv4/netfilter/ip_queue.c
+++ /dev/null
@@ -1,639 +0,0 @@
1/*
2 * This is a module which is used for queueing IPv4 packets and
3 * communicating with userspace via netlink.
4 *
5 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
6 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/init.h>
15#include <linux/ip.h>
16#include <linux/notifier.h>
17#include <linux/netdevice.h>
18#include <linux/netfilter.h>
19#include <linux/netfilter_ipv4/ip_queue.h>
20#include <linux/netfilter_ipv4/ip_tables.h>
21#include <linux/netlink.h>
22#include <linux/spinlock.h>
23#include <linux/sysctl.h>
24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
26#include <linux/security.h>
27#include <linux/net.h>
28#include <linux/mutex.h>
29#include <linux/slab.h>
30#include <net/net_namespace.h>
31#include <net/sock.h>
32#include <net/route.h>
33#include <net/netfilter/nf_queue.h>
34#include <net/ip.h>
35
36#define IPQ_QMAX_DEFAULT 1024
37#define IPQ_PROC_FS_NAME "ip_queue"
38#define NET_IPQ_QMAX 2088
39#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
40
41typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
42
43static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
44static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
45static DEFINE_SPINLOCK(queue_lock);
46static int peer_pid __read_mostly;
47static unsigned int copy_range __read_mostly;
48static unsigned int queue_total;
49static unsigned int queue_dropped = 0;
50static unsigned int queue_user_dropped = 0;
51static struct sock *ipqnl __read_mostly;
52static LIST_HEAD(queue_list);
53static DEFINE_MUTEX(ipqnl_mutex);
54
55static inline void
56__ipq_enqueue_entry(struct nf_queue_entry *entry)
57{
58 list_add_tail(&entry->list, &queue_list);
59 queue_total++;
60}
61
62static inline int
63__ipq_set_mode(unsigned char mode, unsigned int range)
64{
65 int status = 0;
66
67 switch(mode) {
68 case IPQ_COPY_NONE:
69 case IPQ_COPY_META:
70 copy_mode = mode;
71 copy_range = 0;
72 break;
73
74 case IPQ_COPY_PACKET:
75 if (range > 0xFFFF)
76 range = 0xFFFF;
77 copy_range = range;
78 copy_mode = mode;
79 break;
80
81 default:
82 status = -EINVAL;
83
84 }
85 return status;
86}
87
88static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
89
90static inline void
91__ipq_reset(void)
92{
93 peer_pid = 0;
94 net_disable_timestamp();
95 __ipq_set_mode(IPQ_COPY_NONE, 0);
96 __ipq_flush(NULL, 0);
97}
98
99static struct nf_queue_entry *
100ipq_find_dequeue_entry(unsigned long id)
101{
102 struct nf_queue_entry *entry = NULL, *i;
103
104 spin_lock_bh(&queue_lock);
105
106 list_for_each_entry(i, &queue_list, list) {
107 if ((unsigned long)i == id) {
108 entry = i;
109 break;
110 }
111 }
112
113 if (entry) {
114 list_del(&entry->list);
115 queue_total--;
116 }
117
118 spin_unlock_bh(&queue_lock);
119 return entry;
120}
121
122static void
123__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
124{
125 struct nf_queue_entry *entry, *next;
126
127 list_for_each_entry_safe(entry, next, &queue_list, list) {
128 if (!cmpfn || cmpfn(entry, data)) {
129 list_del(&entry->list);
130 queue_total--;
131 nf_reinject(entry, NF_DROP);
132 }
133 }
134}
135
136static void
137ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
138{
139 spin_lock_bh(&queue_lock);
140 __ipq_flush(cmpfn, data);
141 spin_unlock_bh(&queue_lock);
142}
143
144static struct sk_buff *
145ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
146{
147 sk_buff_data_t old_tail;
148 size_t size = 0;
149 size_t data_len = 0;
150 struct sk_buff *skb;
151 struct ipq_packet_msg *pmsg;
152 struct nlmsghdr *nlh;
153 struct timeval tv;
154
155 switch (ACCESS_ONCE(copy_mode)) {
156 case IPQ_COPY_META:
157 case IPQ_COPY_NONE:
158 size = NLMSG_SPACE(sizeof(*pmsg));
159 break;
160
161 case IPQ_COPY_PACKET:
162 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
163 (*errp = skb_checksum_help(entry->skb)))
164 return NULL;
165
166 data_len = ACCESS_ONCE(copy_range);
167 if (data_len == 0 || data_len > entry->skb->len)
168 data_len = entry->skb->len;
169
170 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
171 break;
172
173 default:
174 *errp = -EINVAL;
175 return NULL;
176 }
177
178 skb = alloc_skb(size, GFP_ATOMIC);
179 if (!skb)
180 goto nlmsg_failure;
181
182 old_tail = skb->tail;
183 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
184 pmsg = NLMSG_DATA(nlh);
185 memset(pmsg, 0, sizeof(*pmsg));
186
187 pmsg->packet_id = (unsigned long )entry;
188 pmsg->data_len = data_len;
189 tv = ktime_to_timeval(entry->skb->tstamp);
190 pmsg->timestamp_sec = tv.tv_sec;
191 pmsg->timestamp_usec = tv.tv_usec;
192 pmsg->mark = entry->skb->mark;
193 pmsg->hook = entry->hook;
194 pmsg->hw_protocol = entry->skb->protocol;
195
196 if (entry->indev)
197 strcpy(pmsg->indev_name, entry->indev->name);
198 else
199 pmsg->indev_name[0] = '\0';
200
201 if (entry->outdev)
202 strcpy(pmsg->outdev_name, entry->outdev->name);
203 else
204 pmsg->outdev_name[0] = '\0';
205
206 if (entry->indev && entry->skb->dev &&
207 entry->skb->mac_header != entry->skb->network_header) {
208 pmsg->hw_type = entry->skb->dev->type;
209 pmsg->hw_addrlen = dev_parse_header(entry->skb,
210 pmsg->hw_addr);
211 }
212
213 if (data_len)
214 if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
215 BUG();
216
217 nlh->nlmsg_len = skb->tail - old_tail;
218 return skb;
219
220nlmsg_failure:
221 kfree_skb(skb);
222 *errp = -EINVAL;
223 printk(KERN_ERR "ip_queue: error creating packet message\n");
224 return NULL;
225}
226
227static int
228ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
229{
230 int status = -EINVAL;
231 struct sk_buff *nskb;
232
233 if (copy_mode == IPQ_COPY_NONE)
234 return -EAGAIN;
235
236 nskb = ipq_build_packet_message(entry, &status);
237 if (nskb == NULL)
238 return status;
239
240 spin_lock_bh(&queue_lock);
241
242 if (!peer_pid)
243 goto err_out_free_nskb;
244
245 if (queue_total >= queue_maxlen) {
246 queue_dropped++;
247 status = -ENOSPC;
248 if (net_ratelimit())
249 printk (KERN_WARNING "ip_queue: full at %d entries, "
250 "dropping packets(s). Dropped: %d\n", queue_total,
251 queue_dropped);
252 goto err_out_free_nskb;
253 }
254
255 /* netlink_unicast will either free the nskb or attach it to a socket */
256 status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
257 if (status < 0) {
258 queue_user_dropped++;
259 goto err_out_unlock;
260 }
261
262 __ipq_enqueue_entry(entry);
263
264 spin_unlock_bh(&queue_lock);
265 return status;
266
267err_out_free_nskb:
268 kfree_skb(nskb);
269
270err_out_unlock:
271 spin_unlock_bh(&queue_lock);
272 return status;
273}
274
275static int
276ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
277{
278 int diff;
279 struct iphdr *user_iph = (struct iphdr *)v->payload;
280 struct sk_buff *nskb;
281
282 if (v->data_len < sizeof(*user_iph))
283 return 0;
284 diff = v->data_len - e->skb->len;
285 if (diff < 0) {
286 if (pskb_trim(e->skb, v->data_len))
287 return -ENOMEM;
288 } else if (diff > 0) {
289 if (v->data_len > 0xFFFF)
290 return -EINVAL;
291 if (diff > skb_tailroom(e->skb)) {
292 nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
293 diff, GFP_ATOMIC);
294 if (!nskb) {
295 printk(KERN_WARNING "ip_queue: error "
296 "in mangle, dropping packet\n");
297 return -ENOMEM;
298 }
299 kfree_skb(e->skb);
300 e->skb = nskb;
301 }
302 skb_put(e->skb, diff);
303 }
304 if (!skb_make_writable(e->skb, v->data_len))
305 return -ENOMEM;
306 skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
307 e->skb->ip_summed = CHECKSUM_NONE;
308
309 return 0;
310}
311
312static int
313ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
314{
315 struct nf_queue_entry *entry;
316
317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
318 return -EINVAL;
319
320 entry = ipq_find_dequeue_entry(vmsg->id);
321 if (entry == NULL)
322 return -ENOENT;
323 else {
324 int verdict = vmsg->value;
325
326 if (vmsg->data_len && vmsg->data_len == len)
327 if (ipq_mangle_ipv4(vmsg, entry) < 0)
328 verdict = NF_DROP;
329
330 nf_reinject(entry, verdict);
331 return 0;
332 }
333}
334
335static int
336ipq_set_mode(unsigned char mode, unsigned int range)
337{
338 int status;
339
340 spin_lock_bh(&queue_lock);
341 status = __ipq_set_mode(mode, range);
342 spin_unlock_bh(&queue_lock);
343 return status;
344}
345
346static int
347ipq_receive_peer(struct ipq_peer_msg *pmsg,
348 unsigned char type, unsigned int len)
349{
350 int status = 0;
351
352 if (len < sizeof(*pmsg))
353 return -EINVAL;
354
355 switch (type) {
356 case IPQM_MODE:
357 status = ipq_set_mode(pmsg->msg.mode.value,
358 pmsg->msg.mode.range);
359 break;
360
361 case IPQM_VERDICT:
362 status = ipq_set_verdict(&pmsg->msg.verdict,
363 len - sizeof(*pmsg));
364 break;
365 default:
366 status = -EINVAL;
367 }
368 return status;
369}
370
371static int
372dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
373{
374 if (entry->indev)
375 if (entry->indev->ifindex == ifindex)
376 return 1;
377 if (entry->outdev)
378 if (entry->outdev->ifindex == ifindex)
379 return 1;
380#ifdef CONFIG_BRIDGE_NETFILTER
381 if (entry->skb->nf_bridge) {
382 if (entry->skb->nf_bridge->physindev &&
383 entry->skb->nf_bridge->physindev->ifindex == ifindex)
384 return 1;
385 if (entry->skb->nf_bridge->physoutdev &&
386 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
387 return 1;
388 }
389#endif
390 return 0;
391}
392
393static void
394ipq_dev_drop(int ifindex)
395{
396 ipq_flush(dev_cmp, ifindex);
397}
398
399#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
400
401static inline void
402__ipq_rcv_skb(struct sk_buff *skb)
403{
404 int status, type, pid, flags;
405 unsigned int nlmsglen, skblen;
406 struct nlmsghdr *nlh;
407 bool enable_timestamp = false;
408
409 skblen = skb->len;
410 if (skblen < sizeof(*nlh))
411 return;
412
413 nlh = nlmsg_hdr(skb);
414 nlmsglen = nlh->nlmsg_len;
415 if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
416 return;
417
418 pid = nlh->nlmsg_pid;
419 flags = nlh->nlmsg_flags;
420
421 if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
422 RCV_SKB_FAIL(-EINVAL);
423
424 if (flags & MSG_TRUNC)
425 RCV_SKB_FAIL(-ECOMM);
426
427 type = nlh->nlmsg_type;
428 if (type < NLMSG_NOOP || type >= IPQM_MAX)
429 RCV_SKB_FAIL(-EINVAL);
430
431 if (type <= IPQM_BASE)
432 return;
433
434 if (!capable(CAP_NET_ADMIN))
435 RCV_SKB_FAIL(-EPERM);
436
437 spin_lock_bh(&queue_lock);
438
439 if (peer_pid) {
440 if (peer_pid != pid) {
441 spin_unlock_bh(&queue_lock);
442 RCV_SKB_FAIL(-EBUSY);
443 }
444 } else {
445 enable_timestamp = true;
446 peer_pid = pid;
447 }
448
449 spin_unlock_bh(&queue_lock);
450 if (enable_timestamp)
451 net_enable_timestamp();
452 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
453 nlmsglen - NLMSG_LENGTH(0));
454 if (status < 0)
455 RCV_SKB_FAIL(status);
456
457 if (flags & NLM_F_ACK)
458 netlink_ack(skb, nlh, 0);
459}
460
461static void
462ipq_rcv_skb(struct sk_buff *skb)
463{
464 mutex_lock(&ipqnl_mutex);
465 __ipq_rcv_skb(skb);
466 mutex_unlock(&ipqnl_mutex);
467}
468
469static int
470ipq_rcv_dev_event(struct notifier_block *this,
471 unsigned long event, void *ptr)
472{
473 struct net_device *dev = ptr;
474
475 if (!net_eq(dev_net(dev), &init_net))
476 return NOTIFY_DONE;
477
478 /* Drop any packets associated with the downed device */
479 if (event == NETDEV_DOWN)
480 ipq_dev_drop(dev->ifindex);
481 return NOTIFY_DONE;
482}
483
484static struct notifier_block ipq_dev_notifier = {
485 .notifier_call = ipq_rcv_dev_event,
486};
487
488static int
489ipq_rcv_nl_event(struct notifier_block *this,
490 unsigned long event, void *ptr)
491{
492 struct netlink_notify *n = ptr;
493
494 if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
495 spin_lock_bh(&queue_lock);
496 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
497 __ipq_reset();
498 spin_unlock_bh(&queue_lock);
499 }
500 return NOTIFY_DONE;
501}
502
503static struct notifier_block ipq_nl_notifier = {
504 .notifier_call = ipq_rcv_nl_event,
505};
506
507#ifdef CONFIG_SYSCTL
508static struct ctl_table_header *ipq_sysctl_header;
509
510static ctl_table ipq_table[] = {
511 {
512 .procname = NET_IPQ_QMAX_NAME,
513 .data = &queue_maxlen,
514 .maxlen = sizeof(queue_maxlen),
515 .mode = 0644,
516 .proc_handler = proc_dointvec
517 },
518 { }
519};
520#endif
521
522#ifdef CONFIG_PROC_FS
523static int ip_queue_show(struct seq_file *m, void *v)
524{
525 spin_lock_bh(&queue_lock);
526
527 seq_printf(m,
528 "Peer PID : %d\n"
529 "Copy mode : %hu\n"
530 "Copy range : %u\n"
531 "Queue length : %u\n"
532 "Queue max. length : %u\n"
533 "Queue dropped : %u\n"
534 "Netlink dropped : %u\n",
535 peer_pid,
536 copy_mode,
537 copy_range,
538 queue_total,
539 queue_maxlen,
540 queue_dropped,
541 queue_user_dropped);
542
543 spin_unlock_bh(&queue_lock);
544 return 0;
545}
546
547static int ip_queue_open(struct inode *inode, struct file *file)
548{
549 return single_open(file, ip_queue_show, NULL);
550}
551
552static const struct file_operations ip_queue_proc_fops = {
553 .open = ip_queue_open,
554 .read = seq_read,
555 .llseek = seq_lseek,
556 .release = single_release,
557 .owner = THIS_MODULE,
558};
559#endif
560
561static const struct nf_queue_handler nfqh = {
562 .name = "ip_queue",
563 .outfn = &ipq_enqueue_packet,
564};
565
566static int __init ip_queue_init(void)
567{
568 int status = -ENOMEM;
569 struct proc_dir_entry *proc __maybe_unused;
570
571 netlink_register_notifier(&ipq_nl_notifier);
572 ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
573 ipq_rcv_skb, NULL, THIS_MODULE);
574 if (ipqnl == NULL) {
575 printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
576 goto cleanup_netlink_notifier;
577 }
578
579#ifdef CONFIG_PROC_FS
580 proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
581 &ip_queue_proc_fops);
582 if (!proc) {
583 printk(KERN_ERR "ip_queue: failed to create proc entry\n");
584 goto cleanup_ipqnl;
585 }
586#endif
587 register_netdevice_notifier(&ipq_dev_notifier);
588#ifdef CONFIG_SYSCTL
589 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
590#endif
591 status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
592 if (status < 0) {
593 printk(KERN_ERR "ip_queue: failed to register queue handler\n");
594 goto cleanup_sysctl;
595 }
596 return status;
597
598cleanup_sysctl:
599#ifdef CONFIG_SYSCTL
600 unregister_sysctl_table(ipq_sysctl_header);
601#endif
602 unregister_netdevice_notifier(&ipq_dev_notifier);
603 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
604cleanup_ipqnl: __maybe_unused
605 netlink_kernel_release(ipqnl);
606 mutex_lock(&ipqnl_mutex);
607 mutex_unlock(&ipqnl_mutex);
608
609cleanup_netlink_notifier:
610 netlink_unregister_notifier(&ipq_nl_notifier);
611 return status;
612}
613
614static void __exit ip_queue_fini(void)
615{
616 nf_unregister_queue_handlers(&nfqh);
617
618 ipq_flush(NULL, 0);
619
620#ifdef CONFIG_SYSCTL
621 unregister_sysctl_table(ipq_sysctl_header);
622#endif
623 unregister_netdevice_notifier(&ipq_dev_notifier);
624 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
625
626 netlink_kernel_release(ipqnl);
627 mutex_lock(&ipqnl_mutex);
628 mutex_unlock(&ipqnl_mutex);
629
630 netlink_unregister_notifier(&ipq_nl_notifier);
631}
632
633MODULE_DESCRIPTION("IPv4 packet queue handler");
634MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
635MODULE_LICENSE("GPL");
636MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);
637
638module_init(ip_queue_init);
639module_exit(ip_queue_fini);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3b..170b1fdd6b72 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -153,8 +153,7 @@ ip_checkentry(const struct ipt_ip *ip)
153static unsigned int 153static unsigned int
154ipt_error(struct sk_buff *skb, const struct xt_action_param *par) 154ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
155{ 155{
156 if (net_ratelimit()) 156 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
157 pr_info("error: `%s'\n", (const char *)par->targinfo);
158 157
159 return NF_DROP; 158 return NF_DROP;
160} 159}
@@ -377,7 +376,7 @@ ipt_do_table(struct sk_buff *skb,
377 if (v < 0) { 376 if (v < 0) {
378 /* Pop from stack? */ 377 /* Pop from stack? */
379 if (v != XT_RETURN) { 378 if (v != XT_RETURN) {
380 verdict = (unsigned)(-v) - 1; 379 verdict = (unsigned int)(-v) - 1;
381 break; 380 break;
382 } 381 }
383 if (*stackptr <= origptr) { 382 if (*stackptr <= origptr) {
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a639967eb727..fe5daea5214d 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -246,8 +246,7 @@ clusterip_hashfn(const struct sk_buff *skb,
246 dport = ports[1]; 246 dport = ports[1];
247 } 247 }
248 } else { 248 } else {
249 if (net_ratelimit()) 249 net_info_ratelimited("unknown protocol %u\n", iph->protocol);
250 pr_info("unknown protocol %u\n", iph->protocol);
251 } 250 }
252 251
253 switch (config->hash_mode) { 252 switch (config->hash_mode) {
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
deleted file mode 100644
index d76d6c9ed946..000000000000
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ /dev/null
@@ -1,516 +0,0 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h>
14#include <linux/spinlock.h>
15#include <linux/skbuff.h>
16#include <linux/if_arp.h>
17#include <linux/ip.h>
18#include <net/icmp.h>
19#include <net/udp.h>
20#include <net/tcp.h>
21#include <net/route.h>
22
23#include <linux/netfilter.h>
24#include <linux/netfilter/x_tables.h>
25#include <linux/netfilter_ipv4/ipt_LOG.h>
26#include <net/netfilter/nf_log.h>
27#include <net/netfilter/xt_log.h>
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
31MODULE_DESCRIPTION("Xtables: IPv4 packet logging to syslog");
32
33/* One level of recursion won't kill us */
34static void dump_packet(struct sbuff *m,
35 const struct nf_loginfo *info,
36 const struct sk_buff *skb,
37 unsigned int iphoff)
38{
39 struct iphdr _iph;
40 const struct iphdr *ih;
41 unsigned int logflags;
42
43 if (info->type == NF_LOG_TYPE_LOG)
44 logflags = info->u.log.logflags;
45 else
46 logflags = NF_LOG_MASK;
47
48 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
49 if (ih == NULL) {
50 sb_add(m, "TRUNCATED");
51 return;
52 }
53
54 /* Important fields:
55 * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
56 /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
57 sb_add(m, "SRC=%pI4 DST=%pI4 ",
58 &ih->saddr, &ih->daddr);
59
60 /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
61 sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
62 ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
63 ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
64
65 /* Max length: 6 "CE DF MF " */
66 if (ntohs(ih->frag_off) & IP_CE)
67 sb_add(m, "CE ");
68 if (ntohs(ih->frag_off) & IP_DF)
69 sb_add(m, "DF ");
70 if (ntohs(ih->frag_off) & IP_MF)
71 sb_add(m, "MF ");
72
73 /* Max length: 11 "FRAG:65535 " */
74 if (ntohs(ih->frag_off) & IP_OFFSET)
75 sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
76
77 if ((logflags & IPT_LOG_IPOPT) &&
78 ih->ihl * 4 > sizeof(struct iphdr)) {
79 const unsigned char *op;
80 unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
81 unsigned int i, optsize;
82
83 optsize = ih->ihl * 4 - sizeof(struct iphdr);
84 op = skb_header_pointer(skb, iphoff+sizeof(_iph),
85 optsize, _opt);
86 if (op == NULL) {
87 sb_add(m, "TRUNCATED");
88 return;
89 }
90
91 /* Max length: 127 "OPT (" 15*4*2chars ") " */
92 sb_add(m, "OPT (");
93 for (i = 0; i < optsize; i++)
94 sb_add(m, "%02X", op[i]);
95 sb_add(m, ") ");
96 }
97
98 switch (ih->protocol) {
99 case IPPROTO_TCP: {
100 struct tcphdr _tcph;
101 const struct tcphdr *th;
102
103 /* Max length: 10 "PROTO=TCP " */
104 sb_add(m, "PROTO=TCP ");
105
106 if (ntohs(ih->frag_off) & IP_OFFSET)
107 break;
108
109 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
110 th = skb_header_pointer(skb, iphoff + ih->ihl * 4,
111 sizeof(_tcph), &_tcph);
112 if (th == NULL) {
113 sb_add(m, "INCOMPLETE [%u bytes] ",
114 skb->len - iphoff - ih->ihl*4);
115 break;
116 }
117
118 /* Max length: 20 "SPT=65535 DPT=65535 " */
119 sb_add(m, "SPT=%u DPT=%u ",
120 ntohs(th->source), ntohs(th->dest));
121 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
122 if (logflags & IPT_LOG_TCPSEQ)
123 sb_add(m, "SEQ=%u ACK=%u ",
124 ntohl(th->seq), ntohl(th->ack_seq));
125 /* Max length: 13 "WINDOW=65535 " */
126 sb_add(m, "WINDOW=%u ", ntohs(th->window));
127 /* Max length: 9 "RES=0x3F " */
128 sb_add(m, "RES=0x%02x ", (u8)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
129 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
130 if (th->cwr)
131 sb_add(m, "CWR ");
132 if (th->ece)
133 sb_add(m, "ECE ");
134 if (th->urg)
135 sb_add(m, "URG ");
136 if (th->ack)
137 sb_add(m, "ACK ");
138 if (th->psh)
139 sb_add(m, "PSH ");
140 if (th->rst)
141 sb_add(m, "RST ");
142 if (th->syn)
143 sb_add(m, "SYN ");
144 if (th->fin)
145 sb_add(m, "FIN ");
146 /* Max length: 11 "URGP=65535 " */
147 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
148
149 if ((logflags & IPT_LOG_TCPOPT) &&
150 th->doff * 4 > sizeof(struct tcphdr)) {
151 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
152 const unsigned char *op;
153 unsigned int i, optsize;
154
155 optsize = th->doff * 4 - sizeof(struct tcphdr);
156 op = skb_header_pointer(skb,
157 iphoff+ih->ihl*4+sizeof(_tcph),
158 optsize, _opt);
159 if (op == NULL) {
160 sb_add(m, "TRUNCATED");
161 return;
162 }
163
164 /* Max length: 127 "OPT (" 15*4*2chars ") " */
165 sb_add(m, "OPT (");
166 for (i = 0; i < optsize; i++)
167 sb_add(m, "%02X", op[i]);
168 sb_add(m, ") ");
169 }
170 break;
171 }
172 case IPPROTO_UDP:
173 case IPPROTO_UDPLITE: {
174 struct udphdr _udph;
175 const struct udphdr *uh;
176
177 if (ih->protocol == IPPROTO_UDP)
178 /* Max length: 10 "PROTO=UDP " */
179 sb_add(m, "PROTO=UDP " );
180 else /* Max length: 14 "PROTO=UDPLITE " */
181 sb_add(m, "PROTO=UDPLITE ");
182
183 if (ntohs(ih->frag_off) & IP_OFFSET)
184 break;
185
186 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
187 uh = skb_header_pointer(skb, iphoff+ih->ihl*4,
188 sizeof(_udph), &_udph);
189 if (uh == NULL) {
190 sb_add(m, "INCOMPLETE [%u bytes] ",
191 skb->len - iphoff - ih->ihl*4);
192 break;
193 }
194
195 /* Max length: 20 "SPT=65535 DPT=65535 " */
196 sb_add(m, "SPT=%u DPT=%u LEN=%u ",
197 ntohs(uh->source), ntohs(uh->dest),
198 ntohs(uh->len));
199 break;
200 }
201 case IPPROTO_ICMP: {
202 struct icmphdr _icmph;
203 const struct icmphdr *ich;
204 static const size_t required_len[NR_ICMP_TYPES+1]
205 = { [ICMP_ECHOREPLY] = 4,
206 [ICMP_DEST_UNREACH]
207 = 8 + sizeof(struct iphdr),
208 [ICMP_SOURCE_QUENCH]
209 = 8 + sizeof(struct iphdr),
210 [ICMP_REDIRECT]
211 = 8 + sizeof(struct iphdr),
212 [ICMP_ECHO] = 4,
213 [ICMP_TIME_EXCEEDED]
214 = 8 + sizeof(struct iphdr),
215 [ICMP_PARAMETERPROB]
216 = 8 + sizeof(struct iphdr),
217 [ICMP_TIMESTAMP] = 20,
218 [ICMP_TIMESTAMPREPLY] = 20,
219 [ICMP_ADDRESS] = 12,
220 [ICMP_ADDRESSREPLY] = 12 };
221
222 /* Max length: 11 "PROTO=ICMP " */
223 sb_add(m, "PROTO=ICMP ");
224
225 if (ntohs(ih->frag_off) & IP_OFFSET)
226 break;
227
228 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
229 ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
230 sizeof(_icmph), &_icmph);
231 if (ich == NULL) {
232 sb_add(m, "INCOMPLETE [%u bytes] ",
233 skb->len - iphoff - ih->ihl*4);
234 break;
235 }
236
237 /* Max length: 18 "TYPE=255 CODE=255 " */
238 sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
239
240 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
241 if (ich->type <= NR_ICMP_TYPES &&
242 required_len[ich->type] &&
243 skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
244 sb_add(m, "INCOMPLETE [%u bytes] ",
245 skb->len - iphoff - ih->ihl*4);
246 break;
247 }
248
249 switch (ich->type) {
250 case ICMP_ECHOREPLY:
251 case ICMP_ECHO:
252 /* Max length: 19 "ID=65535 SEQ=65535 " */
253 sb_add(m, "ID=%u SEQ=%u ",
254 ntohs(ich->un.echo.id),
255 ntohs(ich->un.echo.sequence));
256 break;
257
258 case ICMP_PARAMETERPROB:
259 /* Max length: 14 "PARAMETER=255 " */
260 sb_add(m, "PARAMETER=%u ",
261 ntohl(ich->un.gateway) >> 24);
262 break;
263 case ICMP_REDIRECT:
264 /* Max length: 24 "GATEWAY=255.255.255.255 " */
265 sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
266 /* Fall through */
267 case ICMP_DEST_UNREACH:
268 case ICMP_SOURCE_QUENCH:
269 case ICMP_TIME_EXCEEDED:
270 /* Max length: 3+maxlen */
271 if (!iphoff) { /* Only recurse once. */
272 sb_add(m, "[");
273 dump_packet(m, info, skb,
274 iphoff + ih->ihl*4+sizeof(_icmph));
275 sb_add(m, "] ");
276 }
277
278 /* Max length: 10 "MTU=65535 " */
279 if (ich->type == ICMP_DEST_UNREACH &&
280 ich->code == ICMP_FRAG_NEEDED)
281 sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
282 }
283 break;
284 }
285 /* Max Length */
286 case IPPROTO_AH: {
287 struct ip_auth_hdr _ahdr;
288 const struct ip_auth_hdr *ah;
289
290 if (ntohs(ih->frag_off) & IP_OFFSET)
291 break;
292
293 /* Max length: 9 "PROTO=AH " */
294 sb_add(m, "PROTO=AH ");
295
296 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
297 ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
298 sizeof(_ahdr), &_ahdr);
299 if (ah == NULL) {
300 sb_add(m, "INCOMPLETE [%u bytes] ",
301 skb->len - iphoff - ih->ihl*4);
302 break;
303 }
304
305 /* Length: 15 "SPI=0xF1234567 " */
306 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
307 break;
308 }
309 case IPPROTO_ESP: {
310 struct ip_esp_hdr _esph;
311 const struct ip_esp_hdr *eh;
312
313 /* Max length: 10 "PROTO=ESP " */
314 sb_add(m, "PROTO=ESP ");
315
316 if (ntohs(ih->frag_off) & IP_OFFSET)
317 break;
318
319 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
320 eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
321 sizeof(_esph), &_esph);
322 if (eh == NULL) {
323 sb_add(m, "INCOMPLETE [%u bytes] ",
324 skb->len - iphoff - ih->ihl*4);
325 break;
326 }
327
328 /* Length: 15 "SPI=0xF1234567 " */
329 sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
330 break;
331 }
332 /* Max length: 10 "PROTO 255 " */
333 default:
334 sb_add(m, "PROTO=%u ", ih->protocol);
335 }
336
337 /* Max length: 15 "UID=4294967295 " */
338 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
339 read_lock_bh(&skb->sk->sk_callback_lock);
340 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
341 sb_add(m, "UID=%u GID=%u ",
342 skb->sk->sk_socket->file->f_cred->fsuid,
343 skb->sk->sk_socket->file->f_cred->fsgid);
344 read_unlock_bh(&skb->sk->sk_callback_lock);
345 }
346
347 /* Max length: 16 "MARK=0xFFFFFFFF " */
348 if (!iphoff && skb->mark)
349 sb_add(m, "MARK=0x%x ", skb->mark);
350
351 /* Proto Max log string length */
352 /* IP: 40+46+6+11+127 = 230 */
353 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
354 /* UDP: 10+max(25,20) = 35 */
355 /* UDPLITE: 14+max(25,20) = 39 */
356 /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
357 /* ESP: 10+max(25)+15 = 50 */
358 /* AH: 9+max(25)+15 = 49 */
359 /* unknown: 10 */
360
361 /* (ICMP allows recursion one level deep) */
362 /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
363 /* maxlen = 230+ 91 + 230 + 252 = 803 */
364}
365
366static void dump_mac_header(struct sbuff *m,
367 const struct nf_loginfo *info,
368 const struct sk_buff *skb)
369{
370 struct net_device *dev = skb->dev;
371 unsigned int logflags = 0;
372
373 if (info->type == NF_LOG_TYPE_LOG)
374 logflags = info->u.log.logflags;
375
376 if (!(logflags & IPT_LOG_MACDECODE))
377 goto fallback;
378
379 switch (dev->type) {
380 case ARPHRD_ETHER:
381 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
382 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
383 ntohs(eth_hdr(skb)->h_proto));
384 return;
385 default:
386 break;
387 }
388
389fallback:
390 sb_add(m, "MAC=");
391 if (dev->hard_header_len &&
392 skb->mac_header != skb->network_header) {
393 const unsigned char *p = skb_mac_header(skb);
394 unsigned int i;
395
396 sb_add(m, "%02x", *p++);
397 for (i = 1; i < dev->hard_header_len; i++, p++)
398 sb_add(m, ":%02x", *p);
399 }
400 sb_add(m, " ");
401}
402
/* Logging parameters used when a caller passes no explicit nf_loginfo:
 * syslog level 5 and every NF_LOG_MASK option flag enabled. */
static struct nf_loginfo default_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 5,
			.logflags = NF_LOG_MASK,
		},
	},
};
412
/*
 * Emit one log line for @skb: syslog level and prefix, IN=/OUT= device
 * names, optional bridge physical ports, the MAC header (input path
 * only), and the decoded IP payload via dump_packet().
 */
static void
ipt_log_packet(u_int8_t pf,
	       unsigned int hooknum,
	       const struct sk_buff *skb,
	       const struct net_device *in,
	       const struct net_device *out,
	       const struct nf_loginfo *loginfo,
	       const char *prefix)
{
	struct sbuff *m = sb_open();

	/* Fall back to module defaults (level 5, full log mask). */
	if (!loginfo)
		loginfo = &default_loginfo;

	sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
	       prefix,
	       in ? in->name : "",
	       out ? out->name : "");
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		const struct net_device *physindev;
		const struct net_device *physoutdev;

		/* Report the underlying bridge ports only when they
		 * differ from the logical bridge device already shown. */
		physindev = skb->nf_bridge->physindev;
		if (physindev && in != physindev)
			sb_add(m, "PHYSIN=%s ", physindev->name);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev && out != physoutdev)
			sb_add(m, "PHYSOUT=%s ", physoutdev->name);
	}
#endif

	/* The MAC header is only meaningful when an input device exists. */
	if (in != NULL)
		dump_mac_header(m, loginfo, skb);

	dump_packet(m, loginfo, skb, 0);

	/* Flush the assembled line to the kernel log. */
	sb_close(m);
}
452
453static unsigned int
454log_tg(struct sk_buff *skb, const struct xt_action_param *par)
455{
456 const struct ipt_log_info *loginfo = par->targinfo;
457 struct nf_loginfo li;
458
459 li.type = NF_LOG_TYPE_LOG;
460 li.u.log.level = loginfo->level;
461 li.u.log.logflags = loginfo->logflags;
462
463 ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li,
464 loginfo->prefix);
465 return XT_CONTINUE;
466}
467
468static int log_tg_check(const struct xt_tgchk_param *par)
469{
470 const struct ipt_log_info *loginfo = par->targinfo;
471
472 if (loginfo->level >= 8) {
473 pr_debug("level %u >= 8\n", loginfo->level);
474 return -EINVAL;
475 }
476 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
477 pr_debug("prefix is not null-terminated\n");
478 return -EINVAL;
479 }
480 return 0;
481}
482
/* xtables registration record for the IPv4 "LOG" target. */
static struct xt_target log_tg_reg __read_mostly = {
	.name = "LOG",
	.family = NFPROTO_IPV4,
	.target = log_tg,
	.targetsize = sizeof(struct ipt_log_info),
	.checkentry = log_tg_check,
	.me = THIS_MODULE,
};
491
/* nf_log backend so other netfilter users can route their log
 * requests through ipt_log_packet(). */
static struct nf_logger ipt_log_logger __read_mostly = {
	.name = "ipt_LOG",
	.logfn = &ipt_log_packet,
	.me = THIS_MODULE,
};
497
498static int __init log_tg_init(void)
499{
500 int ret;
501
502 ret = xt_register_target(&log_tg_reg);
503 if (ret < 0)
504 return ret;
505 nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
506 return 0;
507}
508
/* Module teardown: unbind the logger first, then remove the xtables
 * target — the reverse of registration order in log_tg_init(). */
static void __exit log_tg_exit(void)
{
	nf_log_unregister(&ipt_log_logger);
	xt_unregister_target(&log_tg_reg);
}

module_init(log_tg_init);
module_exit(log_tg_exit);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 0e58f09e59fb..851acec852d2 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
52static struct nf_hook_ops *filter_ops __read_mostly; 52static struct nf_hook_ops *filter_ops __read_mostly;
53 53
54/* Default to forward because I got too much mail already. */ 54/* Default to forward because I got too much mail already. */
55static bool forward = NF_ACCEPT; 55static bool forward = true;
56module_param(forward, bool, 0000); 56module_param(forward, bool, 0000);
57 57
58static int __net_init iptable_filter_net_init(struct net *net) 58static int __net_init iptable_filter_net_init(struct net *net)
@@ -64,7 +64,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
64 return -ENOMEM; 64 return -ENOMEM;
65 /* Entry 1 is the FORWARD hook */ 65 /* Entry 1 is the FORWARD hook */
66 ((struct ipt_standard *)repl->entries)[1].target.verdict = 66 ((struct ipt_standard *)repl->entries)[1].target.verdict =
67 -forward - 1; 67 forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
68 68
69 net->ipv4.iptable_filter = 69 net->ipv4.iptable_filter =
70 ipt_register_table(net, &packet_filter, repl); 70 ipt_register_table(net, &packet_filter, repl);
@@ -88,11 +88,6 @@ static int __init iptable_filter_init(void)
88{ 88{
89 int ret; 89 int ret;
90 90
91 if (forward < 0 || forward > NF_MAX_VERDICT) {
92 pr_err("iptables forward must be 0 or 1\n");
93 return -EINVAL;
94 }
95
96 ret = register_pernet_subsys(&iptable_filter_net_ops); 91 ret = register_pernet_subsys(&iptable_filter_net_ops);
97 if (ret < 0) 92 if (ret < 0)
98 return ret; 93 return ret;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21113a1..91747d4ebc26 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
74 74
75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); 75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
76 if (iph == NULL) 76 if (iph == NULL)
77 return -NF_DROP; 77 return -NF_ACCEPT;
78 78
79 /* Conntrack defragments packets, we might still see fragments 79 /* Conntrack defragments packets, we might still see fragments
80 * inside ICMP packets though. */ 80 * inside ICMP packets though. */
81 if (iph->frag_off & htons(IP_OFFSET)) 81 if (iph->frag_off & htons(IP_OFFSET))
82 return -NF_DROP; 82 return -NF_ACCEPT;
83 83
84 *dataoff = nhoff + (iph->ihl << 2); 84 *dataoff = nhoff + (iph->ihl << 2);
85 *protonum = iph->protocol; 85 *protonum = iph->protocol;
86 86
87 /* Check bogus IP headers */
88 if (*dataoff > skb->len) {
89 pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
90 "nhoff %u, ihl %u, skblen %u\n",
91 nhoff, iph->ihl << 2, skb->len);
92 return -NF_ACCEPT;
93 }
94
87 return NF_ACCEPT; 95 return NF_ACCEPT;
88} 96}
89 97
@@ -303,8 +311,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
303static int ipv4_tuple_to_nlattr(struct sk_buff *skb, 311static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
304 const struct nf_conntrack_tuple *tuple) 312 const struct nf_conntrack_tuple *tuple)
305{ 313{
306 NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); 314 if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
307 NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); 315 nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
316 goto nla_put_failure;
308 return 0; 317 return 0;
309 318
310nla_put_failure: 319nla_put_failure:
@@ -356,7 +365,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
356 .nla_policy = ipv4_nla_policy, 365 .nla_policy = ipv4_nla_policy,
357#endif 366#endif
358#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 367#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
359 .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, 368 .ctl_table_path = "net/ipv4/netfilter",
360 .ctl_table = ip_ct_sysctl_table, 369 .ctl_table = ip_ct_sysctl_table,
361#endif 370#endif
362 .me = THIS_MODULE, 371 .me = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index ab5b27a2916f..0847e373d33c 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -75,25 +75,31 @@ static int icmp_print_tuple(struct seq_file *s,
75 ntohs(tuple->src.u.icmp.id)); 75 ntohs(tuple->src.u.icmp.id));
76} 76}
77 77
78static unsigned int *icmp_get_timeouts(struct net *net)
79{
80 return &nf_ct_icmp_timeout;
81}
82
78/* Returns verdict for packet, or -1 for invalid. */ 83/* Returns verdict for packet, or -1 for invalid. */
79static int icmp_packet(struct nf_conn *ct, 84static int icmp_packet(struct nf_conn *ct,
80 const struct sk_buff *skb, 85 const struct sk_buff *skb,
81 unsigned int dataoff, 86 unsigned int dataoff,
82 enum ip_conntrack_info ctinfo, 87 enum ip_conntrack_info ctinfo,
83 u_int8_t pf, 88 u_int8_t pf,
84 unsigned int hooknum) 89 unsigned int hooknum,
90 unsigned int *timeout)
85{ 91{
86 /* Do not immediately delete the connection after the first 92 /* Do not immediately delete the connection after the first
87 successful reply to avoid excessive conntrackd traffic 93 successful reply to avoid excessive conntrackd traffic
88 and also to handle correctly ICMP echo reply duplicates. */ 94 and also to handle correctly ICMP echo reply duplicates. */
89 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout); 95 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
90 96
91 return NF_ACCEPT; 97 return NF_ACCEPT;
92} 98}
93 99
94/* Called when a new connection for this protocol found. */ 100/* Called when a new connection for this protocol found. */
95static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, 101static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
96 unsigned int dataoff) 102 unsigned int dataoff, unsigned int *timeouts)
97{ 103{
98 static const u_int8_t valid_new[] = { 104 static const u_int8_t valid_new[] = {
99 [ICMP_ECHO] = 1, 105 [ICMP_ECHO] = 1,
@@ -222,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
222static int icmp_tuple_to_nlattr(struct sk_buff *skb, 228static int icmp_tuple_to_nlattr(struct sk_buff *skb,
223 const struct nf_conntrack_tuple *t) 229 const struct nf_conntrack_tuple *t)
224{ 230{
225 NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id); 231 if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
226 NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type); 232 nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
227 NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code); 233 nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
228 234 goto nla_put_failure;
229 return 0; 235 return 0;
230 236
231nla_put_failure: 237nla_put_failure:
@@ -263,6 +269,44 @@ static int icmp_nlattr_tuple_size(void)
263} 269}
264#endif 270#endif
265 271
272#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
273
274#include <linux/netfilter/nfnetlink.h>
275#include <linux/netfilter/nfnetlink_cttimeout.h>
276
277static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
278{
279 unsigned int *timeout = data;
280
281 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
282 *timeout =
283 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
284 } else {
285 /* Set default ICMP timeout. */
286 *timeout = nf_ct_icmp_timeout;
287 }
288 return 0;
289}
290
291static int
292icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
293{
294 const unsigned int *timeout = data;
295
296 if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
297 goto nla_put_failure;
298 return 0;
299
300nla_put_failure:
301 return -ENOSPC;
302}
303
304static const struct nla_policy
305icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
306 [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
307};
308#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
309
266#ifdef CONFIG_SYSCTL 310#ifdef CONFIG_SYSCTL
267static struct ctl_table_header *icmp_sysctl_header; 311static struct ctl_table_header *icmp_sysctl_header;
268static struct ctl_table icmp_sysctl_table[] = { 312static struct ctl_table icmp_sysctl_table[] = {
@@ -298,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
298 .invert_tuple = icmp_invert_tuple, 342 .invert_tuple = icmp_invert_tuple,
299 .print_tuple = icmp_print_tuple, 343 .print_tuple = icmp_print_tuple,
300 .packet = icmp_packet, 344 .packet = icmp_packet,
345 .get_timeouts = icmp_get_timeouts,
301 .new = icmp_new, 346 .new = icmp_new,
302 .error = icmp_error, 347 .error = icmp_error,
303 .destroy = NULL, 348 .destroy = NULL,
@@ -308,6 +353,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
308 .nlattr_to_tuple = icmp_nlattr_to_tuple, 353 .nlattr_to_tuple = icmp_nlattr_to_tuple,
309 .nla_policy = icmp_nla_policy, 354 .nla_policy = icmp_nla_policy,
310#endif 355#endif
356#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
357 .ctnl_timeout = {
358 .nlattr_to_obj = icmp_timeout_nlattr_to_obj,
359 .obj_to_nlattr = icmp_timeout_obj_to_nlattr,
360 .nlattr_max = CTA_TIMEOUT_ICMP_MAX,
361 .obj_size = sizeof(unsigned int),
362 .nla_policy = icmp_timeout_nla_policy,
363 },
364#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
311#ifdef CONFIG_SYSCTL 365#ifdef CONFIG_SYSCTL
312 .ctl_table_header = &icmp_sysctl_header, 366 .ctl_table_header = &icmp_sysctl_header,
313 .ctl_table = icmp_sysctl_table, 367 .ctl_table = icmp_sysctl_table,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a708933dc230..abb52adf5acd 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -686,6 +686,11 @@ static struct pernet_operations nf_nat_net_ops = {
686 .exit = nf_nat_net_exit, 686 .exit = nf_nat_net_exit,
687}; 687};
688 688
689static struct nf_ct_helper_expectfn follow_master_nat = {
690 .name = "nat-follow-master",
691 .expectfn = nf_nat_follow_master,
692};
693
689static int __init nf_nat_init(void) 694static int __init nf_nat_init(void)
690{ 695{
691 size_t i; 696 size_t i;
@@ -717,6 +722,8 @@ static int __init nf_nat_init(void)
717 722
718 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 723 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
719 724
725 nf_ct_helper_expectfn_register(&follow_master_nat);
726
720 BUG_ON(nf_nat_seq_adjust_hook != NULL); 727 BUG_ON(nf_nat_seq_adjust_hook != NULL);
721 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); 728 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
722 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 729 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -736,6 +743,7 @@ static void __exit nf_nat_cleanup(void)
736 unregister_pernet_subsys(&nf_nat_net_ops); 743 unregister_pernet_subsys(&nf_nat_net_ops);
737 nf_ct_l3proto_put(l3proto); 744 nf_ct_l3proto_put(l3proto);
738 nf_ct_extend_unregister(&nat_extend); 745 nf_ct_extend_unregister(&nat_extend);
746 nf_ct_helper_expectfn_unregister(&follow_master_nat);
739 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); 747 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
740 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 748 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
741 RCU_INIT_POINTER(nf_ct_nat_offset, NULL); 749 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index dc1dd912baf4..cad29c121318 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -42,9 +42,7 @@ static int set_addr(struct sk_buff *skb,
42 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 42 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
43 addroff, sizeof(buf), 43 addroff, sizeof(buf),
44 (char *) &buf, sizeof(buf))) { 44 (char *) &buf, sizeof(buf))) {
45 if (net_ratelimit()) 45 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
46 pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet"
47 " error\n");
48 return -1; 46 return -1;
49 } 47 }
50 48
@@ -58,9 +56,7 @@ static int set_addr(struct sk_buff *skb,
58 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
59 addroff, sizeof(buf), 57 addroff, sizeof(buf),
60 (char *) &buf, sizeof(buf))) { 58 (char *) &buf, sizeof(buf))) {
61 if (net_ratelimit()) 59 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
62 pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet"
63 " error\n");
64 return -1; 60 return -1;
65 } 61 }
66 /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy 62 /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
@@ -214,8 +210,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
214 210
215 /* Run out of expectations */ 211 /* Run out of expectations */
216 if (i >= H323_RTP_CHANNEL_MAX) { 212 if (i >= H323_RTP_CHANNEL_MAX) {
217 if (net_ratelimit()) 213 net_notice_ratelimited("nf_nat_h323: out of expectations\n");
218 pr_notice("nf_nat_h323: out of expectations\n");
219 return 0; 214 return 0;
220 } 215 }
221 216
@@ -244,8 +239,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
244 } 239 }
245 240
246 if (nated_port == 0) { /* No port available */ 241 if (nated_port == 0) { /* No port available */
247 if (net_ratelimit()) 242 net_notice_ratelimited("nf_nat_h323: out of RTP ports\n");
248 pr_notice("nf_nat_h323: out of RTP ports\n");
249 return 0; 243 return 0;
250 } 244 }
251 245
@@ -308,8 +302,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
308 } 302 }
309 303
310 if (nated_port == 0) { /* No port available */ 304 if (nated_port == 0) { /* No port available */
311 if (net_ratelimit()) 305 net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
312 pr_notice("nf_nat_h323: out of TCP ports\n");
313 return 0; 306 return 0;
314 } 307 }
315 308
@@ -365,8 +358,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
365 } 358 }
366 359
367 if (nated_port == 0) { /* No port available */ 360 if (nated_port == 0) { /* No port available */
368 if (net_ratelimit()) 361 net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
369 pr_notice("nf_nat_q931: out of TCP ports\n");
370 return 0; 362 return 0;
371 } 363 }
372 364
@@ -456,8 +448,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
456 } 448 }
457 449
458 if (nated_port == 0) { /* No port available */ 450 if (nated_port == 0) { /* No port available */
459 if (net_ratelimit()) 451 net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
460 pr_notice("nf_nat_ras: out of TCP ports\n");
461 return 0; 452 return 0;
462 } 453 }
463 454
@@ -545,8 +536,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
545 } 536 }
546 537
547 if (nated_port == 0) { /* No port available */ 538 if (nated_port == 0) { /* No port available */
548 if (net_ratelimit()) 539 net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
549 pr_notice("nf_nat_q931: out of TCP ports\n");
550 return 0; 540 return 0;
551 } 541 }
552 542
@@ -568,6 +558,16 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
568 return 0; 558 return 0;
569} 559}
570 560
561static struct nf_ct_helper_expectfn q931_nat = {
562 .name = "Q.931",
563 .expectfn = ip_nat_q931_expect,
564};
565
566static struct nf_ct_helper_expectfn callforwarding_nat = {
567 .name = "callforwarding",
568 .expectfn = ip_nat_callforwarding_expect,
569};
570
571/****************************************************************************/ 571/****************************************************************************/
572static int __init init(void) 572static int __init init(void)
573{ 573{
@@ -590,6 +590,8 @@ static int __init init(void)
590 RCU_INIT_POINTER(nat_h245_hook, nat_h245); 590 RCU_INIT_POINTER(nat_h245_hook, nat_h245);
591 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding); 591 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
592 RCU_INIT_POINTER(nat_q931_hook, nat_q931); 592 RCU_INIT_POINTER(nat_q931_hook, nat_q931);
593 nf_ct_helper_expectfn_register(&q931_nat);
594 nf_ct_helper_expectfn_register(&callforwarding_nat);
593 return 0; 595 return 0;
594} 596}
595 597
@@ -605,6 +607,8 @@ static void __exit fini(void)
605 RCU_INIT_POINTER(nat_h245_hook, NULL); 607 RCU_INIT_POINTER(nat_h245_hook, NULL);
606 RCU_INIT_POINTER(nat_callforwarding_hook, NULL); 608 RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
607 RCU_INIT_POINTER(nat_q931_hook, NULL); 609 RCU_INIT_POINTER(nat_q931_hook, NULL);
610 nf_ct_helper_expectfn_unregister(&q931_nat);
611 nf_ct_helper_expectfn_unregister(&callforwarding_nat);
608 synchronize_rcu(); 612 synchronize_rcu();
609} 613}
610 614
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index d0319f96269f..ea4a23813d26 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -283,7 +283,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
283 __be32 newip; 283 __be32 newip;
284 u_int16_t port; 284 u_int16_t port;
285 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 285 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
286 unsigned buflen; 286 unsigned int buflen;
287 287
288 /* Connection will come from reply */ 288 /* Connection will come from reply */
289 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) 289 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
@@ -526,6 +526,11 @@ err1:
526 return NF_DROP; 526 return NF_DROP;
527} 527}
528 528
529static struct nf_ct_helper_expectfn sip_nat = {
530 .name = "sip",
531 .expectfn = ip_nat_sip_expected,
532};
533
529static void __exit nf_nat_sip_fini(void) 534static void __exit nf_nat_sip_fini(void)
530{ 535{
531 RCU_INIT_POINTER(nf_nat_sip_hook, NULL); 536 RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
@@ -535,6 +540,7 @@ static void __exit nf_nat_sip_fini(void)
535 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL); 540 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
536 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL); 541 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
537 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL); 542 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
543 nf_ct_helper_expectfn_unregister(&sip_nat);
538 synchronize_rcu(); 544 synchronize_rcu();
539} 545}
540 546
@@ -554,6 +560,7 @@ static int __init nf_nat_sip_init(void)
554 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port); 560 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
555 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session); 561 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
556 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media); 562 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
563 nf_ct_helper_expectfn_register(&sip_nat);
557 return 0; 564 return 0;
558} 565}
559 566
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 2133c30a4a5f..746edec8b86e 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1206,8 +1206,7 @@ static int snmp_translate(struct nf_conn *ct,
1206 1206
1207 if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), 1207 if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
1208 paylen, &map, &udph->check)) { 1208 paylen, &map, &udph->check)) {
1209 if (net_ratelimit()) 1209 net_warn_ratelimited("bsalg: parser failed\n");
1210 printk(KERN_WARNING "bsalg: parser failed\n");
1211 return NF_DROP; 1210 return NF_DROP;
1212 } 1211 }
1213 return NF_ACCEPT; 1212 return NF_ACCEPT;
@@ -1241,9 +1240,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
1241 * can mess around with the payload. 1240 * can mess around with the payload.
1242 */ 1241 */
1243 if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { 1242 if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
1244 if (net_ratelimit()) 1243 net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
1245 printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", 1244 &iph->saddr, &iph->daddr);
1246 &iph->saddr, &iph->daddr);
1247 return NF_DROP; 1245 return NF_DROP;
1248 } 1246 }
1249 1247
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b072386cee21..2c00e8bf684d 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -20,7 +20,6 @@
20 * 20 *
21 */ 21 */
22 22
23#include <asm/system.h>
24#include <linux/uaccess.h> 23#include <linux/uaccess.h>
25#include <linux/types.h> 24#include <linux/types.h>
26#include <linux/fcntl.h> 25#include <linux/fcntl.h>
@@ -52,15 +51,16 @@ static struct ping_table ping_table;
52 51
53static u16 ping_port_rover; 52static u16 ping_port_rover;
54 53
55static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask) 54static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
56{ 55{
57 int res = (num + net_hash_mix(net)) & mask; 56 int res = (num + net_hash_mix(net)) & mask;
57
58 pr_debug("hash(%d) = %d\n", num, res); 58 pr_debug("hash(%d) = %d\n", num, res);
59 return res; 59 return res;
60} 60}
61 61
62static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, 62static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
63 struct net *net, unsigned num) 63 struct net *net, unsigned int num)
64{ 64{
65 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; 65 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
66} 66}
@@ -156,7 +156,7 @@ static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
156 struct hlist_nulls_node *hnode; 156 struct hlist_nulls_node *hnode;
157 157
158 pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", 158 pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
159 (int)ident, &daddr, dif); 159 (int)ident, &daddr, dif);
160 read_lock_bh(&ping_table.lock); 160 read_lock_bh(&ping_table.lock);
161 161
162 ping_portaddr_for_each_entry(sk, hnode, hslot) { 162 ping_portaddr_for_each_entry(sk, hnode, hslot) {
@@ -189,7 +189,8 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
189 gid_t *high) 189 gid_t *high)
190{ 190{
191 gid_t *data = net->ipv4.sysctl_ping_group_range; 191 gid_t *data = net->ipv4.sysctl_ping_group_range;
192 unsigned seq; 192 unsigned int seq;
193
193 do { 194 do {
194 seq = read_seqbegin(&sysctl_local_ports.lock); 195 seq = read_seqbegin(&sysctl_local_ports.lock);
195 196
@@ -206,17 +207,22 @@ static int ping_init_sock(struct sock *sk)
206 gid_t range[2]; 207 gid_t range[2];
207 struct group_info *group_info = get_current_groups(); 208 struct group_info *group_info = get_current_groups();
208 int i, j, count = group_info->ngroups; 209 int i, j, count = group_info->ngroups;
210 kgid_t low, high;
209 211
210 inet_get_ping_group_range_net(net, range, range+1); 212 inet_get_ping_group_range_net(net, range, range+1);
213 low = make_kgid(&init_user_ns, range[0]);
214 high = make_kgid(&init_user_ns, range[1]);
215 if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
216 return -EACCES;
217
211 if (range[0] <= group && group <= range[1]) 218 if (range[0] <= group && group <= range[1])
212 return 0; 219 return 0;
213 220
214 for (i = 0; i < group_info->nblocks; i++) { 221 for (i = 0; i < group_info->nblocks; i++) {
215 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); 222 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
216
217 for (j = 0; j < cp_count; j++) { 223 for (j = 0; j < cp_count; j++) {
218 group = group_info->blocks[i][j]; 224 kgid_t gid = group_info->blocks[i][j];
219 if (range[0] <= group && group <= range[1]) 225 if (gid_lte(low, gid) && gid_lte(gid, high))
220 return 0; 226 return 0;
221 } 227 }
222 228
@@ -229,7 +235,7 @@ static int ping_init_sock(struct sock *sk)
229static void ping_close(struct sock *sk, long timeout) 235static void ping_close(struct sock *sk, long timeout)
230{ 236{
231 pr_debug("ping_close(sk=%p,sk->num=%u)\n", 237 pr_debug("ping_close(sk=%p,sk->num=%u)\n",
232 inet_sk(sk), inet_sk(sk)->inet_num); 238 inet_sk(sk), inet_sk(sk)->inet_num);
233 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); 239 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
234 240
235 sk_common_release(sk); 241 sk_common_release(sk);
@@ -252,7 +258,7 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
252 return -EINVAL; 258 return -EINVAL;
253 259
254 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n", 260 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
255 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port)); 261 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
256 262
257 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 263 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
258 if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) 264 if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
@@ -280,9 +286,9 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
280 } 286 }
281 287
282 pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n", 288 pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
283 (int)isk->inet_num, 289 (int)isk->inet_num,
284 &isk->inet_rcv_saddr, 290 &isk->inet_rcv_saddr,
285 (int)sk->sk_bound_dev_if); 291 (int)sk->sk_bound_dev_if);
286 292
287 err = 0; 293 err = 0;
288 if (isk->inet_rcv_saddr) 294 if (isk->inet_rcv_saddr)
@@ -335,7 +341,7 @@ void ping_err(struct sk_buff *skb, u32 info)
335 return; 341 return;
336 342
337 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type, 343 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
338 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); 344 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
339 345
340 sk = ping_v4_lookup(net, iph->daddr, iph->saddr, 346 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
341 ntohs(icmph->un.echo.id), skb->dev->ifindex); 347 ntohs(icmph->un.echo.id), skb->dev->ifindex);
@@ -411,7 +417,7 @@ struct pingfakehdr {
411 __wsum wcheck; 417 __wsum wcheck;
412}; 418};
413 419
414static int ping_getfrag(void *from, char * to, 420static int ping_getfrag(void *from, char *to,
415 int offset, int fraglen, int odd, struct sk_buff *skb) 421 int offset, int fraglen, int odd, struct sk_buff *skb)
416{ 422{
417 struct pingfakehdr *pfh = (struct pingfakehdr *)from; 423 struct pingfakehdr *pfh = (struct pingfakehdr *)from;
@@ -556,7 +562,8 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
556 ipc.oif = inet->mc_index; 562 ipc.oif = inet->mc_index;
557 if (!saddr) 563 if (!saddr)
558 saddr = inet->mc_addr; 564 saddr = inet->mc_addr;
559 } 565 } else if (!ipc.oif)
566 ipc.oif = inet->uc_index;
560 567
561 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 568 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
562 RT_SCOPE_UNIVERSE, sk->sk_protocol, 569 RT_SCOPE_UNIVERSE, sk->sk_protocol,
@@ -678,7 +685,7 @@ out:
678static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 685static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
679{ 686{
680 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", 687 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
681 inet_sk(sk), inet_sk(sk)->inet_num, skb); 688 inet_sk(sk), inet_sk(sk)->inet_num, skb);
682 if (sock_queue_rcv_skb(sk, skb) < 0) { 689 if (sock_queue_rcv_skb(sk, skb) < 0) {
683 kfree_skb(skb); 690 kfree_skb(skb);
684 pr_debug("ping_queue_rcv_skb -> failed\n"); 691 pr_debug("ping_queue_rcv_skb -> failed\n");
@@ -704,7 +711,7 @@ void ping_rcv(struct sk_buff *skb)
704 /* We assume the packet has already been checked by icmp_rcv */ 711 /* We assume the packet has already been checked by icmp_rcv */
705 712
706 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", 713 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
707 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); 714 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
708 715
709 /* Push ICMP header back */ 716 /* Push ICMP header back */
710 skb_push(skb, skb->data - (u8 *)icmph); 717 skb_push(skb, skb->data - (u8 *)icmph);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6afc807ee2ad..8af0d44e4e22 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -256,6 +256,8 @@ static const struct snmp_mib snmp4_net_list[] = {
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
259 SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
260 SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
259 SNMP_MIB_SENTINEL 261 SNMP_MIB_SENTINEL
260}; 262};
261 263
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3ccda5ae8a27..4032b818f3e4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -288,7 +288,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
288 read_unlock(&raw_v4_hashinfo.lock); 288 read_unlock(&raw_v4_hashinfo.lock);
289} 289}
290 290
291static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) 291static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
292{ 292{
293 /* Charge it to the socket. */ 293 /* Charge it to the socket. */
294 294
@@ -491,11 +491,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
491 if (msg->msg_namelen < sizeof(*usin)) 491 if (msg->msg_namelen < sizeof(*usin))
492 goto out; 492 goto out;
493 if (usin->sin_family != AF_INET) { 493 if (usin->sin_family != AF_INET) {
494 static int complained; 494 pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
495 if (!complained++) 495 __func__, current->comm);
496 printk(KERN_INFO "%s forgot to set AF_INET in "
497 "raw sendmsg. Fix it!\n",
498 current->comm);
499 err = -EAFNOSUPPORT; 496 err = -EAFNOSUPPORT;
500 if (usin->sin_family) 497 if (usin->sin_family)
501 goto out; 498 goto out;
@@ -563,7 +560,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
563 ipc.oif = inet->mc_index; 560 ipc.oif = inet->mc_index;
564 if (!saddr) 561 if (!saddr)
565 saddr = inet->mc_addr; 562 saddr = inet->mc_addr;
566 } 563 } else if (!ipc.oif)
564 ipc.oif = inet->uc_index;
567 565
568 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 566 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
569 RT_SCOPE_UNIVERSE, 567 RT_SCOPE_UNIVERSE,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 019774796174..98b30d08efe9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -62,9 +62,10 @@
62 * 2 of the License, or (at your option) any later version. 62 * 2 of the License, or (at your option) any later version.
63 */ 63 */
64 64
65#define pr_fmt(fmt) "IPv4: " fmt
66
65#include <linux/module.h> 67#include <linux/module.h>
66#include <asm/uaccess.h> 68#include <asm/uaccess.h>
67#include <asm/system.h>
68#include <linux/bitops.h> 69#include <linux/bitops.h>
69#include <linux/types.h> 70#include <linux/types.h>
70#include <linux/kernel.h> 71#include <linux/kernel.h>
@@ -108,6 +109,7 @@
108#include <net/rtnetlink.h> 109#include <net/rtnetlink.h>
109#ifdef CONFIG_SYSCTL 110#ifdef CONFIG_SYSCTL
110#include <linux/sysctl.h> 111#include <linux/sysctl.h>
112#include <linux/kmemleak.h>
111#endif 113#endif
112#include <net/secure_seq.h> 114#include <net/secure_seq.h>
113 115
@@ -228,7 +230,7 @@ const __u8 ip_tos2prio[16] = {
228 TC_PRIO_INTERACTIVE_BULK, 230 TC_PRIO_INTERACTIVE_BULK,
229 ECN_OR_COST(INTERACTIVE_BULK) 231 ECN_OR_COST(INTERACTIVE_BULK)
230}; 232};
231 233EXPORT_SYMBOL(ip_tos2prio);
232 234
233/* 235/*
234 * Route cache. 236 * Route cache.
@@ -295,7 +297,7 @@ static inline void rt_hash_lock_init(void)
295#endif 297#endif
296 298
297static struct rt_hash_bucket *rt_hash_table __read_mostly; 299static struct rt_hash_bucket *rt_hash_table __read_mostly;
298static unsigned rt_hash_mask __read_mostly; 300static unsigned int rt_hash_mask __read_mostly;
299static unsigned int rt_hash_log __read_mostly; 301static unsigned int rt_hash_log __read_mostly;
300 302
301static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 303static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
@@ -958,8 +960,7 @@ void rt_cache_flush_batch(struct net *net)
958 960
959static void rt_emergency_hash_rebuild(struct net *net) 961static void rt_emergency_hash_rebuild(struct net *net)
960{ 962{
961 if (net_ratelimit()) 963 net_warn_ratelimited("Route hash chain too long!\n");
962 printk(KERN_WARNING "Route hash chain too long!\n");
963 rt_cache_invalidate(net); 964 rt_cache_invalidate(net);
964} 965}
965 966
@@ -1082,8 +1083,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
1082 goto out; 1083 goto out;
1083 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size) 1084 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1084 goto out; 1085 goto out;
1085 if (net_ratelimit()) 1086 net_warn_ratelimited("dst cache overflow\n");
1086 printk(KERN_WARNING "dst cache overflow\n");
1087 RT_CACHE_STAT_INC(gc_dst_overflow); 1087 RT_CACHE_STAT_INC(gc_dst_overflow);
1088 return 1; 1088 return 1;
1089 1089
@@ -1116,12 +1116,17 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
1116 static const __be32 inaddr_any = 0; 1116 static const __be32 inaddr_any = 0;
1117 struct net_device *dev = dst->dev; 1117 struct net_device *dev = dst->dev;
1118 const __be32 *pkey = daddr; 1118 const __be32 *pkey = daddr;
1119 const struct rtable *rt;
1119 struct neighbour *n; 1120 struct neighbour *n;
1120 1121
1122 rt = (const struct rtable *) dst;
1123
1121 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) 1124 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1122 pkey = &inaddr_any; 1125 pkey = &inaddr_any;
1126 else if (rt->rt_gateway)
1127 pkey = (const __be32 *) &rt->rt_gateway;
1123 1128
1124 n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); 1129 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
1125 if (n) 1130 if (n)
1126 return n; 1131 return n;
1127 return neigh_create(&arp_tbl, pkey, dev); 1132 return neigh_create(&arp_tbl, pkey, dev);
@@ -1137,7 +1142,7 @@ static int rt_bind_neighbour(struct rtable *rt)
1137 return 0; 1142 return 0;
1138} 1143}
1139 1144
1140static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt, 1145static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1141 struct sk_buff *skb, int ifindex) 1146 struct sk_buff *skb, int ifindex)
1142{ 1147{
1143 struct rtable *rth, *cand; 1148 struct rtable *rth, *cand;
@@ -1175,9 +1180,7 @@ restart:
1175 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) { 1180 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1176 int err = rt_bind_neighbour(rt); 1181 int err = rt_bind_neighbour(rt);
1177 if (err) { 1182 if (err) {
1178 if (net_ratelimit()) 1183 net_warn_ratelimited("Neighbour table failure & not caching routes\n");
1179 printk(KERN_WARNING
1180 "Neighbour table failure & not caching routes.\n");
1181 ip_rt_put(rt); 1184 ip_rt_put(rt);
1182 return ERR_PTR(err); 1185 return ERR_PTR(err);
1183 } 1186 }
@@ -1253,7 +1256,7 @@ restart:
1253 struct net *net = dev_net(rt->dst.dev); 1256 struct net *net = dev_net(rt->dst.dev);
1254 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1257 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1255 if (!rt_caching(net)) { 1258 if (!rt_caching(net)) {
1256 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1259 pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
1257 rt->dst.dev->name, num); 1260 rt->dst.dev->name, num);
1258 } 1261 }
1259 rt_emergency_hash_rebuild(net); 1262 rt_emergency_hash_rebuild(net);
@@ -1293,8 +1296,7 @@ restart:
1293 goto restart; 1296 goto restart;
1294 } 1297 }
1295 1298
1296 if (net_ratelimit()) 1299 net_warn_ratelimited("Neighbour table overflow\n");
1297 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1298 rt_drop(rt); 1300 rt_drop(rt);
1299 return ERR_PTR(-ENOBUFS); 1301 return ERR_PTR(-ENOBUFS);
1300 } 1302 }
@@ -1372,14 +1374,13 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1372 return; 1374 return;
1373 } 1375 }
1374 } else if (!rt) 1376 } else if (!rt)
1375 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 1377 pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
1376 __builtin_return_address(0));
1377 1378
1378 ip_select_fb_ident(iph); 1379 ip_select_fb_ident(iph);
1379} 1380}
1380EXPORT_SYMBOL(__ip_select_ident); 1381EXPORT_SYMBOL(__ip_select_ident);
1381 1382
1382static void rt_del(unsigned hash, struct rtable *rt) 1383static void rt_del(unsigned int hash, struct rtable *rt)
1383{ 1384{
1384 struct rtable __rcu **rthp; 1385 struct rtable __rcu **rthp;
1385 struct rtable *aux; 1386 struct rtable *aux;
@@ -1497,11 +1498,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1497 1498
1498reject_redirect: 1499reject_redirect:
1499#ifdef CONFIG_IP_ROUTE_VERBOSE 1500#ifdef CONFIG_IP_ROUTE_VERBOSE
1500 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 1501 if (IN_DEV_LOG_MARTIANS(in_dev))
1501 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n" 1502 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
1502 " Advised path = %pI4 -> %pI4\n", 1503 " Advised path = %pI4 -> %pI4\n",
1503 &old_gw, dev->name, &new_gw, 1504 &old_gw, dev->name, &new_gw,
1504 &saddr, &daddr); 1505 &saddr, &daddr);
1505#endif 1506#endif
1506 ; 1507 ;
1507} 1508}
@@ -1533,7 +1534,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1533 ip_rt_put(rt); 1534 ip_rt_put(rt);
1534 ret = NULL; 1535 ret = NULL;
1535 } else if (rt->rt_flags & RTCF_REDIRECTED) { 1536 } else if (rt->rt_flags & RTCF_REDIRECTED) {
1536 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1537 unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1537 rt->rt_oif, 1538 rt->rt_oif,
1538 rt_genid(dev_net(dst->dev))); 1539 rt_genid(dev_net(dst->dev)));
1539 rt_del(hash, rt); 1540 rt_del(hash, rt);
@@ -1611,11 +1612,10 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1611 ++peer->rate_tokens; 1612 ++peer->rate_tokens;
1612#ifdef CONFIG_IP_ROUTE_VERBOSE 1613#ifdef CONFIG_IP_ROUTE_VERBOSE
1613 if (log_martians && 1614 if (log_martians &&
1614 peer->rate_tokens == ip_rt_redirect_number && 1615 peer->rate_tokens == ip_rt_redirect_number)
1615 net_ratelimit()) 1616 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
1616 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1617 &ip_hdr(skb)->saddr, rt->rt_iif,
1617 &ip_hdr(skb)->saddr, rt->rt_iif, 1618 &rt->rt_dst, &rt->rt_gateway);
1618 &rt->rt_dst, &rt->rt_gateway);
1619#endif 1619#endif
1620 } 1620 }
1621} 1621}
@@ -1838,9 +1838,9 @@ static void ipv4_link_failure(struct sk_buff *skb)
1838 1838
1839static int ip_rt_bug(struct sk_buff *skb) 1839static int ip_rt_bug(struct sk_buff *skb)
1840{ 1840{
1841 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n", 1841 pr_debug("%s: %pI4 -> %pI4, %s\n",
1842 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, 1842 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1843 skb->dev ? skb->dev->name : "?"); 1843 skb->dev ? skb->dev->name : "?");
1844 kfree_skb(skb); 1844 kfree_skb(skb);
1845 WARN_ON(1); 1845 WARN_ON(1);
1846 return 0; 1846 return 0;
@@ -2036,7 +2036,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2036 if (err < 0) 2036 if (err < 0)
2037 goto e_err; 2037 goto e_err;
2038 } 2038 }
2039 rth = rt_dst_alloc(init_net.loopback_dev, 2039 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
2040 IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 2040 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2041 if (!rth) 2041 if (!rth)
2042 goto e_nobufs; 2042 goto e_nobufs;
@@ -2100,18 +2100,13 @@ static void ip_handle_martian_source(struct net_device *dev,
2100 * RFC1812 recommendation, if source is martian, 2100 * RFC1812 recommendation, if source is martian,
2101 * the only hint is MAC header. 2101 * the only hint is MAC header.
2102 */ 2102 */
2103 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n", 2103 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
2104 &daddr, &saddr, dev->name); 2104 &daddr, &saddr, dev->name);
2105 if (dev->hard_header_len && skb_mac_header_was_set(skb)) { 2105 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
2106 int i; 2106 print_hex_dump(KERN_WARNING, "ll header: ",
2107 const unsigned char *p = skb_mac_header(skb); 2107 DUMP_PREFIX_OFFSET, 16, 1,
2108 printk(KERN_WARNING "ll header: "); 2108 skb_mac_header(skb),
2109 for (i = 0; i < dev->hard_header_len; i++, p++) { 2109 dev->hard_header_len, true);
2110 printk("%02x", *p);
2111 if (i < (dev->hard_header_len - 1))
2112 printk(":");
2113 }
2114 printk("\n");
2115 } 2110 }
2116 } 2111 }
2117#endif 2112#endif
@@ -2134,9 +2129,7 @@ static int __mkroute_input(struct sk_buff *skb,
2134 /* get a working reference to the output device */ 2129 /* get a working reference to the output device */
2135 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 2130 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
2136 if (out_dev == NULL) { 2131 if (out_dev == NULL) {
2137 if (net_ratelimit()) 2132 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
2138 printk(KERN_CRIT "Bug in ip_route_input" \
2139 "_slow(). Please, report\n");
2140 return -EINVAL; 2133 return -EINVAL;
2141 } 2134 }
2142 2135
@@ -2216,9 +2209,9 @@ static int ip_mkroute_input(struct sk_buff *skb,
2216 struct in_device *in_dev, 2209 struct in_device *in_dev,
2217 __be32 daddr, __be32 saddr, u32 tos) 2210 __be32 daddr, __be32 saddr, u32 tos)
2218{ 2211{
2219 struct rtable* rth = NULL; 2212 struct rtable *rth = NULL;
2220 int err; 2213 int err;
2221 unsigned hash; 2214 unsigned int hash;
2222 2215
2223#ifdef CONFIG_IP_ROUTE_MULTIPATH 2216#ifdef CONFIG_IP_ROUTE_MULTIPATH
2224 if (res->fi && res->fi->fib_nhs > 1) 2217 if (res->fi && res->fi->fib_nhs > 1)
@@ -2256,13 +2249,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2256 struct fib_result res; 2249 struct fib_result res;
2257 struct in_device *in_dev = __in_dev_get_rcu(dev); 2250 struct in_device *in_dev = __in_dev_get_rcu(dev);
2258 struct flowi4 fl4; 2251 struct flowi4 fl4;
2259 unsigned flags = 0; 2252 unsigned int flags = 0;
2260 u32 itag = 0; 2253 u32 itag = 0;
2261 struct rtable * rth; 2254 struct rtable *rth;
2262 unsigned hash; 2255 unsigned int hash;
2263 __be32 spec_dst; 2256 __be32 spec_dst;
2264 int err = -EINVAL; 2257 int err = -EINVAL;
2265 struct net * net = dev_net(dev); 2258 struct net *net = dev_net(dev);
2266 2259
2267 /* IP on this device is disabled. */ 2260 /* IP on this device is disabled. */
2268 2261
@@ -2407,9 +2400,9 @@ no_route:
2407martian_destination: 2400martian_destination:
2408 RT_CACHE_STAT_INC(in_martian_dst); 2401 RT_CACHE_STAT_INC(in_martian_dst);
2409#ifdef CONFIG_IP_ROUTE_VERBOSE 2402#ifdef CONFIG_IP_ROUTE_VERBOSE
2410 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 2403 if (IN_DEV_LOG_MARTIANS(in_dev))
2411 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n", 2404 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2412 &daddr, &saddr, dev->name); 2405 &daddr, &saddr, dev->name);
2413#endif 2406#endif
2414 2407
2415e_hostunreach: 2408e_hostunreach:
@@ -2434,8 +2427,8 @@ martian_source_keep_err:
2434int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2427int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2435 u8 tos, struct net_device *dev, bool noref) 2428 u8 tos, struct net_device *dev, bool noref)
2436{ 2429{
2437 struct rtable * rth; 2430 struct rtable *rth;
2438 unsigned hash; 2431 unsigned int hash;
2439 int iif = dev->ifindex; 2432 int iif = dev->ifindex;
2440 struct net *net; 2433 struct net *net;
2441 int res; 2434 int res;
@@ -2973,7 +2966,8 @@ static int rt_fill_info(struct net *net,
2973 r->rtm_src_len = 0; 2966 r->rtm_src_len = 0;
2974 r->rtm_tos = rt->rt_key_tos; 2967 r->rtm_tos = rt->rt_key_tos;
2975 r->rtm_table = RT_TABLE_MAIN; 2968 r->rtm_table = RT_TABLE_MAIN;
2976 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2969 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2970 goto nla_put_failure;
2977 r->rtm_type = rt->rt_type; 2971 r->rtm_type = rt->rt_type;
2978 r->rtm_scope = RT_SCOPE_UNIVERSE; 2972 r->rtm_scope = RT_SCOPE_UNIVERSE;
2979 r->rtm_protocol = RTPROT_UNSPEC; 2973 r->rtm_protocol = RTPROT_UNSPEC;
@@ -2981,31 +2975,38 @@ static int rt_fill_info(struct net *net,
2981 if (rt->rt_flags & RTCF_NOTIFY) 2975 if (rt->rt_flags & RTCF_NOTIFY)
2982 r->rtm_flags |= RTM_F_NOTIFY; 2976 r->rtm_flags |= RTM_F_NOTIFY;
2983 2977
2984 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst); 2978 if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
2985 2979 goto nla_put_failure;
2986 if (rt->rt_key_src) { 2980 if (rt->rt_key_src) {
2987 r->rtm_src_len = 32; 2981 r->rtm_src_len = 32;
2988 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src); 2982 if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
2983 goto nla_put_failure;
2989 } 2984 }
2990 if (rt->dst.dev) 2985 if (rt->dst.dev &&
2991 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); 2986 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2987 goto nla_put_failure;
2992#ifdef CONFIG_IP_ROUTE_CLASSID 2988#ifdef CONFIG_IP_ROUTE_CLASSID
2993 if (rt->dst.tclassid) 2989 if (rt->dst.tclassid &&
2994 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); 2990 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2991 goto nla_put_failure;
2995#endif 2992#endif
2996 if (rt_is_input_route(rt)) 2993 if (rt_is_input_route(rt)) {
2997 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2994 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
2998 else if (rt->rt_src != rt->rt_key_src) 2995 goto nla_put_failure;
2999 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); 2996 } else if (rt->rt_src != rt->rt_key_src) {
3000 2997 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
3001 if (rt->rt_dst != rt->rt_gateway) 2998 goto nla_put_failure;
3002 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); 2999 }
3000 if (rt->rt_dst != rt->rt_gateway &&
3001 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
3002 goto nla_put_failure;
3003 3003
3004 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 3004 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
3005 goto nla_put_failure; 3005 goto nla_put_failure;
3006 3006
3007 if (rt->rt_mark) 3007 if (rt->rt_mark &&
3008 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); 3008 nla_put_be32(skb, RTA_MARK, rt->rt_mark))
3009 goto nla_put_failure;
3009 3010
3010 error = rt->dst.error; 3011 error = rt->dst.error;
3011 if (peer) { 3012 if (peer) {
@@ -3046,7 +3047,8 @@ static int rt_fill_info(struct net *net,
3046 } 3047 }
3047 } else 3048 } else
3048#endif 3049#endif
3049 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif); 3050 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
3051 goto nla_put_failure;
3050 } 3052 }
3051 3053
3052 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, 3054 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -3060,7 +3062,7 @@ nla_put_failure:
3060 return -EMSGSIZE; 3062 return -EMSGSIZE;
3061} 3063}
3062 3064
3063static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 3065static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
3064{ 3066{
3065 struct net *net = sock_net(in_skb->sk); 3067 struct net *net = sock_net(in_skb->sk);
3066 struct rtmsg *rtm; 3068 struct rtmsg *rtm;
@@ -3335,23 +3337,6 @@ static ctl_table ipv4_route_table[] = {
3335 { } 3337 { }
3336}; 3338};
3337 3339
3338static struct ctl_table empty[1];
3339
3340static struct ctl_table ipv4_skeleton[] =
3341{
3342 { .procname = "route",
3343 .mode = 0555, .child = ipv4_route_table},
3344 { .procname = "neigh",
3345 .mode = 0555, .child = empty},
3346 { }
3347};
3348
3349static __net_initdata struct ctl_path ipv4_path[] = {
3350 { .procname = "net", },
3351 { .procname = "ipv4", },
3352 { },
3353};
3354
3355static struct ctl_table ipv4_route_flush_table[] = { 3340static struct ctl_table ipv4_route_flush_table[] = {
3356 { 3341 {
3357 .procname = "flush", 3342 .procname = "flush",
@@ -3362,13 +3347,6 @@ static struct ctl_table ipv4_route_flush_table[] = {
3362 { }, 3347 { },
3363}; 3348};
3364 3349
3365static __net_initdata struct ctl_path ipv4_route_path[] = {
3366 { .procname = "net", },
3367 { .procname = "ipv4", },
3368 { .procname = "route", },
3369 { },
3370};
3371
3372static __net_init int sysctl_route_net_init(struct net *net) 3350static __net_init int sysctl_route_net_init(struct net *net)
3373{ 3351{
3374 struct ctl_table *tbl; 3352 struct ctl_table *tbl;
@@ -3381,8 +3359,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
3381 } 3359 }
3382 tbl[0].extra1 = net; 3360 tbl[0].extra1 = net;
3383 3361
3384 net->ipv4.route_hdr = 3362 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3385 register_net_sysctl_table(net, ipv4_route_path, tbl);
3386 if (net->ipv4.route_hdr == NULL) 3363 if (net->ipv4.route_hdr == NULL)
3387 goto err_reg; 3364 goto err_reg;
3388 return 0; 3365 return 0;
@@ -3431,9 +3408,15 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3431static __initdata unsigned long rhash_entries; 3408static __initdata unsigned long rhash_entries;
3432static int __init set_rhash_entries(char *str) 3409static int __init set_rhash_entries(char *str)
3433{ 3410{
3411 ssize_t ret;
3412
3434 if (!str) 3413 if (!str)
3435 return 0; 3414 return 0;
3436 rhash_entries = simple_strtoul(str, &str, 0); 3415
3416 ret = kstrtoul(str, 0, &rhash_entries);
3417 if (ret)
3418 return 0;
3419
3437 return 1; 3420 return 1;
3438} 3421}
3439__setup("rhash_entries=", set_rhash_entries); 3422__setup("rhash_entries=", set_rhash_entries);
@@ -3469,6 +3452,7 @@ int __init ip_rt_init(void)
3469 0, 3452 0,
3470 &rt_hash_log, 3453 &rt_hash_log,
3471 &rt_hash_mask, 3454 &rt_hash_mask,
3455 0,
3472 rhash_entries ? 0 : 512 * 1024); 3456 rhash_entries ? 0 : 512 * 1024);
3473 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); 3457 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3474 rt_hash_lock_init(); 3458 rt_hash_lock_init();
@@ -3485,7 +3469,7 @@ int __init ip_rt_init(void)
3485 net_random() % ip_rt_gc_interval + ip_rt_gc_interval); 3469 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3486 3470
3487 if (ip_rt_proc_init()) 3471 if (ip_rt_proc_init())
3488 printk(KERN_ERR "Unable to create route proc files\n"); 3472 pr_err("Unable to create route proc files\n");
3489#ifdef CONFIG_XFRM 3473#ifdef CONFIG_XFRM
3490 xfrm_init(); 3474 xfrm_init();
3491 xfrm4_init(ip_rt_max_size); 3475 xfrm4_init(ip_rt_max_size);
@@ -3506,6 +3490,6 @@ int __init ip_rt_init(void)
3506 */ 3490 */
3507void __init ip_static_sysctl_init(void) 3491void __init ip_static_sysctl_init(void)
3508{ 3492{
3509 register_sysctl_paths(ipv4_path, ipv4_skeleton); 3493 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3510} 3494}
3511#endif 3495#endif
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724da9bff..ef32956ed655 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
27#include <net/tcp_memcontrol.h> 27#include <net/tcp_memcontrol.h>
28 28
29static int zero; 29static int zero;
30static int two = 2;
30static int tcp_retr1_max = 255; 31static int tcp_retr1_max = 255;
31static int ip_local_port_range_min[] = { 1, 1 }; 32static int ip_local_port_range_min[] = { 1, 1 };
32static int ip_local_port_range_max[] = { 65535, 65535 }; 33static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -78,7 +79,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
78static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) 79static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
79{ 80{
80 gid_t *data = table->data; 81 gid_t *data = table->data;
81 unsigned seq; 82 unsigned int seq;
82 do { 83 do {
83 seq = read_seqbegin(&sysctl_local_ports.lock); 84 seq = read_seqbegin(&sysctl_local_ports.lock);
84 85
@@ -677,6 +678,15 @@ static struct ctl_table ipv4_table[] = {
677 .proc_handler = proc_dointvec 678 .proc_handler = proc_dointvec
678 }, 679 },
679 { 680 {
681 .procname = "tcp_early_retrans",
682 .data = &sysctl_tcp_early_retrans,
683 .maxlen = sizeof(int),
684 .mode = 0644,
685 .proc_handler = proc_dointvec_minmax,
686 .extra1 = &zero,
687 .extra2 = &two,
688 },
689 {
680 .procname = "udp_mem", 690 .procname = "udp_mem",
681 .data = &sysctl_udp_mem, 691 .data = &sysctl_udp_mem,
682 .maxlen = sizeof(sysctl_udp_mem), 692 .maxlen = sizeof(sysctl_udp_mem),
@@ -768,13 +778,6 @@ static struct ctl_table ipv4_net_table[] = {
768 { } 778 { }
769}; 779};
770 780
771struct ctl_path net_ipv4_ctl_path[] = {
772 { .procname = "net", },
773 { .procname = "ipv4", },
774 { },
775};
776EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
777
778static __net_init int ipv4_sysctl_init_net(struct net *net) 781static __net_init int ipv4_sysctl_init_net(struct net *net)
779{ 782{
780 struct ctl_table *table; 783 struct ctl_table *table;
@@ -815,8 +818,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
815 818
816 tcp_init_mem(net); 819 tcp_init_mem(net);
817 820
818 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 821 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
819 net_ipv4_ctl_path, table);
820 if (net->ipv4.ipv4_hdr == NULL) 822 if (net->ipv4.ipv4_hdr == NULL)
821 goto err_reg; 823 goto err_reg;
822 824
@@ -857,12 +859,12 @@ static __init int sysctl_ipv4_init(void)
857 if (!i->procname) 859 if (!i->procname)
858 return -EINVAL; 860 return -EINVAL;
859 861
860 hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); 862 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
861 if (hdr == NULL) 863 if (hdr == NULL)
862 return -ENOMEM; 864 return -ENOMEM;
863 865
864 if (register_pernet_subsys(&ipv4_sysctl_ops)) { 866 if (register_pernet_subsys(&ipv4_sysctl_ops)) {
865 unregister_sysctl_table(hdr); 867 unregister_net_sysctl_table(hdr);
866 return -ENOMEM; 868 return -ENOMEM;
867 } 869 }
868 870
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 22ef5f9fd2ff..3ba605f60e4e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -245,6 +245,8 @@
245 * TCP_CLOSE socket is finished 245 * TCP_CLOSE socket is finished
246 */ 246 */
247 247
248#define pr_fmt(fmt) "TCP: " fmt
249
248#include <linux/kernel.h> 250#include <linux/kernel.h>
249#include <linux/module.h> 251#include <linux/module.h>
250#include <linux/types.h> 252#include <linux/types.h>
@@ -361,6 +363,71 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
361 return period; 363 return period;
362} 364}
363 365
366/* Address-family independent initialization for a tcp_sock.
367 *
368 * NOTE: A lot of things set to zero explicitly by call to
369 * sk_alloc() so need not be done here.
370 */
371void tcp_init_sock(struct sock *sk)
372{
373 struct inet_connection_sock *icsk = inet_csk(sk);
374 struct tcp_sock *tp = tcp_sk(sk);
375
376 skb_queue_head_init(&tp->out_of_order_queue);
377 tcp_init_xmit_timers(sk);
378 tcp_prequeue_init(tp);
379
380 icsk->icsk_rto = TCP_TIMEOUT_INIT;
381 tp->mdev = TCP_TIMEOUT_INIT;
382
383 /* So many TCP implementations out there (incorrectly) count the
384 * initial SYN frame in their delayed-ACK and congestion control
385 * algorithms that we must have the following bandaid to talk
386 * efficiently to them. -DaveM
387 */
388 tp->snd_cwnd = TCP_INIT_CWND;
389
390 /* See draft-stevens-tcpca-spec-01 for discussion of the
391 * initialization of these values.
392 */
393 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
394 tp->snd_cwnd_clamp = ~0;
395 tp->mss_cache = TCP_MSS_DEFAULT;
396
397 tp->reordering = sysctl_tcp_reordering;
398 tcp_enable_early_retrans(tp);
399 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
400
401 sk->sk_state = TCP_CLOSE;
402
403 sk->sk_write_space = sk_stream_write_space;
404 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
405
406 icsk->icsk_sync_mss = tcp_sync_mss;
407
408 /* TCP Cookie Transactions */
409 if (sysctl_tcp_cookie_size > 0) {
410 /* Default, cookies without s_data_payload. */
411 tp->cookie_values =
412 kzalloc(sizeof(*tp->cookie_values),
413 sk->sk_allocation);
414 if (tp->cookie_values != NULL)
415 kref_init(&tp->cookie_values->kref);
416 }
417 /* Presumed zeroed, in order of appearance:
418 * cookie_in_always, cookie_out_never,
419 * s_data_constant, s_data_in, s_data_out
420 */
421 sk->sk_sndbuf = sysctl_tcp_wmem[1];
422 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
423
424 local_bh_disable();
425 sock_update_memcg(sk);
426 sk_sockets_allocated_inc(sk);
427 local_bh_enable();
428}
429EXPORT_SYMBOL(tcp_init_sock);
430
364/* 431/*
365 * Wait for a TCP event. 432 * Wait for a TCP event.
366 * 433 *
@@ -526,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
526 tp->pushed_seq = tp->write_seq; 593 tp->pushed_seq = tp->write_seq;
527} 594}
528 595
529static inline int forced_push(const struct tcp_sock *tp) 596static inline bool forced_push(const struct tcp_sock *tp)
530{ 597{
531 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 598 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
532} 599}
@@ -699,11 +766,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
699 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 766 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
700 if (skb) { 767 if (skb) {
701 if (sk_wmem_schedule(sk, skb->truesize)) { 768 if (sk_wmem_schedule(sk, skb->truesize)) {
769 skb_reserve(skb, sk->sk_prot->max_header);
702 /* 770 /*
703 * Make sure that we have exactly size bytes 771 * Make sure that we have exactly size bytes
704 * available to the caller, no more, no less. 772 * available to the caller, no more, no less.
705 */ 773 */
706 skb_reserve(skb, skb_tailroom(skb) - size); 774 skb->avail_size = size;
707 return skb; 775 return skb;
708 } 776 }
709 __kfree_skb(skb); 777 __kfree_skb(skb);
@@ -781,9 +849,10 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
781 while (psize > 0) { 849 while (psize > 0) {
782 struct sk_buff *skb = tcp_write_queue_tail(sk); 850 struct sk_buff *skb = tcp_write_queue_tail(sk);
783 struct page *page = pages[poffset / PAGE_SIZE]; 851 struct page *page = pages[poffset / PAGE_SIZE];
784 int copy, i, can_coalesce; 852 int copy, i;
785 int offset = poffset % PAGE_SIZE; 853 int offset = poffset % PAGE_SIZE;
786 int size = min_t(size_t, psize, PAGE_SIZE - offset); 854 int size = min_t(size_t, psize, PAGE_SIZE - offset);
855 bool can_coalesce;
787 856
788 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 857 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
789new_segment: 858new_segment:
@@ -848,8 +917,7 @@ new_segment:
848wait_for_sndbuf: 917wait_for_sndbuf:
849 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 918 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
850wait_for_memory: 919wait_for_memory:
851 if (copied) 920 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
852 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
853 921
854 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 922 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
855 goto do_error; 923 goto do_error;
@@ -858,7 +926,7 @@ wait_for_memory:
858 } 926 }
859 927
860out: 928out:
861 if (copied) 929 if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
862 tcp_push(sk, flags, mss_now, tp->nonagle); 930 tcp_push(sk, flags, mss_now, tp->nonagle);
863 return copied; 931 return copied;
864 932
@@ -916,7 +984,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
916 struct tcp_sock *tp = tcp_sk(sk); 984 struct tcp_sock *tp = tcp_sk(sk);
917 struct sk_buff *skb; 985 struct sk_buff *skb;
918 int iovlen, flags, err, copied; 986 int iovlen, flags, err, copied;
919 int mss_now, size_goal; 987 int mss_now = 0, size_goal;
920 bool sg; 988 bool sg;
921 long timeo; 989 long timeo;
922 990
@@ -930,6 +998,19 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
930 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 998 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
931 goto out_err; 999 goto out_err;
932 1000
1001 if (unlikely(tp->repair)) {
1002 if (tp->repair_queue == TCP_RECV_QUEUE) {
1003 copied = tcp_send_rcvq(sk, msg, size);
1004 goto out;
1005 }
1006
1007 err = -EINVAL;
1008 if (tp->repair_queue == TCP_NO_QUEUE)
1009 goto out_err;
1010
1011 /* 'common' sending to sendq */
1012 }
1013
933 /* This should be in poll */ 1014 /* This should be in poll */
934 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1015 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
935 1016
@@ -993,15 +1074,14 @@ new_segment:
993 copy = seglen; 1074 copy = seglen;
994 1075
995 /* Where to copy to? */ 1076 /* Where to copy to? */
996 if (skb_tailroom(skb) > 0) { 1077 if (skb_availroom(skb) > 0) {
997 /* We have some space in skb head. Superb! */ 1078 /* We have some space in skb head. Superb! */
998 if (copy > skb_tailroom(skb)) 1079 copy = min_t(int, copy, skb_availroom(skb));
999 copy = skb_tailroom(skb);
1000 err = skb_add_data_nocache(sk, skb, from, copy); 1080 err = skb_add_data_nocache(sk, skb, from, copy);
1001 if (err) 1081 if (err)
1002 goto do_fault; 1082 goto do_fault;
1003 } else { 1083 } else {
1004 int merge = 0; 1084 bool merge = false;
1005 int i = skb_shinfo(skb)->nr_frags; 1085 int i = skb_shinfo(skb)->nr_frags;
1006 struct page *page = sk->sk_sndmsg_page; 1086 struct page *page = sk->sk_sndmsg_page;
1007 int off; 1087 int off;
@@ -1015,7 +1095,7 @@ new_segment:
1015 off != PAGE_SIZE) { 1095 off != PAGE_SIZE) {
1016 /* We can extend the last page 1096 /* We can extend the last page
1017 * fragment. */ 1097 * fragment. */
1018 merge = 1; 1098 merge = true;
1019 } else if (i == MAX_SKB_FRAGS || !sg) { 1099 } else if (i == MAX_SKB_FRAGS || !sg) {
1020 /* Need to add new fragment and cannot 1100 /* Need to add new fragment and cannot
1021 * do this because interface is non-SG, 1101 * do this because interface is non-SG,
@@ -1087,7 +1167,7 @@ new_segment:
1087 if ((seglen -= copy) == 0 && iovlen == 0) 1167 if ((seglen -= copy) == 0 && iovlen == 0)
1088 goto out; 1168 goto out;
1089 1169
1090 if (skb->len < max || (flags & MSG_OOB)) 1170 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1091 continue; 1171 continue;
1092 1172
1093 if (forced_push(tp)) { 1173 if (forced_push(tp)) {
@@ -1100,7 +1180,7 @@ new_segment:
1100wait_for_sndbuf: 1180wait_for_sndbuf:
1101 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1181 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1102wait_for_memory: 1182wait_for_memory:
1103 if (copied) 1183 if (copied && likely(!tp->repair))
1104 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 1184 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1105 1185
1106 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1186 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1111,7 +1191,7 @@ wait_for_memory:
1111 } 1191 }
1112 1192
1113out: 1193out:
1114 if (copied) 1194 if (copied && likely(!tp->repair))
1115 tcp_push(sk, flags, mss_now, tp->nonagle); 1195 tcp_push(sk, flags, mss_now, tp->nonagle);
1116 release_sock(sk); 1196 release_sock(sk);
1117 return copied; 1197 return copied;
@@ -1185,6 +1265,24 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1185 return -EAGAIN; 1265 return -EAGAIN;
1186} 1266}
1187 1267
1268static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1269{
1270 struct sk_buff *skb;
1271 int copied = 0, err = 0;
1272
1273 /* XXX -- need to support SO_PEEK_OFF */
1274
1275 skb_queue_walk(&sk->sk_write_queue, skb) {
1276 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1277 if (err)
1278 break;
1279
1280 copied += skb->len;
1281 }
1282
1283 return err ?: copied;
1284}
1285
1188/* Clean up the receive buffer for full frames taken by the user, 1286/* Clean up the receive buffer for full frames taken by the user,
1189 * then send an ACK if necessary. COPIED is the number of bytes 1287 * then send an ACK if necessary. COPIED is the number of bytes
1190 * tcp_recvmsg has given to the user so far, it speeds up the 1288 * tcp_recvmsg has given to the user so far, it speeds up the
@@ -1194,7 +1292,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1194void tcp_cleanup_rbuf(struct sock *sk, int copied) 1292void tcp_cleanup_rbuf(struct sock *sk, int copied)
1195{ 1293{
1196 struct tcp_sock *tp = tcp_sk(sk); 1294 struct tcp_sock *tp = tcp_sk(sk);
1197 int time_to_ack = 0; 1295 bool time_to_ack = false;
1198 1296
1199 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1297 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1200 1298
@@ -1220,7 +1318,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1220 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1318 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1221 !icsk->icsk_ack.pingpong)) && 1319 !icsk->icsk_ack.pingpong)) &&
1222 !atomic_read(&sk->sk_rmem_alloc))) 1320 !atomic_read(&sk->sk_rmem_alloc)))
1223 time_to_ack = 1; 1321 time_to_ack = true;
1224 } 1322 }
1225 1323
1226 /* We send an ACK if we can now advertise a non-zero window 1324 /* We send an ACK if we can now advertise a non-zero window
@@ -1242,7 +1340,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1242 * "Lots" means "at least twice" here. 1340 * "Lots" means "at least twice" here.
1243 */ 1341 */
1244 if (new_window && new_window >= 2 * rcv_window_now) 1342 if (new_window && new_window >= 2 * rcv_window_now)
1245 time_to_ack = 1; 1343 time_to_ack = true;
1246 } 1344 }
1247 } 1345 }
1248 if (time_to_ack) 1346 if (time_to_ack)
@@ -1374,11 +1472,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1374 break; 1472 break;
1375 } 1473 }
1376 if (tcp_hdr(skb)->fin) { 1474 if (tcp_hdr(skb)->fin) {
1377 sk_eat_skb(sk, skb, 0); 1475 sk_eat_skb(sk, skb, false);
1378 ++seq; 1476 ++seq;
1379 break; 1477 break;
1380 } 1478 }
1381 sk_eat_skb(sk, skb, 0); 1479 sk_eat_skb(sk, skb, false);
1382 if (!desc->count) 1480 if (!desc->count)
1383 break; 1481 break;
1384 tp->copied_seq = seq; 1482 tp->copied_seq = seq;
@@ -1414,7 +1512,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1414 int target; /* Read at least this many bytes */ 1512 int target; /* Read at least this many bytes */
1415 long timeo; 1513 long timeo;
1416 struct task_struct *user_recv = NULL; 1514 struct task_struct *user_recv = NULL;
1417 int copied_early = 0; 1515 bool copied_early = false;
1418 struct sk_buff *skb; 1516 struct sk_buff *skb;
1419 u32 urg_hole = 0; 1517 u32 urg_hole = 0;
1420 1518
@@ -1430,6 +1528,21 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1430 if (flags & MSG_OOB) 1528 if (flags & MSG_OOB)
1431 goto recv_urg; 1529 goto recv_urg;
1432 1530
1531 if (unlikely(tp->repair)) {
1532 err = -EPERM;
1533 if (!(flags & MSG_PEEK))
1534 goto out;
1535
1536 if (tp->repair_queue == TCP_SEND_QUEUE)
1537 goto recv_sndq;
1538
1539 err = -EINVAL;
1540 if (tp->repair_queue == TCP_NO_QUEUE)
1541 goto out;
1542
1543 /* 'common' recv queue MSG_PEEK-ing */
1544 }
1545
1433 seq = &tp->copied_seq; 1546 seq = &tp->copied_seq;
1434 if (flags & MSG_PEEK) { 1547 if (flags & MSG_PEEK) {
1435 peek_seq = tp->copied_seq; 1548 peek_seq = tp->copied_seq;
@@ -1450,7 +1563,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1450 if ((available < target) && 1563 if ((available < target) &&
1451 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1564 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1452 !sysctl_tcp_low_latency && 1565 !sysctl_tcp_low_latency &&
1453 dma_find_channel(DMA_MEMCPY)) { 1566 net_dma_find_channel()) {
1454 preempt_enable_no_resched(); 1567 preempt_enable_no_resched();
1455 tp->ucopy.pinned_list = 1568 tp->ucopy.pinned_list =
1456 dma_pin_iovec_pages(msg->msg_iov, len); 1569 dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1631,9 +1744,9 @@ do_prequeue:
1631 } 1744 }
1632 if ((flags & MSG_PEEK) && 1745 if ((flags & MSG_PEEK) &&
1633 (peek_seq - copied - urg_hole != tp->copied_seq)) { 1746 (peek_seq - copied - urg_hole != tp->copied_seq)) {
1634 if (net_ratelimit()) 1747 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1635 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", 1748 current->comm,
1636 current->comm, task_pid_nr(current)); 1749 task_pid_nr(current));
1637 peek_seq = tp->copied_seq; 1750 peek_seq = tp->copied_seq;
1638 } 1751 }
1639 continue; 1752 continue;
@@ -1665,7 +1778,7 @@ do_prequeue:
1665 if (!(flags & MSG_TRUNC)) { 1778 if (!(flags & MSG_TRUNC)) {
1666#ifdef CONFIG_NET_DMA 1779#ifdef CONFIG_NET_DMA
1667 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1780 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1668 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1781 tp->ucopy.dma_chan = net_dma_find_channel();
1669 1782
1670 if (tp->ucopy.dma_chan) { 1783 if (tp->ucopy.dma_chan) {
1671 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1784 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1675,7 +1788,8 @@ do_prequeue:
1675 1788
1676 if (tp->ucopy.dma_cookie < 0) { 1789 if (tp->ucopy.dma_cookie < 0) {
1677 1790
1678 printk(KERN_ALERT "dma_cookie < 0\n"); 1791 pr_alert("%s: dma_cookie < 0\n",
1792 __func__);
1679 1793
1680 /* Exception. Bailout! */ 1794 /* Exception. Bailout! */
1681 if (!copied) 1795 if (!copied)
@@ -1686,7 +1800,7 @@ do_prequeue:
1686 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); 1800 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1687 1801
1688 if ((offset + used) == skb->len) 1802 if ((offset + used) == skb->len)
1689 copied_early = 1; 1803 copied_early = true;
1690 1804
1691 } else 1805 } else
1692#endif 1806#endif
@@ -1720,7 +1834,7 @@ skip_copy:
1720 goto found_fin_ok; 1834 goto found_fin_ok;
1721 if (!(flags & MSG_PEEK)) { 1835 if (!(flags & MSG_PEEK)) {
1722 sk_eat_skb(sk, skb, copied_early); 1836 sk_eat_skb(sk, skb, copied_early);
1723 copied_early = 0; 1837 copied_early = false;
1724 } 1838 }
1725 continue; 1839 continue;
1726 1840
@@ -1729,7 +1843,7 @@ skip_copy:
1729 ++*seq; 1843 ++*seq;
1730 if (!(flags & MSG_PEEK)) { 1844 if (!(flags & MSG_PEEK)) {
1731 sk_eat_skb(sk, skb, copied_early); 1845 sk_eat_skb(sk, skb, copied_early);
1732 copied_early = 0; 1846 copied_early = false;
1733 } 1847 }
1734 break; 1848 break;
1735 } while (len > 0); 1849 } while (len > 0);
@@ -1780,6 +1894,10 @@ out:
1780recv_urg: 1894recv_urg:
1781 err = tcp_recv_urg(sk, msg, len, flags); 1895 err = tcp_recv_urg(sk, msg, len, flags);
1782 goto out; 1896 goto out;
1897
1898recv_sndq:
1899 err = tcp_peek_sndq(sk, msg, len);
1900 goto out;
1783} 1901}
1784EXPORT_SYMBOL(tcp_recvmsg); 1902EXPORT_SYMBOL(tcp_recvmsg);
1785 1903
@@ -1883,10 +2001,10 @@ bool tcp_check_oom(struct sock *sk, int shift)
1883 too_many_orphans = tcp_too_many_orphans(sk, shift); 2001 too_many_orphans = tcp_too_many_orphans(sk, shift);
1884 out_of_socket_memory = tcp_out_of_memory(sk); 2002 out_of_socket_memory = tcp_out_of_memory(sk);
1885 2003
1886 if (too_many_orphans && net_ratelimit()) 2004 if (too_many_orphans)
1887 pr_info("TCP: too many orphaned sockets\n"); 2005 net_info_ratelimited("too many orphaned sockets\n");
1888 if (out_of_socket_memory && net_ratelimit()) 2006 if (out_of_socket_memory)
1889 pr_info("TCP: out of memory -- consider tuning tcp_mem\n"); 2007 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
1890 return too_many_orphans || out_of_socket_memory; 2008 return too_many_orphans || out_of_socket_memory;
1891} 2009}
1892 2010
@@ -1932,7 +2050,9 @@ void tcp_close(struct sock *sk, long timeout)
1932 * advertise a zero window, then kill -9 the FTP client, wheee... 2050 * advertise a zero window, then kill -9 the FTP client, wheee...
1933 * Note: timeout is always zero in such a case. 2051 * Note: timeout is always zero in such a case.
1934 */ 2052 */
1935 if (data_was_unread) { 2053 if (unlikely(tcp_sk(sk)->repair)) {
2054 sk->sk_prot->disconnect(sk, 0);
2055 } else if (data_was_unread) {
1936 /* Unread data was tossed, zap the connection. */ 2056 /* Unread data was tossed, zap the connection. */
1937 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 2057 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1938 tcp_set_state(sk, TCP_CLOSE); 2058 tcp_set_state(sk, TCP_CLOSE);
@@ -2050,7 +2170,7 @@ EXPORT_SYMBOL(tcp_close);
2050 2170
2051/* These states need RST on ABORT according to RFC793 */ 2171/* These states need RST on ABORT according to RFC793 */
2052 2172
2053static inline int tcp_need_reset(int state) 2173static inline bool tcp_need_reset(int state)
2054{ 2174{
2055 return (1 << state) & 2175 return (1 << state) &
2056 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2176 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2071,6 +2191,8 @@ int tcp_disconnect(struct sock *sk, int flags)
2071 /* ABORT function of RFC793 */ 2191 /* ABORT function of RFC793 */
2072 if (old_state == TCP_LISTEN) { 2192 if (old_state == TCP_LISTEN) {
2073 inet_csk_listen_stop(sk); 2193 inet_csk_listen_stop(sk);
2194 } else if (unlikely(tp->repair)) {
2195 sk->sk_err = ECONNABORTED;
2074 } else if (tcp_need_reset(old_state) || 2196 } else if (tcp_need_reset(old_state) ||
2075 (tp->snd_nxt != tp->write_seq && 2197 (tp->snd_nxt != tp->write_seq &&
2076 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2198 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2122,6 +2244,54 @@ int tcp_disconnect(struct sock *sk, int flags)
2122} 2244}
2123EXPORT_SYMBOL(tcp_disconnect); 2245EXPORT_SYMBOL(tcp_disconnect);
2124 2246
2247static inline bool tcp_can_repair_sock(const struct sock *sk)
2248{
2249 return capable(CAP_NET_ADMIN) &&
2250 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2251}
2252
2253static int tcp_repair_options_est(struct tcp_sock *tp,
2254 struct tcp_repair_opt __user *optbuf, unsigned int len)
2255{
2256 struct tcp_repair_opt opt;
2257
2258 while (len >= sizeof(opt)) {
2259 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2260 return -EFAULT;
2261
2262 optbuf++;
2263 len -= sizeof(opt);
2264
2265 switch (opt.opt_code) {
2266 case TCPOPT_MSS:
2267 tp->rx_opt.mss_clamp = opt.opt_val;
2268 break;
2269 case TCPOPT_WINDOW:
2270 if (opt.opt_val > 14)
2271 return -EFBIG;
2272
2273 tp->rx_opt.snd_wscale = opt.opt_val;
2274 break;
2275 case TCPOPT_SACK_PERM:
2276 if (opt.opt_val != 0)
2277 return -EINVAL;
2278
2279 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2280 if (sysctl_tcp_fack)
2281 tcp_enable_fack(tp);
2282 break;
2283 case TCPOPT_TIMESTAMP:
2284 if (opt.opt_val != 0)
2285 return -EINVAL;
2286
2287 tp->rx_opt.tstamp_ok = 1;
2288 break;
2289 }
2290 }
2291
2292 return 0;
2293}
2294
2125/* 2295/*
2126 * Socket option code for TCP. 2296 * Socket option code for TCP.
2127 */ 2297 */
@@ -2292,6 +2462,55 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2292 err = -EINVAL; 2462 err = -EINVAL;
2293 else 2463 else
2294 tp->thin_dupack = val; 2464 tp->thin_dupack = val;
2465 if (tp->thin_dupack)
2466 tcp_disable_early_retrans(tp);
2467 break;
2468
2469 case TCP_REPAIR:
2470 if (!tcp_can_repair_sock(sk))
2471 err = -EPERM;
2472 else if (val == 1) {
2473 tp->repair = 1;
2474 sk->sk_reuse = SK_FORCE_REUSE;
2475 tp->repair_queue = TCP_NO_QUEUE;
2476 } else if (val == 0) {
2477 tp->repair = 0;
2478 sk->sk_reuse = SK_NO_REUSE;
2479 tcp_send_window_probe(sk);
2480 } else
2481 err = -EINVAL;
2482
2483 break;
2484
2485 case TCP_REPAIR_QUEUE:
2486 if (!tp->repair)
2487 err = -EPERM;
2488 else if (val < TCP_QUEUES_NR)
2489 tp->repair_queue = val;
2490 else
2491 err = -EINVAL;
2492 break;
2493
2494 case TCP_QUEUE_SEQ:
2495 if (sk->sk_state != TCP_CLOSE)
2496 err = -EPERM;
2497 else if (tp->repair_queue == TCP_SEND_QUEUE)
2498 tp->write_seq = val;
2499 else if (tp->repair_queue == TCP_RECV_QUEUE)
2500 tp->rcv_nxt = val;
2501 else
2502 err = -EINVAL;
2503 break;
2504
2505 case TCP_REPAIR_OPTIONS:
2506 if (!tp->repair)
2507 err = -EINVAL;
2508 else if (sk->sk_state == TCP_ESTABLISHED)
2509 err = tcp_repair_options_est(tp,
2510 (struct tcp_repair_opt __user *)optval,
2511 optlen);
2512 else
2513 err = -EPERM;
2295 break; 2514 break;
2296 2515
2297 case TCP_CORK: 2516 case TCP_CORK:
@@ -2527,6 +2746,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2527 val = tp->mss_cache; 2746 val = tp->mss_cache;
2528 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2747 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2529 val = tp->rx_opt.user_mss; 2748 val = tp->rx_opt.user_mss;
2749 if (tp->repair)
2750 val = tp->rx_opt.mss_clamp;
2530 break; 2751 break;
2531 case TCP_NODELAY: 2752 case TCP_NODELAY:
2532 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2753 val = !!(tp->nonagle&TCP_NAGLE_OFF);
@@ -2629,6 +2850,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2629 val = tp->thin_dupack; 2850 val = tp->thin_dupack;
2630 break; 2851 break;
2631 2852
2853 case TCP_REPAIR:
2854 val = tp->repair;
2855 break;
2856
2857 case TCP_REPAIR_QUEUE:
2858 if (tp->repair)
2859 val = tp->repair_queue;
2860 else
2861 return -EINVAL;
2862 break;
2863
2864 case TCP_QUEUE_SEQ:
2865 if (tp->repair_queue == TCP_SEND_QUEUE)
2866 val = tp->write_seq;
2867 else if (tp->repair_queue == TCP_RECV_QUEUE)
2868 val = tp->rcv_nxt;
2869 else
2870 return -EINVAL;
2871 break;
2872
2632 case TCP_USER_TIMEOUT: 2873 case TCP_USER_TIMEOUT:
2633 val = jiffies_to_msecs(icsk->icsk_user_timeout); 2874 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2634 break; 2875 break;
@@ -2672,7 +2913,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2672{ 2913{
2673 struct sk_buff *segs = ERR_PTR(-EINVAL); 2914 struct sk_buff *segs = ERR_PTR(-EINVAL);
2674 struct tcphdr *th; 2915 struct tcphdr *th;
2675 unsigned thlen; 2916 unsigned int thlen;
2676 unsigned int seq; 2917 unsigned int seq;
2677 __be32 delta; 2918 __be32 delta;
2678 unsigned int oldlen; 2919 unsigned int oldlen;
@@ -2930,13 +3171,13 @@ out_free:
2930struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 3171struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2931{ 3172{
2932 struct tcp_md5sig_pool __percpu *pool; 3173 struct tcp_md5sig_pool __percpu *pool;
2933 int alloc = 0; 3174 bool alloc = false;
2934 3175
2935retry: 3176retry:
2936 spin_lock_bh(&tcp_md5sig_pool_lock); 3177 spin_lock_bh(&tcp_md5sig_pool_lock);
2937 pool = tcp_md5sig_pool; 3178 pool = tcp_md5sig_pool;
2938 if (tcp_md5sig_users++ == 0) { 3179 if (tcp_md5sig_users++ == 0) {
2939 alloc = 1; 3180 alloc = true;
2940 spin_unlock_bh(&tcp_md5sig_pool_lock); 3181 spin_unlock_bh(&tcp_md5sig_pool_lock);
2941 } else if (!pool) { 3182 } else if (!pool) {
2942 tcp_md5sig_users--; 3183 tcp_md5sig_users--;
@@ -3030,9 +3271,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3030 struct scatterlist sg; 3271 struct scatterlist sg;
3031 const struct tcphdr *tp = tcp_hdr(skb); 3272 const struct tcphdr *tp = tcp_hdr(skb);
3032 struct hash_desc *desc = &hp->md5_desc; 3273 struct hash_desc *desc = &hp->md5_desc;
3033 unsigned i; 3274 unsigned int i;
3034 const unsigned head_data_len = skb_headlen(skb) > header_len ? 3275 const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3035 skb_headlen(skb) - header_len : 0; 3276 skb_headlen(skb) - header_len : 0;
3036 const struct skb_shared_info *shi = skb_shinfo(skb); 3277 const struct skb_shared_info *shi = skb_shinfo(skb);
3037 struct sk_buff *frag_iter; 3278 struct sk_buff *frag_iter;
3038 3279
@@ -3220,9 +3461,15 @@ extern struct tcp_congestion_ops tcp_reno;
3220static __initdata unsigned long thash_entries; 3461static __initdata unsigned long thash_entries;
3221static int __init set_thash_entries(char *str) 3462static int __init set_thash_entries(char *str)
3222{ 3463{
3464 ssize_t ret;
3465
3223 if (!str) 3466 if (!str)
3224 return 0; 3467 return 0;
3225 thash_entries = simple_strtoul(str, &str, 0); 3468
3469 ret = kstrtoul(str, 0, &thash_entries);
3470 if (ret)
3471 return 0;
3472
3226 return 1; 3473 return 1;
3227} 3474}
3228__setup("thash_entries=", set_thash_entries); 3475__setup("thash_entries=", set_thash_entries);
@@ -3240,7 +3487,7 @@ void __init tcp_init(void)
3240{ 3487{
3241 struct sk_buff *skb = NULL; 3488 struct sk_buff *skb = NULL;
3242 unsigned long limit; 3489 unsigned long limit;
3243 int max_share, cnt; 3490 int max_rshare, max_wshare, cnt;
3244 unsigned int i; 3491 unsigned int i;
3245 unsigned long jiffy = jiffies; 3492 unsigned long jiffy = jiffies;
3246 3493
@@ -3267,6 +3514,7 @@ void __init tcp_init(void)
3267 0, 3514 0,
3268 NULL, 3515 NULL,
3269 &tcp_hashinfo.ehash_mask, 3516 &tcp_hashinfo.ehash_mask,
3517 0,
3270 thash_entries ? 0 : 512 * 1024); 3518 thash_entries ? 0 : 512 * 1024);
3271 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { 3519 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3272 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 3520 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
@@ -3283,6 +3531,7 @@ void __init tcp_init(void)
3283 0, 3531 0,
3284 &tcp_hashinfo.bhash_size, 3532 &tcp_hashinfo.bhash_size,
3285 NULL, 3533 NULL,
3534 0,
3286 64 * 1024); 3535 64 * 1024);
3287 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 3536 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3288 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3537 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
@@ -3299,21 +3548,20 @@ void __init tcp_init(void)
3299 3548
3300 tcp_init_mem(&init_net); 3549 tcp_init_mem(&init_net);
3301 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3550 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3302 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10); 3551 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3303 limit = max(limit, 128UL); 3552 max_wshare = min(4UL*1024*1024, limit);
3304 max_share = min(4UL*1024*1024, limit); 3553 max_rshare = min(6UL*1024*1024, limit);
3305 3554
3306 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3555 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3307 sysctl_tcp_wmem[1] = 16*1024; 3556 sysctl_tcp_wmem[1] = 16*1024;
3308 sysctl_tcp_wmem[2] = max(64*1024, max_share); 3557 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3309 3558
3310 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3559 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3311 sysctl_tcp_rmem[1] = 87380; 3560 sysctl_tcp_rmem[1] = 87380;
3312 sysctl_tcp_rmem[2] = max(87380, max_share); 3561 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3313 3562
3314 printk(KERN_INFO "TCP: Hash tables configured " 3563 pr_info("Hash tables configured (established %u bind %u)\n",
3315 "(established %u bind %u)\n", 3564 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3316 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3317 3565
3318 tcp_register_congestion_control(&tcp_reno); 3566 tcp_register_congestion_control(&tcp_reno);
3319 3567
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index fc6d475f488f..04dbd7ae7c62 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -6,6 +6,8 @@
6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> 6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
7 */ 7 */
8 8
9#define pr_fmt(fmt) "TCP: " fmt
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/mm.h> 12#include <linux/mm.h>
11#include <linux/types.h> 13#include <linux/types.h>
@@ -41,18 +43,17 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
41 43
42 /* all algorithms must implement ssthresh and cong_avoid ops */ 44 /* all algorithms must implement ssthresh and cong_avoid ops */
43 if (!ca->ssthresh || !ca->cong_avoid) { 45 if (!ca->ssthresh || !ca->cong_avoid) {
44 printk(KERN_ERR "TCP %s does not implement required ops\n", 46 pr_err("%s does not implement required ops\n", ca->name);
45 ca->name);
46 return -EINVAL; 47 return -EINVAL;
47 } 48 }
48 49
49 spin_lock(&tcp_cong_list_lock); 50 spin_lock(&tcp_cong_list_lock);
50 if (tcp_ca_find(ca->name)) { 51 if (tcp_ca_find(ca->name)) {
51 printk(KERN_NOTICE "TCP %s already registered\n", ca->name); 52 pr_notice("%s already registered\n", ca->name);
52 ret = -EEXIST; 53 ret = -EEXIST;
53 } else { 54 } else {
54 list_add_tail_rcu(&ca->list, &tcp_cong_list); 55 list_add_tail_rcu(&ca->list, &tcp_cong_list);
55 printk(KERN_INFO "TCP %s registered\n", ca->name); 56 pr_info("%s registered\n", ca->name);
56 } 57 }
57 spin_unlock(&tcp_cong_list_lock); 58 spin_unlock(&tcp_cong_list_lock);
58 59
@@ -279,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
279/* RFC2861 Check whether we are limited by application or congestion window 280/* RFC2861 Check whether we are limited by application or congestion window
280 * This is the inverse of cwnd check in tcp_tso_should_defer 281 * This is the inverse of cwnd check in tcp_tso_should_defer
281 */ 282 */
282int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) 283bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
283{ 284{
284 const struct tcp_sock *tp = tcp_sk(sk); 285 const struct tcp_sock *tp = tcp_sk(sk);
285 u32 left; 286 u32 left;
286 287
287 if (in_flight >= tp->snd_cwnd) 288 if (in_flight >= tp->snd_cwnd)
288 return 1; 289 return true;
289 290
290 left = tp->snd_cwnd - in_flight; 291 left = tp->snd_cwnd - in_flight;
291 if (sk_can_gso(sk) && 292 if (sk_can_gso(sk) &&
292 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && 293 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
293 left * tp->mss_cache < sk->sk_gso_max_size) 294 left * tp->mss_cache < sk->sk_gso_max_size)
294 return 1; 295 return true;
295 return left <= tcp_max_tso_deferred_mss(tp); 296 return left <= tcp_max_tso_deferred_mss(tp);
296} 297}
297EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); 298EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index fe3ecf484b44..57bdd17dff4d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
15 15
16/* Tcp Hybla structure. */ 16/* Tcp Hybla structure. */
17struct hybla { 17struct hybla {
18 u8 hybla_en; 18 bool hybla_en;
19 u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ 19 u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
20 u32 rho; /* Rho parameter, integer part */ 20 u32 rho; /* Rho parameter, integer part */
21 u32 rho2; /* Rho * Rho, integer part */ 21 u32 rho2; /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
24 u32 minrtt; /* Minimum smoothed round trip time value seen */ 24 u32 minrtt; /* Minimum smoothed round trip time value seen */
25}; 25};
26 26
27/* Hybla reference round trip time (default= 1/40 sec = 25 ms), 27/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
28 expressed in jiffies */
29static int rtt0 = 25; 28static int rtt0 = 25;
30module_param(rtt0, int, 0644); 29module_param(rtt0, int, 0644);
31MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); 30MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
39 ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8); 38 ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
40 ca->rho = ca->rho_3ls >> 3; 39 ca->rho = ca->rho_3ls >> 3;
41 ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; 40 ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
42 ca->rho2 = ca->rho2_7ls >>7; 41 ca->rho2 = ca->rho2_7ls >> 7;
43} 42}
44 43
45static void hybla_init(struct sock *sk) 44static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
52 ca->rho_3ls = 0; 51 ca->rho_3ls = 0;
53 ca->rho2_7ls = 0; 52 ca->rho2_7ls = 0;
54 ca->snd_cwnd_cents = 0; 53 ca->snd_cwnd_cents = 0;
55 ca->hybla_en = 1; 54 ca->hybla_en = true;
56 tp->snd_cwnd = 2; 55 tp->snd_cwnd = 2;
57 tp->snd_cwnd_clamp = 65535; 56 tp->snd_cwnd_clamp = 65535;
58 57
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
67static void hybla_state(struct sock *sk, u8 ca_state) 66static void hybla_state(struct sock *sk, u8 ca_state)
68{ 67{
69 struct hybla *ca = inet_csk_ca(sk); 68 struct hybla *ca = inet_csk_ca(sk);
69
70 ca->hybla_en = (ca_state == TCP_CA_Open); 70 ca->hybla_en = (ca_state == TCP_CA_Open);
71} 71}
72 72
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5e315f13641..b224eb8bce8b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -61,6 +61,8 @@
61 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs 61 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs
62 */ 62 */
63 63
64#define pr_fmt(fmt) "TCP: " fmt
65
64#include <linux/mm.h> 66#include <linux/mm.h>
65#include <linux/slab.h> 67#include <linux/slab.h>
66#include <linux/module.h> 68#include <linux/module.h>
@@ -83,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
83EXPORT_SYMBOL(sysctl_tcp_ecn); 85EXPORT_SYMBOL(sysctl_tcp_ecn);
84int sysctl_tcp_dsack __read_mostly = 1; 86int sysctl_tcp_dsack __read_mostly = 1;
85int sysctl_tcp_app_win __read_mostly = 31; 87int sysctl_tcp_app_win __read_mostly = 31;
86int sysctl_tcp_adv_win_scale __read_mostly = 2; 88int sysctl_tcp_adv_win_scale __read_mostly = 1;
87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
88 90
89int sysctl_tcp_stdurg __read_mostly; 91int sysctl_tcp_stdurg __read_mostly;
@@ -97,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
97 99
98int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
99int sysctl_tcp_abc __read_mostly; 101int sysctl_tcp_abc __read_mostly;
102int sysctl_tcp_early_retrans __read_mostly = 2;
100 103
101#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 104#define FLAG_DATA 0x01 /* Incoming frame contained data. */
102#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 105#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -173,7 +176,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
173static void tcp_incr_quickack(struct sock *sk) 176static void tcp_incr_quickack(struct sock *sk)
174{ 177{
175 struct inet_connection_sock *icsk = inet_csk(sk); 178 struct inet_connection_sock *icsk = inet_csk(sk);
176 unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 179 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
177 180
178 if (quickacks == 0) 181 if (quickacks == 0)
179 quickacks = 2; 182 quickacks = 2;
@@ -193,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk)
193 * and the session is not interactive. 196 * and the session is not interactive.
194 */ 197 */
195 198
196static inline int tcp_in_quickack_mode(const struct sock *sk) 199static inline bool tcp_in_quickack_mode(const struct sock *sk)
197{ 200{
198 const struct inet_connection_sock *icsk = inet_csk(sk); 201 const struct inet_connection_sock *icsk = inet_csk(sk);
202
199 return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; 203 return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
200} 204}
201 205
@@ -250,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
250 tp->ecn_flags &= ~TCP_ECN_OK; 254 tp->ecn_flags &= ~TCP_ECN_OK;
251} 255}
252 256
253static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) 257static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
254{ 258{
255 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) 259 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
256 return 1; 260 return true;
257 return 0; 261 return false;
258} 262}
259 263
260/* Buffer size and advertised window tuning. 264/* Buffer size and advertised window tuning.
@@ -333,6 +337,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
333 incr = __tcp_grow_window(sk, skb); 337 incr = __tcp_grow_window(sk, skb);
334 338
335 if (incr) { 339 if (incr) {
340 incr = max_t(int, incr, 2 * skb->len);
336 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 341 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
337 tp->window_clamp); 342 tp->window_clamp);
338 inet_csk(sk)->icsk_ack.quick |= 1; 343 inet_csk(sk)->icsk_ack.quick |= 1;
@@ -472,8 +477,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
472 if (!win_dep) { 477 if (!win_dep) {
473 m -= (new_sample >> 3); 478 m -= (new_sample >> 3);
474 new_sample += m; 479 new_sample += m;
475 } else if (m < new_sample) 480 } else {
476 new_sample = m << 3; 481 m <<= 3;
482 if (m < new_sample)
483 new_sample = m;
484 }
477 } else { 485 } else {
478 /* No previous measure. */ 486 /* No previous measure. */
479 new_sample = m << 3; 487 new_sample = m << 3;
@@ -489,7 +497,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
489 goto new_measure; 497 goto new_measure;
490 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 498 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
491 return; 499 return;
492 tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); 500 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
493 501
494new_measure: 502new_measure:
495 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 503 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -900,6 +908,7 @@ static void tcp_init_metrics(struct sock *sk)
900 if (dst_metric(dst, RTAX_REORDERING) && 908 if (dst_metric(dst, RTAX_REORDERING) &&
901 tp->reordering != dst_metric(dst, RTAX_REORDERING)) { 909 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
902 tcp_disable_fack(tp); 910 tcp_disable_fack(tp);
911 tcp_disable_early_retrans(tp);
903 tp->reordering = dst_metric(dst, RTAX_REORDERING); 912 tp->reordering = dst_metric(dst, RTAX_REORDERING);
904 } 913 }
905 914
@@ -931,7 +940,7 @@ static void tcp_init_metrics(struct sock *sk)
931 tcp_set_rto(sk); 940 tcp_set_rto(sk);
932reset: 941reset:
933 if (tp->srtt == 0) { 942 if (tp->srtt == 0) {
934 /* RFC2988bis: We've failed to get a valid RTT sample from 943 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
935 * 3WHS. This is most likely due to retransmission, 944 * 3WHS. This is most likely due to retransmission,
936 * including spurious one. Reset the RTO back to 3secs 945 * including spurious one. Reset the RTO back to 3secs
937 * from the more aggressive 1sec to avoid more spurious 946 * from the more aggressive 1sec to avoid more spurious
@@ -941,7 +950,7 @@ reset:
941 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; 950 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
942 } 951 }
943 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been 952 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
944 * retransmitted. In light of RFC2988bis' more aggressive 1sec 953 * retransmitted. In light of RFC6298 more aggressive 1sec
945 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK 954 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
946 * retransmission has occurred. 955 * retransmission has occurred.
947 */ 956 */
@@ -973,15 +982,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
973 982
974 NET_INC_STATS_BH(sock_net(sk), mib_idx); 983 NET_INC_STATS_BH(sock_net(sk), mib_idx);
975#if FASTRETRANS_DEBUG > 1 984#if FASTRETRANS_DEBUG > 1
976 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", 985 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
977 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 986 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
978 tp->reordering, 987 tp->reordering,
979 tp->fackets_out, 988 tp->fackets_out,
980 tp->sacked_out, 989 tp->sacked_out,
981 tp->undo_marker ? tp->undo_retrans : 0); 990 tp->undo_marker ? tp->undo_retrans : 0);
982#endif 991#endif
983 tcp_disable_fack(tp); 992 tcp_disable_fack(tp);
984 } 993 }
994
995 if (metric > 0)
996 tcp_disable_early_retrans(tp);
985} 997}
986 998
987/* This must be called before lost_out is incremented */ 999/* This must be called before lost_out is incremented */
@@ -1112,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
1112 * the exact amount is rather hard to quantify. However, tp->max_window can 1124 * the exact amount is rather hard to quantify. However, tp->max_window can
1113 * be used as an exaggerated estimate. 1125 * be used as an exaggerated estimate.
1114 */ 1126 */
1115static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, 1127static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
1116 u32 start_seq, u32 end_seq) 1128 u32 start_seq, u32 end_seq)
1117{ 1129{
1118 /* Too far in future, or reversed (interpretation is ambiguous) */ 1130 /* Too far in future, or reversed (interpretation is ambiguous) */
1119 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) 1131 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1120 return 0; 1132 return false;
1121 1133
1122 /* Nasty start_seq wrap-around check (see comments above) */ 1134 /* Nasty start_seq wrap-around check (see comments above) */
1123 if (!before(start_seq, tp->snd_nxt)) 1135 if (!before(start_seq, tp->snd_nxt))
1124 return 0; 1136 return false;
1125 1137
1126 /* In outstanding window? ...This is valid exit for D-SACKs too. 1138 /* In outstanding window? ...This is valid exit for D-SACKs too.
1127 * start_seq == snd_una is non-sensical (see comments above) 1139 * start_seq == snd_una is non-sensical (see comments above)
1128 */ 1140 */
1129 if (after(start_seq, tp->snd_una)) 1141 if (after(start_seq, tp->snd_una))
1130 return 1; 1142 return true;
1131 1143
1132 if (!is_dsack || !tp->undo_marker) 1144 if (!is_dsack || !tp->undo_marker)
1133 return 0; 1145 return false;
1134 1146
1135 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1147 /* ...Then it's D-SACK, and must reside below snd_una completely */
1136 if (after(end_seq, tp->snd_una)) 1148 if (after(end_seq, tp->snd_una))
1137 return 0; 1149 return false;
1138 1150
1139 if (!before(start_seq, tp->undo_marker)) 1151 if (!before(start_seq, tp->undo_marker))
1140 return 1; 1152 return true;
1141 1153
1142 /* Too old */ 1154 /* Too old */
1143 if (!after(end_seq, tp->undo_marker)) 1155 if (!after(end_seq, tp->undo_marker))
1144 return 0; 1156 return false;
1145 1157
1146 /* Undo_marker boundary crossing (overestimates a lot). Known already: 1158 /* Undo_marker boundary crossing (overestimates a lot). Known already:
1147 * start_seq < undo_marker and end_seq >= undo_marker. 1159 * start_seq < undo_marker and end_seq >= undo_marker.
@@ -1213,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1213 tp->lost_retrans_low = new_low_seq; 1225 tp->lost_retrans_low = new_low_seq;
1214} 1226}
1215 1227
1216static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, 1228static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1217 struct tcp_sack_block_wire *sp, int num_sacks, 1229 struct tcp_sack_block_wire *sp, int num_sacks,
1218 u32 prior_snd_una) 1230 u32 prior_snd_una)
1219{ 1231{
1220 struct tcp_sock *tp = tcp_sk(sk); 1232 struct tcp_sock *tp = tcp_sk(sk);
1221 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); 1233 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1222 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); 1234 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1223 int dup_sack = 0; 1235 bool dup_sack = false;
1224 1236
1225 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { 1237 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1226 dup_sack = 1; 1238 dup_sack = true;
1227 tcp_dsack_seen(tp); 1239 tcp_dsack_seen(tp);
1228 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); 1240 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1229 } else if (num_sacks > 1) { 1241 } else if (num_sacks > 1) {
@@ -1232,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1232 1244
1233 if (!after(end_seq_0, end_seq_1) && 1245 if (!after(end_seq_0, end_seq_1) &&
1234 !before(start_seq_0, start_seq_1)) { 1246 !before(start_seq_0, start_seq_1)) {
1235 dup_sack = 1; 1247 dup_sack = true;
1236 tcp_dsack_seen(tp); 1248 tcp_dsack_seen(tp);
1237 NET_INC_STATS_BH(sock_net(sk), 1249 NET_INC_STATS_BH(sock_net(sk),
1238 LINUX_MIB_TCPDSACKOFORECV); 1250 LINUX_MIB_TCPDSACKOFORECV);
@@ -1263,9 +1275,10 @@ struct tcp_sacktag_state {
1263 * FIXME: this could be merged to shift decision code 1275 * FIXME: this could be merged to shift decision code
1264 */ 1276 */
1265static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1277static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1266 u32 start_seq, u32 end_seq) 1278 u32 start_seq, u32 end_seq)
1267{ 1279{
1268 int in_sack, err; 1280 int err;
1281 bool in_sack;
1269 unsigned int pkt_len; 1282 unsigned int pkt_len;
1270 unsigned int mss; 1283 unsigned int mss;
1271 1284
@@ -1311,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1311static u8 tcp_sacktag_one(struct sock *sk, 1324static u8 tcp_sacktag_one(struct sock *sk,
1312 struct tcp_sacktag_state *state, u8 sacked, 1325 struct tcp_sacktag_state *state, u8 sacked,
1313 u32 start_seq, u32 end_seq, 1326 u32 start_seq, u32 end_seq,
1314 int dup_sack, int pcount) 1327 bool dup_sack, int pcount)
1315{ 1328{
1316 struct tcp_sock *tp = tcp_sk(sk); 1329 struct tcp_sock *tp = tcp_sk(sk);
1317 int fack_count = state->fack_count; 1330 int fack_count = state->fack_count;
@@ -1391,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
1391/* Shift newly-SACKed bytes from this skb to the immediately previous 1404/* Shift newly-SACKed bytes from this skb to the immediately previous
1392 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 1405 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1393 */ 1406 */
1394static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1407static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1395 struct tcp_sacktag_state *state, 1408 struct tcp_sacktag_state *state,
1396 unsigned int pcount, int shifted, int mss, 1409 unsigned int pcount, int shifted, int mss,
1397 int dup_sack) 1410 bool dup_sack)
1398{ 1411{
1399 struct tcp_sock *tp = tcp_sk(sk); 1412 struct tcp_sock *tp = tcp_sk(sk);
1400 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1413 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1444,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1444 if (skb->len > 0) { 1457 if (skb->len > 0) {
1445 BUG_ON(!tcp_skb_pcount(skb)); 1458 BUG_ON(!tcp_skb_pcount(skb));
1446 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); 1459 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1447 return 0; 1460 return false;
1448 } 1461 }
1449 1462
1450 /* Whole SKB was eaten :-) */ 1463 /* Whole SKB was eaten :-) */
@@ -1467,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1467 1480
1468 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); 1481 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
1469 1482
1470 return 1; 1483 return true;
1471} 1484}
1472 1485
1473/* I wish gso_size would have a bit more sane initialization than 1486/* I wish gso_size would have a bit more sane initialization than
@@ -1490,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb)
1490static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, 1503static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1491 struct tcp_sacktag_state *state, 1504 struct tcp_sacktag_state *state,
1492 u32 start_seq, u32 end_seq, 1505 u32 start_seq, u32 end_seq,
1493 int dup_sack) 1506 bool dup_sack)
1494{ 1507{
1495 struct tcp_sock *tp = tcp_sk(sk); 1508 struct tcp_sock *tp = tcp_sk(sk);
1496 struct sk_buff *prev; 1509 struct sk_buff *prev;
@@ -1629,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1629 struct tcp_sack_block *next_dup, 1642 struct tcp_sack_block *next_dup,
1630 struct tcp_sacktag_state *state, 1643 struct tcp_sacktag_state *state,
1631 u32 start_seq, u32 end_seq, 1644 u32 start_seq, u32 end_seq,
1632 int dup_sack_in) 1645 bool dup_sack_in)
1633{ 1646{
1634 struct tcp_sock *tp = tcp_sk(sk); 1647 struct tcp_sock *tp = tcp_sk(sk);
1635 struct sk_buff *tmp; 1648 struct sk_buff *tmp;
1636 1649
1637 tcp_for_write_queue_from(skb, sk) { 1650 tcp_for_write_queue_from(skb, sk) {
1638 int in_sack = 0; 1651 int in_sack = 0;
1639 int dup_sack = dup_sack_in; 1652 bool dup_sack = dup_sack_in;
1640 1653
1641 if (skb == tcp_send_head(sk)) 1654 if (skb == tcp_send_head(sk))
1642 break; 1655 break;
@@ -1651,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1651 next_dup->start_seq, 1664 next_dup->start_seq,
1652 next_dup->end_seq); 1665 next_dup->end_seq);
1653 if (in_sack > 0) 1666 if (in_sack > 0)
1654 dup_sack = 1; 1667 dup_sack = true;
1655 } 1668 }
1656 1669
1657 /* skb reference here is a bit tricky to get right, since 1670 /* skb reference here is a bit tricky to get right, since
@@ -1756,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1756 struct sk_buff *skb; 1769 struct sk_buff *skb;
1757 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1770 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
1758 int used_sacks; 1771 int used_sacks;
1759 int found_dup_sack = 0; 1772 bool found_dup_sack = false;
1760 int i, j; 1773 int i, j;
1761 int first_sack_index; 1774 int first_sack_index;
1762 1775
@@ -1787,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1787 used_sacks = 0; 1800 used_sacks = 0;
1788 first_sack_index = 0; 1801 first_sack_index = 0;
1789 for (i = 0; i < num_sacks; i++) { 1802 for (i = 0; i < num_sacks; i++) {
1790 int dup_sack = !i && found_dup_sack; 1803 bool dup_sack = !i && found_dup_sack;
1791 1804
1792 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1805 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1793 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1806 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1854,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1854 while (i < used_sacks) { 1867 while (i < used_sacks) {
1855 u32 start_seq = sp[i].start_seq; 1868 u32 start_seq = sp[i].start_seq;
1856 u32 end_seq = sp[i].end_seq; 1869 u32 end_seq = sp[i].end_seq;
1857 int dup_sack = (found_dup_sack && (i == first_sack_index)); 1870 bool dup_sack = (found_dup_sack && (i == first_sack_index));
1858 struct tcp_sack_block *next_dup = NULL; 1871 struct tcp_sack_block *next_dup = NULL;
1859 1872
1860 if (found_dup_sack && ((i + 1) == first_sack_index)) 1873 if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1956,9 +1969,9 @@ out:
1956} 1969}
1957 1970
1958/* Limits sacked_out so that sum with lost_out isn't ever larger than 1971/* Limits sacked_out so that sum with lost_out isn't ever larger than
1959 * packets_out. Returns zero if sacked_out adjustement wasn't necessary. 1972 * packets_out. Returns false if sacked_out adjustement wasn't necessary.
1960 */ 1973 */
1961static int tcp_limit_reno_sacked(struct tcp_sock *tp) 1974static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
1962{ 1975{
1963 u32 holes; 1976 u32 holes;
1964 1977
@@ -1967,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp)
1967 1980
1968 if ((tp->sacked_out + holes) > tp->packets_out) { 1981 if ((tp->sacked_out + holes) > tp->packets_out) {
1969 tp->sacked_out = tp->packets_out - holes; 1982 tp->sacked_out = tp->packets_out - holes;
1970 return 1; 1983 return true;
1971 } 1984 }
1972 return 0; 1985 return false;
1973} 1986}
1974 1987
1975/* If we receive more dupacks than we expected counting segments 1988/* If we receive more dupacks than we expected counting segments
@@ -2023,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp)
2023/* F-RTO can only be used if TCP has never retransmitted anything other than 2036/* F-RTO can only be used if TCP has never retransmitted anything other than
2024 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) 2037 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
2025 */ 2038 */
2026int tcp_use_frto(struct sock *sk) 2039bool tcp_use_frto(struct sock *sk)
2027{ 2040{
2028 const struct tcp_sock *tp = tcp_sk(sk); 2041 const struct tcp_sock *tp = tcp_sk(sk);
2029 const struct inet_connection_sock *icsk = inet_csk(sk); 2042 const struct inet_connection_sock *icsk = inet_csk(sk);
2030 struct sk_buff *skb; 2043 struct sk_buff *skb;
2031 2044
2032 if (!sysctl_tcp_frto) 2045 if (!sysctl_tcp_frto)
2033 return 0; 2046 return false;
2034 2047
2035 /* MTU probe and F-RTO won't really play nicely along currently */ 2048 /* MTU probe and F-RTO won't really play nicely along currently */
2036 if (icsk->icsk_mtup.probe_size) 2049 if (icsk->icsk_mtup.probe_size)
2037 return 0; 2050 return false;
2038 2051
2039 if (tcp_is_sackfrto(tp)) 2052 if (tcp_is_sackfrto(tp))
2040 return 1; 2053 return true;
2041 2054
2042 /* Avoid expensive walking of rexmit queue if possible */ 2055 /* Avoid expensive walking of rexmit queue if possible */
2043 if (tp->retrans_out > 1) 2056 if (tp->retrans_out > 1)
2044 return 0; 2057 return false;
2045 2058
2046 skb = tcp_write_queue_head(sk); 2059 skb = tcp_write_queue_head(sk);
2047 if (tcp_skb_is_last(sk, skb)) 2060 if (tcp_skb_is_last(sk, skb))
2048 return 1; 2061 return true;
2049 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 2062 skb = tcp_write_queue_next(sk, skb); /* Skips head */
2050 tcp_for_write_queue_from(skb, sk) { 2063 tcp_for_write_queue_from(skb, sk) {
2051 if (skb == tcp_send_head(sk)) 2064 if (skb == tcp_send_head(sk))
2052 break; 2065 break;
2053 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2066 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
2054 return 0; 2067 return false;
2055 /* Short-circuit when first non-SACKed skb has been checked */ 2068 /* Short-circuit when first non-SACKed skb has been checked */
2056 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2069 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2057 break; 2070 break;
2058 } 2071 }
2059 return 1; 2072 return true;
2060} 2073}
2061 2074
2062/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO 2075/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
@@ -2292,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how)
2292 * 2305 *
2293 * Do processing similar to RTO timeout. 2306 * Do processing similar to RTO timeout.
2294 */ 2307 */
2295static int tcp_check_sack_reneging(struct sock *sk, int flag) 2308static bool tcp_check_sack_reneging(struct sock *sk, int flag)
2296{ 2309{
2297 if (flag & FLAG_SACK_RENEGING) { 2310 if (flag & FLAG_SACK_RENEGING) {
2298 struct inet_connection_sock *icsk = inet_csk(sk); 2311 struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2303,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
2303 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 2316 tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
2304 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2317 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2305 icsk->icsk_rto, TCP_RTO_MAX); 2318 icsk->icsk_rto, TCP_RTO_MAX);
2306 return 1; 2319 return true;
2307 } 2320 }
2308 return 0; 2321 return false;
2309} 2322}
2310 2323
2311static inline int tcp_fackets_out(const struct tcp_sock *tp) 2324static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2333,6 +2346,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2333 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2346 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
2334} 2347}
2335 2348
2349static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
2350{
2351 struct tcp_sock *tp = tcp_sk(sk);
2352 unsigned long delay;
2353
2354 /* Delay early retransmit and entering fast recovery for
2355 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
2356 * available, or RTO is scheduled to fire first.
2357 */
2358 if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
2359 return false;
2360
2361 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
2362 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
2363 return false;
2364
2365 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
2366 tp->early_retrans_delayed = 1;
2367 return true;
2368}
2369
2336static inline int tcp_skb_timedout(const struct sock *sk, 2370static inline int tcp_skb_timedout(const struct sock *sk,
2337 const struct sk_buff *skb) 2371 const struct sk_buff *skb)
2338{ 2372{
@@ -2440,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk)
2440 * Main question: may we further continue forward transmission 2474 * Main question: may we further continue forward transmission
2441 * with the same cwnd? 2475 * with the same cwnd?
2442 */ 2476 */
2443static int tcp_time_to_recover(struct sock *sk) 2477static bool tcp_time_to_recover(struct sock *sk, int flag)
2444{ 2478{
2445 struct tcp_sock *tp = tcp_sk(sk); 2479 struct tcp_sock *tp = tcp_sk(sk);
2446 __u32 packets_out; 2480 __u32 packets_out;
2447 2481
2448 /* Do not perform any recovery during F-RTO algorithm */ 2482 /* Do not perform any recovery during F-RTO algorithm */
2449 if (tp->frto_counter) 2483 if (tp->frto_counter)
2450 return 0; 2484 return false;
2451 2485
2452 /* Trick#1: The loss is proven. */ 2486 /* Trick#1: The loss is proven. */
2453 if (tp->lost_out) 2487 if (tp->lost_out)
2454 return 1; 2488 return true;
2455 2489
2456 /* Not-A-Trick#2 : Classic rule... */ 2490 /* Not-A-Trick#2 : Classic rule... */
2457 if (tcp_dupack_heuristics(tp) > tp->reordering) 2491 if (tcp_dupack_heuristics(tp) > tp->reordering)
2458 return 1; 2492 return true;
2459 2493
2460 /* Trick#3 : when we use RFC2988 timer restart, fast 2494 /* Trick#3 : when we use RFC2988 timer restart, fast
2461 * retransmit can be triggered by timeout of queue head. 2495 * retransmit can be triggered by timeout of queue head.
2462 */ 2496 */
2463 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 2497 if (tcp_is_fack(tp) && tcp_head_timedout(sk))
2464 return 1; 2498 return true;
2465 2499
2466 /* Trick#4: It is still not OK... But will it be useful to delay 2500 /* Trick#4: It is still not OK... But will it be useful to delay
2467 * recovery more? 2501 * recovery more?
@@ -2473,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk)
2473 /* We have nothing to send. This connection is limited 2507 /* We have nothing to send. This connection is limited
2474 * either by receiver window or by application. 2508 * either by receiver window or by application.
2475 */ 2509 */
2476 return 1; 2510 return true;
2477 } 2511 }
2478 2512
2479 /* If a thin stream is detected, retransmit after first 2513 /* If a thin stream is detected, retransmit after first
@@ -2484,9 +2518,19 @@ static int tcp_time_to_recover(struct sock *sk)
2484 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && 2518 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
2485 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && 2519 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
2486 tcp_is_sack(tp) && !tcp_send_head(sk)) 2520 tcp_is_sack(tp) && !tcp_send_head(sk))
2487 return 1; 2521 return true;
2488 2522
2489 return 0; 2523 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
2524 * retransmissions due to small network reorderings, we implement
2525 * Mitigation A.3 in the RFC and delay the retransmission for a short
2526 * interval if appropriate.
2527 */
2528 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
2529 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
2530 !tcp_may_send_now(sk))
2531 return !tcp_pause_early_retransmit(sk, flag);
2532
2533 return false;
2490} 2534}
2491 2535
2492/* New heuristics: it is possible only after we switched to restart timer 2536/* New heuristics: it is possible only after we switched to restart timer
@@ -2674,22 +2718,22 @@ static void DBGUNDO(struct sock *sk, const char *msg)
2674 struct inet_sock *inet = inet_sk(sk); 2718 struct inet_sock *inet = inet_sk(sk);
2675 2719
2676 if (sk->sk_family == AF_INET) { 2720 if (sk->sk_family == AF_INET) {
2677 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2721 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2678 msg, 2722 msg,
2679 &inet->inet_daddr, ntohs(inet->inet_dport), 2723 &inet->inet_daddr, ntohs(inet->inet_dport),
2680 tp->snd_cwnd, tcp_left_out(tp), 2724 tp->snd_cwnd, tcp_left_out(tp),
2681 tp->snd_ssthresh, tp->prior_ssthresh, 2725 tp->snd_ssthresh, tp->prior_ssthresh,
2682 tp->packets_out); 2726 tp->packets_out);
2683 } 2727 }
2684#if IS_ENABLED(CONFIG_IPV6) 2728#if IS_ENABLED(CONFIG_IPV6)
2685 else if (sk->sk_family == AF_INET6) { 2729 else if (sk->sk_family == AF_INET6) {
2686 struct ipv6_pinfo *np = inet6_sk(sk); 2730 struct ipv6_pinfo *np = inet6_sk(sk);
2687 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2731 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2688 msg, 2732 msg,
2689 &np->daddr, ntohs(inet->inet_dport), 2733 &np->daddr, ntohs(inet->inet_dport),
2690 tp->snd_cwnd, tcp_left_out(tp), 2734 tp->snd_cwnd, tcp_left_out(tp),
2691 tp->snd_ssthresh, tp->prior_ssthresh, 2735 tp->snd_ssthresh, tp->prior_ssthresh,
2692 tp->packets_out); 2736 tp->packets_out);
2693 } 2737 }
2694#endif 2738#endif
2695} 2739}
@@ -2725,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp)
2725} 2769}
2726 2770
2727/* People celebrate: "We love our President!" */ 2771/* People celebrate: "We love our President!" */
2728static int tcp_try_undo_recovery(struct sock *sk) 2772static bool tcp_try_undo_recovery(struct sock *sk)
2729{ 2773{
2730 struct tcp_sock *tp = tcp_sk(sk); 2774 struct tcp_sock *tp = tcp_sk(sk);
2731 2775
@@ -2750,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk)
2750 * is ACKed. For Reno it is MUST to prevent false 2794 * is ACKed. For Reno it is MUST to prevent false
2751 * fast retransmits (RFC2582). SACK TCP is safe. */ 2795 * fast retransmits (RFC2582). SACK TCP is safe. */
2752 tcp_moderate_cwnd(tp); 2796 tcp_moderate_cwnd(tp);
2753 return 1; 2797 return true;
2754 } 2798 }
2755 tcp_set_ca_state(sk, TCP_CA_Open); 2799 tcp_set_ca_state(sk, TCP_CA_Open);
2756 return 0; 2800 return false;
2757} 2801}
2758 2802
2759/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2803/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2783,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk)
2783 * that successive retransmissions of a segment must not advance 2827 * that successive retransmissions of a segment must not advance
2784 * retrans_stamp under any conditions. 2828 * retrans_stamp under any conditions.
2785 */ 2829 */
2786static int tcp_any_retrans_done(const struct sock *sk) 2830static bool tcp_any_retrans_done(const struct sock *sk)
2787{ 2831{
2788 const struct tcp_sock *tp = tcp_sk(sk); 2832 const struct tcp_sock *tp = tcp_sk(sk);
2789 struct sk_buff *skb; 2833 struct sk_buff *skb;
2790 2834
2791 if (tp->retrans_out) 2835 if (tp->retrans_out)
2792 return 1; 2836 return true;
2793 2837
2794 skb = tcp_write_queue_head(sk); 2838 skb = tcp_write_queue_head(sk);
2795 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) 2839 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2796 return 1; 2840 return true;
2797 2841
2798 return 0; 2842 return false;
2799} 2843}
2800 2844
2801/* Undo during fast recovery after partial ACK. */ 2845/* Undo during fast recovery after partial ACK. */
@@ -2829,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
2829} 2873}
2830 2874
2831/* Undo during loss recovery after partial ACK. */ 2875/* Undo during loss recovery after partial ACK. */
2832static int tcp_try_undo_loss(struct sock *sk) 2876static bool tcp_try_undo_loss(struct sock *sk)
2833{ 2877{
2834 struct tcp_sock *tp = tcp_sk(sk); 2878 struct tcp_sock *tp = tcp_sk(sk);
2835 2879
@@ -2851,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk)
2851 tp->undo_marker = 0; 2895 tp->undo_marker = 0;
2852 if (tcp_is_sack(tp)) 2896 if (tcp_is_sack(tp))
2853 tcp_set_ca_state(sk, TCP_CA_Open); 2897 tcp_set_ca_state(sk, TCP_CA_Open);
2854 return 1; 2898 return true;
2855 } 2899 }
2856 return 0; 2900 return false;
2857} 2901}
2858 2902
2859static inline void tcp_complete_cwr(struct sock *sk) 2903static inline void tcp_complete_cwr(struct sock *sk)
@@ -2862,11 +2906,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
2862 2906
2863 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2907 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2864 if (tp->undo_marker) { 2908 if (tp->undo_marker) {
2865 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) 2909 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
2866 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2910 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2867 else /* PRR */ 2911 tp->snd_cwnd_stamp = tcp_time_stamp;
2912 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
2913 /* PRR algorithm. */
2868 tp->snd_cwnd = tp->snd_ssthresh; 2914 tp->snd_cwnd = tp->snd_ssthresh;
2869 tp->snd_cwnd_stamp = tcp_time_stamp; 2915 tp->snd_cwnd_stamp = tcp_time_stamp;
2916 }
2870 } 2917 }
2871 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2918 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2872} 2919}
@@ -3016,6 +3063,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
3016 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 3063 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
3017} 3064}
3018 3065
3066static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3067{
3068 struct tcp_sock *tp = tcp_sk(sk);
3069 int mib_idx;
3070
3071 if (tcp_is_reno(tp))
3072 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3073 else
3074 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3075
3076 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3077
3078 tp->high_seq = tp->snd_nxt;
3079 tp->prior_ssthresh = 0;
3080 tp->undo_marker = tp->snd_una;
3081 tp->undo_retrans = tp->retrans_out;
3082
3083 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3084 if (!ece_ack)
3085 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3086 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
3087 TCP_ECN_queue_cwr(tp);
3088 }
3089
3090 tp->bytes_acked = 0;
3091 tp->snd_cwnd_cnt = 0;
3092 tp->prior_cwnd = tp->snd_cwnd;
3093 tp->prr_delivered = 0;
3094 tp->prr_out = 0;
3095 tcp_set_ca_state(sk, TCP_CA_Recovery);
3096}
3097
3019/* Process an event, which can update packets-in-flight not trivially. 3098/* Process an event, which can update packets-in-flight not trivially.
3020 * Main goal of this function is to calculate new estimate for left_out, 3099 * Main goal of this function is to calculate new estimate for left_out,
3021 * taking into account both packets sitting in receiver's buffer and 3100 * taking into account both packets sitting in receiver's buffer and
@@ -3035,7 +3114,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3035 struct tcp_sock *tp = tcp_sk(sk); 3114 struct tcp_sock *tp = tcp_sk(sk);
3036 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 3115 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
3037 (tcp_fackets_out(tp) > tp->reordering)); 3116 (tcp_fackets_out(tp) > tp->reordering));
3038 int fast_rexmit = 0, mib_idx; 3117 int fast_rexmit = 0;
3039 3118
3040 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 3119 if (WARN_ON(!tp->packets_out && tp->sacked_out))
3041 tp->sacked_out = 0; 3120 tp->sacked_out = 0;
@@ -3119,7 +3198,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3119 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3198 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3120 tcp_try_undo_dsack(sk); 3199 tcp_try_undo_dsack(sk);
3121 3200
3122 if (!tcp_time_to_recover(sk)) { 3201 if (!tcp_time_to_recover(sk, flag)) {
3123 tcp_try_to_open(sk, flag); 3202 tcp_try_to_open(sk, flag);
3124 return; 3203 return;
3125 } 3204 }
@@ -3136,32 +3215,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3136 } 3215 }
3137 3216
3138 /* Otherwise enter Recovery state */ 3217 /* Otherwise enter Recovery state */
3139 3218 tcp_enter_recovery(sk, (flag & FLAG_ECE));
3140 if (tcp_is_reno(tp))
3141 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3142 else
3143 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3144
3145 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3146
3147 tp->high_seq = tp->snd_nxt;
3148 tp->prior_ssthresh = 0;
3149 tp->undo_marker = tp->snd_una;
3150 tp->undo_retrans = tp->retrans_out;
3151
3152 if (icsk->icsk_ca_state < TCP_CA_CWR) {
3153 if (!(flag & FLAG_ECE))
3154 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3155 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
3156 TCP_ECN_queue_cwr(tp);
3157 }
3158
3159 tp->bytes_acked = 0;
3160 tp->snd_cwnd_cnt = 0;
3161 tp->prior_cwnd = tp->snd_cwnd;
3162 tp->prr_delivered = 0;
3163 tp->prr_out = 0;
3164 tcp_set_ca_state(sk, TCP_CA_Recovery);
3165 fast_rexmit = 1; 3219 fast_rexmit = 1;
3166 } 3220 }
3167 3221
@@ -3243,16 +3297,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
3243/* Restart timer after forward progress on connection. 3297/* Restart timer after forward progress on connection.
3244 * RFC2988 recommends to restart timer to now+rto. 3298 * RFC2988 recommends to restart timer to now+rto.
3245 */ 3299 */
3246static void tcp_rearm_rto(struct sock *sk) 3300void tcp_rearm_rto(struct sock *sk)
3247{ 3301{
3248 const struct tcp_sock *tp = tcp_sk(sk); 3302 struct tcp_sock *tp = tcp_sk(sk);
3249 3303
3250 if (!tp->packets_out) { 3304 if (!tp->packets_out) {
3251 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3305 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3252 } else { 3306 } else {
3253 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 3307 u32 rto = inet_csk(sk)->icsk_rto;
3254 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 3308 /* Offset the time elapsed after installing regular RTO */
3309 if (tp->early_retrans_delayed) {
3310 struct sk_buff *skb = tcp_write_queue_head(sk);
3311 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
3312 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
3313 /* delta may not be positive if the socket is locked
3314 * when the delayed ER timer fires and is rescheduled.
3315 */
3316 if (delta > 0)
3317 rto = delta;
3318 }
3319 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3320 TCP_RTO_MAX);
3255 } 3321 }
3322 tp->early_retrans_delayed = 0;
3323}
3324
3325/* This function is called when the delayed ER timer fires. TCP enters
3326 * fast recovery and performs fast-retransmit.
3327 */
3328void tcp_resume_early_retransmit(struct sock *sk)
3329{
3330 struct tcp_sock *tp = tcp_sk(sk);
3331
3332 tcp_rearm_rto(sk);
3333
3334 /* Stop if ER is disabled after the delayed ER timer is scheduled */
3335 if (!tp->do_early_retrans)
3336 return;
3337
3338 tcp_enter_recovery(sk, false);
3339 tcp_update_scoreboard(sk, 1);
3340 tcp_xmit_retransmit_queue(sk);
3256} 3341}
3257 3342
3258/* If we get here, the whole TSO packet has not been acked. */ 3343/* If we get here, the whole TSO packet has not been acked. */
@@ -3287,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3287 const struct inet_connection_sock *icsk = inet_csk(sk); 3372 const struct inet_connection_sock *icsk = inet_csk(sk);
3288 struct sk_buff *skb; 3373 struct sk_buff *skb;
3289 u32 now = tcp_time_stamp; 3374 u32 now = tcp_time_stamp;
3290 int fully_acked = 1; 3375 int fully_acked = true;
3291 int flag = 0; 3376 int flag = 0;
3292 u32 pkts_acked = 0; 3377 u32 pkts_acked = 0;
3293 u32 reord = tp->packets_out; 3378 u32 reord = tp->packets_out;
@@ -3311,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3311 if (!acked_pcount) 3396 if (!acked_pcount)
3312 break; 3397 break;
3313 3398
3314 fully_acked = 0; 3399 fully_acked = false;
3315 } else { 3400 } else {
3316 acked_pcount = tcp_skb_pcount(skb); 3401 acked_pcount = tcp_skb_pcount(skb);
3317 } 3402 }
@@ -3428,18 +3513,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3428 if (!tp->packets_out && tcp_is_sack(tp)) { 3513 if (!tp->packets_out && tcp_is_sack(tp)) {
3429 icsk = inet_csk(sk); 3514 icsk = inet_csk(sk);
3430 if (tp->lost_out) { 3515 if (tp->lost_out) {
3431 printk(KERN_DEBUG "Leak l=%u %d\n", 3516 pr_debug("Leak l=%u %d\n",
3432 tp->lost_out, icsk->icsk_ca_state); 3517 tp->lost_out, icsk->icsk_ca_state);
3433 tp->lost_out = 0; 3518 tp->lost_out = 0;
3434 } 3519 }
3435 if (tp->sacked_out) { 3520 if (tp->sacked_out) {
3436 printk(KERN_DEBUG "Leak s=%u %d\n", 3521 pr_debug("Leak s=%u %d\n",
3437 tp->sacked_out, icsk->icsk_ca_state); 3522 tp->sacked_out, icsk->icsk_ca_state);
3438 tp->sacked_out = 0; 3523 tp->sacked_out = 0;
3439 } 3524 }
3440 if (tp->retrans_out) { 3525 if (tp->retrans_out) {
3441 printk(KERN_DEBUG "Leak r=%u %d\n", 3526 pr_debug("Leak r=%u %d\n",
3442 tp->retrans_out, icsk->icsk_ca_state); 3527 tp->retrans_out, icsk->icsk_ca_state);
3443 tp->retrans_out = 0; 3528 tp->retrans_out = 0;
3444 } 3529 }
3445 } 3530 }
@@ -3590,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
3590 * to prove that the RTO is indeed spurious. It transfers the control 3675 * to prove that the RTO is indeed spurious. It transfers the control
3591 * from F-RTO to the conventional RTO recovery 3676 * from F-RTO to the conventional RTO recovery
3592 */ 3677 */
3593static int tcp_process_frto(struct sock *sk, int flag) 3678static bool tcp_process_frto(struct sock *sk, int flag)
3594{ 3679{
3595 struct tcp_sock *tp = tcp_sk(sk); 3680 struct tcp_sock *tp = tcp_sk(sk);
3596 3681
@@ -3606,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
3606 3691
3607 if (!before(tp->snd_una, tp->frto_highmark)) { 3692 if (!before(tp->snd_una, tp->frto_highmark)) {
3608 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); 3693 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
3609 return 1; 3694 return true;
3610 } 3695 }
3611 3696
3612 if (!tcp_is_sackfrto(tp)) { 3697 if (!tcp_is_sackfrto(tp)) {
@@ -3615,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag)
3615 * data, winupdate 3700 * data, winupdate
3616 */ 3701 */
3617 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) 3702 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
3618 return 1; 3703 return true;
3619 3704
3620 if (!(flag & FLAG_DATA_ACKED)) { 3705 if (!(flag & FLAG_DATA_ACKED)) {
3621 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), 3706 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
3622 flag); 3707 flag);
3623 return 1; 3708 return true;
3624 } 3709 }
3625 } else { 3710 } else {
3626 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3711 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3627 /* Prevent sending of new data. */ 3712 /* Prevent sending of new data. */
3628 tp->snd_cwnd = min(tp->snd_cwnd, 3713 tp->snd_cwnd = min(tp->snd_cwnd,
3629 tcp_packets_in_flight(tp)); 3714 tcp_packets_in_flight(tp));
3630 return 1; 3715 return true;
3631 } 3716 }
3632 3717
3633 if ((tp->frto_counter >= 2) && 3718 if ((tp->frto_counter >= 2) &&
@@ -3637,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag)
3637 /* RFC4138 shortcoming (see comment above) */ 3722 /* RFC4138 shortcoming (see comment above) */
3638 if (!(flag & FLAG_FORWARD_PROGRESS) && 3723 if (!(flag & FLAG_FORWARD_PROGRESS) &&
3639 (flag & FLAG_NOT_DUP)) 3724 (flag & FLAG_NOT_DUP))
3640 return 1; 3725 return true;
3641 3726
3642 tcp_enter_frto_loss(sk, 3, flag); 3727 tcp_enter_frto_loss(sk, 3, flag);
3643 return 1; 3728 return true;
3644 } 3729 }
3645 } 3730 }
3646 3731
@@ -3652,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
3652 if (!tcp_may_send_now(sk)) 3737 if (!tcp_may_send_now(sk))
3653 tcp_enter_frto_loss(sk, 2, flag); 3738 tcp_enter_frto_loss(sk, 2, flag);
3654 3739
3655 return 1; 3740 return true;
3656 } else { 3741 } else {
3657 switch (sysctl_tcp_frto_response) { 3742 switch (sysctl_tcp_frto_response) {
3658 case 2: 3743 case 2:
@@ -3669,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
3669 tp->undo_marker = 0; 3754 tp->undo_marker = 0;
3670 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); 3755 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
3671 } 3756 }
3672 return 0; 3757 return false;
3673} 3758}
3674 3759
3675/* This routine deals with incoming acks, but not outgoing ones. */ 3760/* This routine deals with incoming acks, but not outgoing ones. */
@@ -3687,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3687 int prior_sacked = tp->sacked_out; 3772 int prior_sacked = tp->sacked_out;
3688 int pkts_acked = 0; 3773 int pkts_acked = 0;
3689 int newly_acked_sacked = 0; 3774 int newly_acked_sacked = 0;
3690 int frto_cwnd = 0; 3775 bool frto_cwnd = false;
3691 3776
3692 /* If the ack is older than previous acks 3777 /* If the ack is older than previous acks
3693 * then we can probably ignore it. 3778 * then we can probably ignore it.
@@ -3701,6 +3786,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3701 if (after(ack, tp->snd_nxt)) 3786 if (after(ack, tp->snd_nxt))
3702 goto invalid_ack; 3787 goto invalid_ack;
3703 3788
3789 if (tp->early_retrans_delayed)
3790 tcp_rearm_rto(sk);
3791
3704 if (after(ack, prior_snd_una)) 3792 if (after(ack, prior_snd_una))
3705 flag |= FLAG_SND_UNA_ADVANCED; 3793 flag |= FLAG_SND_UNA_ADVANCED;
3706 3794
@@ -3866,10 +3954,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
3866 __u8 snd_wscale = *(__u8 *)ptr; 3954 __u8 snd_wscale = *(__u8 *)ptr;
3867 opt_rx->wscale_ok = 1; 3955 opt_rx->wscale_ok = 1;
3868 if (snd_wscale > 14) { 3956 if (snd_wscale > 14) {
3869 if (net_ratelimit()) 3957 net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n",
3870 printk(KERN_INFO "tcp_parse_options: Illegal window " 3958 __func__,
3871 "scaling value %d >14 received.\n", 3959 snd_wscale);
3872 snd_wscale);
3873 snd_wscale = 14; 3960 snd_wscale = 14;
3874 } 3961 }
3875 opt_rx->snd_wscale = snd_wscale; 3962 opt_rx->snd_wscale = snd_wscale;
@@ -3940,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
3940} 4027}
3941EXPORT_SYMBOL(tcp_parse_options); 4028EXPORT_SYMBOL(tcp_parse_options);
3942 4029
3943static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 4030static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
3944{ 4031{
3945 const __be32 *ptr = (const __be32 *)(th + 1); 4032 const __be32 *ptr = (const __be32 *)(th + 1);
3946 4033
@@ -3951,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3951 tp->rx_opt.rcv_tsval = ntohl(*ptr); 4038 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3952 ++ptr; 4039 ++ptr;
3953 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 4040 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3954 return 1; 4041 return true;
3955 } 4042 }
3956 return 0; 4043 return false;
3957} 4044}
3958 4045
3959/* Fast parse options. This hopes to only see timestamps. 4046/* Fast parse options. This hopes to only see timestamps.
3960 * If it is wrong it falls back on tcp_parse_options(). 4047 * If it is wrong it falls back on tcp_parse_options().
3961 */ 4048 */
3962static int tcp_fast_parse_options(const struct sk_buff *skb, 4049static bool tcp_fast_parse_options(const struct sk_buff *skb,
3963 const struct tcphdr *th, 4050 const struct tcphdr *th,
3964 struct tcp_sock *tp, const u8 **hvpp) 4051 struct tcp_sock *tp, const u8 **hvpp)
3965{ 4052{
3966 /* In the spirit of fast parsing, compare doff directly to constant 4053 /* In the spirit of fast parsing, compare doff directly to constant
3967 * values. Because equality is used, short doff can be ignored here. 4054 * values. Because equality is used, short doff can be ignored here.
3968 */ 4055 */
3969 if (th->doff == (sizeof(*th) / 4)) { 4056 if (th->doff == (sizeof(*th) / 4)) {
3970 tp->rx_opt.saw_tstamp = 0; 4057 tp->rx_opt.saw_tstamp = 0;
3971 return 0; 4058 return false;
3972 } else if (tp->rx_opt.tstamp_ok && 4059 } else if (tp->rx_opt.tstamp_ok &&
3973 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 4060 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
3974 if (tcp_parse_aligned_timestamp(tp, th)) 4061 if (tcp_parse_aligned_timestamp(tp, th))
3975 return 1; 4062 return true;
3976 } 4063 }
3977 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); 4064 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
3978 return 1; 4065 return true;
3979} 4066}
3980 4067
3981#ifdef CONFIG_TCP_MD5SIG 4068#ifdef CONFIG_TCP_MD5SIG
@@ -4191,7 +4278,7 @@ static void tcp_fin(struct sock *sk)
4191 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4278 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4192 * cases we should never reach this piece of code. 4279 * cases we should never reach this piece of code.
4193 */ 4280 */
4194 printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 4281 pr_err("%s: Impossible, sk->sk_state=%d\n",
4195 __func__, sk->sk_state); 4282 __func__, sk->sk_state);
4196 break; 4283 break;
4197 } 4284 }
@@ -4216,7 +4303,7 @@ static void tcp_fin(struct sock *sk)
4216 } 4303 }
4217} 4304}
4218 4305
4219static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4306static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4220 u32 end_seq) 4307 u32 end_seq)
4221{ 4308{
4222 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4309 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4224,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4224 sp->start_seq = seq; 4311 sp->start_seq = seq;
4225 if (after(end_seq, sp->end_seq)) 4312 if (after(end_seq, sp->end_seq))
4226 sp->end_seq = end_seq; 4313 sp->end_seq = end_seq;
4227 return 1; 4314 return true;
4228 } 4315 }
4229 return 0; 4316 return false;
4230} 4317}
4231 4318
4232static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4319static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4422,10 +4509,10 @@ static void tcp_ofo_queue(struct sock *sk)
4422 } 4509 }
4423} 4510}
4424 4511
4425static int tcp_prune_ofo_queue(struct sock *sk); 4512static bool tcp_prune_ofo_queue(struct sock *sk);
4426static int tcp_prune_queue(struct sock *sk); 4513static int tcp_prune_queue(struct sock *sk);
4427 4514
4428static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) 4515static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
4429{ 4516{
4430 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4517 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4431 !sk_rmem_schedule(sk, size)) { 4518 !sk_rmem_schedule(sk, size)) {
@@ -4444,11 +4531,225 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
4444 return 0; 4531 return 0;
4445} 4532}
4446 4533
4534/**
4535 * tcp_try_coalesce - try to merge skb to prior one
4536 * @sk: socket
4537 * @to: prior buffer
4538 * @from: buffer to add in queue
4539 * @fragstolen: pointer to boolean
4540 *
4541 * Before queueing skb @from after @to, try to merge them
4542 * to reduce overall memory use and queue lengths, if cost is small.
4543 * Packets in ofo or receive queues can stay a long time.
4544 * Better try to coalesce them right now to avoid future collapses.
4545 * Returns true if caller should free @from instead of queueing it
4546 */
4547static bool tcp_try_coalesce(struct sock *sk,
4548 struct sk_buff *to,
4549 struct sk_buff *from,
4550 bool *fragstolen)
4551{
4552 int delta;
4553
4554 *fragstolen = false;
4555
4556 if (tcp_hdr(from)->fin)
4557 return false;
4558
4559 /* Its possible this segment overlaps with prior segment in queue */
4560 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
4561 return false;
4562
4563 if (!skb_try_coalesce(to, from, fragstolen, &delta))
4564 return false;
4565
4566 atomic_add(delta, &sk->sk_rmem_alloc);
4567 sk_mem_charge(sk, delta);
4568 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4569 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4570 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4571 return true;
4572}
4573
4574static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4575{
4576 struct tcp_sock *tp = tcp_sk(sk);
4577 struct sk_buff *skb1;
4578 u32 seq, end_seq;
4579
4580 TCP_ECN_check_ce(tp, skb);
4581
4582 if (tcp_try_rmem_schedule(sk, skb->truesize)) {
4583 /* TODO: should increment a counter */
4584 __kfree_skb(skb);
4585 return;
4586 }
4587
4588 /* Disable header prediction. */
4589 tp->pred_flags = 0;
4590 inet_csk_schedule_ack(sk);
4591
4592 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4593 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4594
4595 skb1 = skb_peek_tail(&tp->out_of_order_queue);
4596 if (!skb1) {
4597 /* Initial out of order segment, build 1 SACK. */
4598 if (tcp_is_sack(tp)) {
4599 tp->rx_opt.num_sacks = 1;
4600 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
4601 tp->selective_acks[0].end_seq =
4602 TCP_SKB_CB(skb)->end_seq;
4603 }
4604 __skb_queue_head(&tp->out_of_order_queue, skb);
4605 goto end;
4606 }
4607
4608 seq = TCP_SKB_CB(skb)->seq;
4609 end_seq = TCP_SKB_CB(skb)->end_seq;
4610
4611 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4612 bool fragstolen;
4613
4614 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
4615 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4616 } else {
4617 kfree_skb_partial(skb, fragstolen);
4618 skb = NULL;
4619 }
4620
4621 if (!tp->rx_opt.num_sacks ||
4622 tp->selective_acks[0].end_seq != seq)
4623 goto add_sack;
4624
4625 /* Common case: data arrive in order after hole. */
4626 tp->selective_acks[0].end_seq = end_seq;
4627 goto end;
4628 }
4629
4630 /* Find place to insert this segment. */
4631 while (1) {
4632 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4633 break;
4634 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4635 skb1 = NULL;
4636 break;
4637 }
4638 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4639 }
4640
4641 /* Do skb overlap to previous one? */
4642 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4643 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4644 /* All the bits are present. Drop. */
4645 __kfree_skb(skb);
4646 skb = NULL;
4647 tcp_dsack_set(sk, seq, end_seq);
4648 goto add_sack;
4649 }
4650 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4651 /* Partial overlap. */
4652 tcp_dsack_set(sk, seq,
4653 TCP_SKB_CB(skb1)->end_seq);
4654 } else {
4655 if (skb_queue_is_first(&tp->out_of_order_queue,
4656 skb1))
4657 skb1 = NULL;
4658 else
4659 skb1 = skb_queue_prev(
4660 &tp->out_of_order_queue,
4661 skb1);
4662 }
4663 }
4664 if (!skb1)
4665 __skb_queue_head(&tp->out_of_order_queue, skb);
4666 else
4667 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4668
4669 /* And clean segments covered by new one as whole. */
4670 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4671 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4672
4673 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4674 break;
4675 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4676 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4677 end_seq);
4678 break;
4679 }
4680 __skb_unlink(skb1, &tp->out_of_order_queue);
4681 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4682 TCP_SKB_CB(skb1)->end_seq);
4683 __kfree_skb(skb1);
4684 }
4685
4686add_sack:
4687 if (tcp_is_sack(tp))
4688 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4689end:
4690 if (skb)
4691 skb_set_owner_r(skb, sk);
4692}
4693
4694static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
4695 bool *fragstolen)
4696{
4697 int eaten;
4698 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
4699
4700 __skb_pull(skb, hdrlen);
4701 eaten = (tail &&
4702 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
4703 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4704 if (!eaten) {
4705 __skb_queue_tail(&sk->sk_receive_queue, skb);
4706 skb_set_owner_r(skb, sk);
4707 }
4708 return eaten;
4709}
4710
4711int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4712{
4713 struct sk_buff *skb;
4714 struct tcphdr *th;
4715 bool fragstolen;
4716
4717 if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
4718 goto err;
4719
4720 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
4721 if (!skb)
4722 goto err;
4723
4724 th = (struct tcphdr *)skb_put(skb, sizeof(*th));
4725 skb_reset_transport_header(skb);
4726 memset(th, 0, sizeof(*th));
4727
4728 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
4729 goto err_free;
4730
4731 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
4732 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
4733 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
4734
4735 if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
4736 WARN_ON_ONCE(fragstolen); /* should not happen */
4737 __kfree_skb(skb);
4738 }
4739 return size;
4740
4741err_free:
4742 kfree_skb(skb);
4743err:
4744 return -ENOMEM;
4745}
4746
4447static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4747static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4448{ 4748{
4449 const struct tcphdr *th = tcp_hdr(skb); 4749 const struct tcphdr *th = tcp_hdr(skb);
4450 struct tcp_sock *tp = tcp_sk(sk); 4750 struct tcp_sock *tp = tcp_sk(sk);
4451 int eaten = -1; 4751 int eaten = -1;
4752 bool fragstolen = false;
4452 4753
4453 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4754 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
4454 goto drop; 4755 goto drop;
@@ -4493,8 +4794,7 @@ queue_and_out:
4493 tcp_try_rmem_schedule(sk, skb->truesize)) 4794 tcp_try_rmem_schedule(sk, skb->truesize))
4494 goto drop; 4795 goto drop;
4495 4796
4496 skb_set_owner_r(skb, sk); 4797 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
4497 __skb_queue_tail(&sk->sk_receive_queue, skb);
4498 } 4798 }
4499 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4799 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4500 if (skb->len) 4800 if (skb->len)
@@ -4518,7 +4818,7 @@ queue_and_out:
4518 tcp_fast_path_check(sk); 4818 tcp_fast_path_check(sk);
4519 4819
4520 if (eaten > 0) 4820 if (eaten > 0)
4521 __kfree_skb(skb); 4821 kfree_skb_partial(skb, fragstolen);
4522 else if (!sock_flag(sk, SOCK_DEAD)) 4822 else if (!sock_flag(sk, SOCK_DEAD))
4523 sk->sk_data_ready(sk, 0); 4823 sk->sk_data_ready(sk, 0);
4524 return; 4824 return;
@@ -4559,105 +4859,7 @@ drop:
4559 goto queue_and_out; 4859 goto queue_and_out;
4560 } 4860 }
4561 4861
4562 TCP_ECN_check_ce(tp, skb); 4862 tcp_data_queue_ofo(sk, skb);
4563
4564 if (tcp_try_rmem_schedule(sk, skb->truesize))
4565 goto drop;
4566
4567 /* Disable header prediction. */
4568 tp->pred_flags = 0;
4569 inet_csk_schedule_ack(sk);
4570
4571 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4572 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4573
4574 skb_set_owner_r(skb, sk);
4575
4576 if (!skb_peek(&tp->out_of_order_queue)) {
4577 /* Initial out of order segment, build 1 SACK. */
4578 if (tcp_is_sack(tp)) {
4579 tp->rx_opt.num_sacks = 1;
4580 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
4581 tp->selective_acks[0].end_seq =
4582 TCP_SKB_CB(skb)->end_seq;
4583 }
4584 __skb_queue_head(&tp->out_of_order_queue, skb);
4585 } else {
4586 struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4587 u32 seq = TCP_SKB_CB(skb)->seq;
4588 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4589
4590 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4591 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4592
4593 if (!tp->rx_opt.num_sacks ||
4594 tp->selective_acks[0].end_seq != seq)
4595 goto add_sack;
4596
4597 /* Common case: data arrive in order after hole. */
4598 tp->selective_acks[0].end_seq = end_seq;
4599 return;
4600 }
4601
4602 /* Find place to insert this segment. */
4603 while (1) {
4604 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4605 break;
4606 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4607 skb1 = NULL;
4608 break;
4609 }
4610 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4611 }
4612
4613 /* Do skb overlap to previous one? */
4614 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4615 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4616 /* All the bits are present. Drop. */
4617 __kfree_skb(skb);
4618 tcp_dsack_set(sk, seq, end_seq);
4619 goto add_sack;
4620 }
4621 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4622 /* Partial overlap. */
4623 tcp_dsack_set(sk, seq,
4624 TCP_SKB_CB(skb1)->end_seq);
4625 } else {
4626 if (skb_queue_is_first(&tp->out_of_order_queue,
4627 skb1))
4628 skb1 = NULL;
4629 else
4630 skb1 = skb_queue_prev(
4631 &tp->out_of_order_queue,
4632 skb1);
4633 }
4634 }
4635 if (!skb1)
4636 __skb_queue_head(&tp->out_of_order_queue, skb);
4637 else
4638 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4639
4640 /* And clean segments covered by new one as whole. */
4641 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4642 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4643
4644 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4645 break;
4646 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4647 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4648 end_seq);
4649 break;
4650 }
4651 __skb_unlink(skb1, &tp->out_of_order_queue);
4652 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4653 TCP_SKB_CB(skb1)->end_seq);
4654 __kfree_skb(skb1);
4655 }
4656
4657add_sack:
4658 if (tcp_is_sack(tp))
4659 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4660 }
4661} 4863}
4662 4864
4663static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4865static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4836,10 +5038,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4836 * Purge the out-of-order queue. 5038 * Purge the out-of-order queue.
4837 * Return true if queue was pruned. 5039 * Return true if queue was pruned.
4838 */ 5040 */
4839static int tcp_prune_ofo_queue(struct sock *sk) 5041static bool tcp_prune_ofo_queue(struct sock *sk)
4840{ 5042{
4841 struct tcp_sock *tp = tcp_sk(sk); 5043 struct tcp_sock *tp = tcp_sk(sk);
4842 int res = 0; 5044 bool res = false;
4843 5045
4844 if (!skb_queue_empty(&tp->out_of_order_queue)) { 5046 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4845 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); 5047 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -4853,7 +5055,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
4853 if (tp->rx_opt.sack_ok) 5055 if (tp->rx_opt.sack_ok)
4854 tcp_sack_reset(&tp->rx_opt); 5056 tcp_sack_reset(&tp->rx_opt);
4855 sk_mem_reclaim(sk); 5057 sk_mem_reclaim(sk);
4856 res = 1; 5058 res = true;
4857 } 5059 }
4858 return res; 5060 return res;
4859} 5061}
@@ -4930,7 +5132,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
4930 tp->snd_cwnd_stamp = tcp_time_stamp; 5132 tp->snd_cwnd_stamp = tcp_time_stamp;
4931} 5133}
4932 5134
4933static int tcp_should_expand_sndbuf(const struct sock *sk) 5135static bool tcp_should_expand_sndbuf(const struct sock *sk)
4934{ 5136{
4935 const struct tcp_sock *tp = tcp_sk(sk); 5137 const struct tcp_sock *tp = tcp_sk(sk);
4936 5138
@@ -4938,21 +5140,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
4938 * not modify it. 5140 * not modify it.
4939 */ 5141 */
4940 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 5142 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
4941 return 0; 5143 return false;
4942 5144
4943 /* If we are under global TCP memory pressure, do not expand. */ 5145 /* If we are under global TCP memory pressure, do not expand. */
4944 if (sk_under_memory_pressure(sk)) 5146 if (sk_under_memory_pressure(sk))
4945 return 0; 5147 return false;
4946 5148
4947 /* If we are under soft global TCP memory pressure, do not expand. */ 5149 /* If we are under soft global TCP memory pressure, do not expand. */
4948 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 5150 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
4949 return 0; 5151 return false;
4950 5152
4951 /* If we filled the congestion window, do not expand. */ 5153 /* If we filled the congestion window, do not expand. */
4952 if (tp->packets_out >= tp->snd_cwnd) 5154 if (tp->packets_out >= tp->snd_cwnd)
4953 return 0; 5155 return false;
4954 5156
4955 return 1; 5157 return true;
4956} 5158}
4957 5159
4958/* When incoming ACK allowed to free some skb from write_queue, 5160/* When incoming ACK allowed to free some skb from write_queue,
@@ -5178,19 +5380,19 @@ static inline int tcp_checksum_complete_user(struct sock *sk,
5178} 5380}
5179 5381
5180#ifdef CONFIG_NET_DMA 5382#ifdef CONFIG_NET_DMA
5181static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, 5383static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5182 int hlen) 5384 int hlen)
5183{ 5385{
5184 struct tcp_sock *tp = tcp_sk(sk); 5386 struct tcp_sock *tp = tcp_sk(sk);
5185 int chunk = skb->len - hlen; 5387 int chunk = skb->len - hlen;
5186 int dma_cookie; 5388 int dma_cookie;
5187 int copied_early = 0; 5389 bool copied_early = false;
5188 5390
5189 if (tp->ucopy.wakeup) 5391 if (tp->ucopy.wakeup)
5190 return 0; 5392 return false;
5191 5393
5192 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 5394 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
5193 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 5395 tp->ucopy.dma_chan = net_dma_find_channel();
5194 5396
5195 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 5397 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
5196 5398
@@ -5203,7 +5405,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5203 goto out; 5405 goto out;
5204 5406
5205 tp->ucopy.dma_cookie = dma_cookie; 5407 tp->ucopy.dma_cookie = dma_cookie;
5206 copied_early = 1; 5408 copied_early = true;
5207 5409
5208 tp->ucopy.len -= chunk; 5410 tp->ucopy.len -= chunk;
5209 tp->copied_seq += chunk; 5411 tp->copied_seq += chunk;
@@ -5395,6 +5597,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5395 } else { 5597 } else {
5396 int eaten = 0; 5598 int eaten = 0;
5397 int copied_early = 0; 5599 int copied_early = 0;
5600 bool fragstolen = false;
5398 5601
5399 if (tp->copied_seq == tp->rcv_nxt && 5602 if (tp->copied_seq == tp->rcv_nxt &&
5400 len - tcp_header_len <= tp->ucopy.len) { 5603 len - tcp_header_len <= tp->ucopy.len) {
@@ -5452,10 +5655,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5452 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5655 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
5453 5656
5454 /* Bulk data transfer: receiver */ 5657 /* Bulk data transfer: receiver */
5455 __skb_pull(skb, tcp_header_len); 5658 eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
5456 __skb_queue_tail(&sk->sk_receive_queue, skb); 5659 &fragstolen);
5457 skb_set_owner_r(skb, sk);
5458 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5459 } 5660 }
5460 5661
5461 tcp_event_data_recv(sk, skb); 5662 tcp_event_data_recv(sk, skb);
@@ -5477,7 +5678,7 @@ no_ack:
5477 else 5678 else
5478#endif 5679#endif
5479 if (eaten) 5680 if (eaten)
5480 __kfree_skb(skb); 5681 kfree_skb_partial(skb, fragstolen);
5481 else 5682 else
5482 sk->sk_data_ready(sk, 0); 5683 sk->sk_data_ready(sk, 0);
5483 return 0; 5684 return 0;
@@ -5521,6 +5722,44 @@ discard:
5521} 5722}
5522EXPORT_SYMBOL(tcp_rcv_established); 5723EXPORT_SYMBOL(tcp_rcv_established);
5523 5724
5725void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5726{
5727 struct tcp_sock *tp = tcp_sk(sk);
5728 struct inet_connection_sock *icsk = inet_csk(sk);
5729
5730 tcp_set_state(sk, TCP_ESTABLISHED);
5731
5732 if (skb != NULL)
5733 security_inet_conn_established(sk, skb);
5734
5735 /* Make sure socket is routed, for correct metrics. */
5736 icsk->icsk_af_ops->rebuild_header(sk);
5737
5738 tcp_init_metrics(sk);
5739
5740 tcp_init_congestion_control(sk);
5741
5742 /* Prevent spurious tcp_cwnd_restart() on first data
5743 * packet.
5744 */
5745 tp->lsndtime = tcp_time_stamp;
5746
5747 tcp_init_buffer_space(sk);
5748
5749 if (sock_flag(sk, SOCK_KEEPOPEN))
5750 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5751
5752 if (!tp->rx_opt.snd_wscale)
5753 __tcp_fast_path_on(tp, tp->snd_wnd);
5754 else
5755 tp->pred_flags = 0;
5756
5757 if (!sock_flag(sk, SOCK_DEAD)) {
5758 sk->sk_state_change(sk);
5759 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5760 }
5761}
5762
5524static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5763static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5525 const struct tcphdr *th, unsigned int len) 5764 const struct tcphdr *th, unsigned int len)
5526{ 5765{
@@ -5653,36 +5892,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5653 } 5892 }
5654 5893
5655 smp_mb(); 5894 smp_mb();
5656 tcp_set_state(sk, TCP_ESTABLISHED);
5657
5658 security_inet_conn_established(sk, skb);
5659 5895
5660 /* Make sure socket is routed, for correct metrics. */ 5896 tcp_finish_connect(sk, skb);
5661 icsk->icsk_af_ops->rebuild_header(sk);
5662
5663 tcp_init_metrics(sk);
5664
5665 tcp_init_congestion_control(sk);
5666
5667 /* Prevent spurious tcp_cwnd_restart() on first data
5668 * packet.
5669 */
5670 tp->lsndtime = tcp_time_stamp;
5671
5672 tcp_init_buffer_space(sk);
5673
5674 if (sock_flag(sk, SOCK_KEEPOPEN))
5675 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5676
5677 if (!tp->rx_opt.snd_wscale)
5678 __tcp_fast_path_on(tp, tp->snd_wnd);
5679 else
5680 tp->pred_flags = 0;
5681
5682 if (!sock_flag(sk, SOCK_DEAD)) {
5683 sk->sk_state_change(sk);
5684 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5685 }
5686 5897
5687 if (sk->sk_write_pending || 5898 if (sk->sk_write_pending ||
5688 icsk->icsk_accept_queue.rskq_defer_accept || 5899 icsk->icsk_accept_queue.rskq_defer_accept ||
@@ -5696,8 +5907,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5696 */ 5907 */
5697 inet_csk_schedule_ack(sk); 5908 inet_csk_schedule_ack(sk);
5698 icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5909 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5699 icsk->icsk_ack.ato = TCP_ATO_MIN;
5700 tcp_incr_quickack(sk);
5701 tcp_enter_quickack_mode(sk); 5910 tcp_enter_quickack_mode(sk);
5702 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5911 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5703 TCP_DELACK_MAX, TCP_RTO_MAX); 5912 TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fd54c5f8a255..c8d28c433b2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -50,6 +50,7 @@
50 * a single port at the same time. 50 * a single port at the same time.
51 */ 51 */
52 52
53#define pr_fmt(fmt) "TCP: " fmt
53 54
54#include <linux/bottom_half.h> 55#include <linux/bottom_half.h>
55#include <linux/types.h> 56#include <linux/types.h>
@@ -90,16 +91,8 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
90 91
91 92
92#ifdef CONFIG_TCP_MD5SIG 93#ifdef CONFIG_TCP_MD5SIG
93static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, 94static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 addr);
95static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
96 __be32 daddr, __be32 saddr, const struct tcphdr *th); 95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97#else
98static inline
99struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
100{
101 return NULL;
102}
103#endif 96#endif
104 97
105struct inet_hashinfo tcp_hashinfo; 98struct inet_hashinfo tcp_hashinfo;
@@ -145,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
145} 138}
146EXPORT_SYMBOL_GPL(tcp_twsk_unique); 139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
147 140
141static int tcp_repair_connect(struct sock *sk)
142{
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
145
146 return 0;
147}
148
148/* This will initiate an outgoing connection. */ 149/* This will initiate an outgoing connection. */
149int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 150int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
150{ 151{
@@ -203,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
203 /* Reset inherited state */ 204 /* Reset inherited state */
204 tp->rx_opt.ts_recent = 0; 205 tp->rx_opt.ts_recent = 0;
205 tp->rx_opt.ts_recent_stamp = 0; 206 tp->rx_opt.ts_recent_stamp = 0;
206 tp->write_seq = 0; 207 if (likely(!tp->repair))
208 tp->write_seq = 0;
207 } 209 }
208 210
209 if (tcp_death_row.sysctl_tw_recycle && 211 if (tcp_death_row.sysctl_tw_recycle &&
@@ -254,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
254 sk->sk_gso_type = SKB_GSO_TCPV4; 256 sk->sk_gso_type = SKB_GSO_TCPV4;
255 sk_setup_caps(sk, &rt->dst); 257 sk_setup_caps(sk, &rt->dst);
256 258
257 if (!tp->write_seq) 259 if (!tp->write_seq && likely(!tp->repair))
258 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, 260 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
259 inet->inet_daddr, 261 inet->inet_daddr,
260 inet->inet_sport, 262 inet->inet_sport,
@@ -262,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
262 264
263 inet->inet_id = tp->write_seq ^ jiffies; 265 inet->inet_id = tp->write_seq ^ jiffies;
264 266
265 err = tcp_connect(sk); 267 if (likely(!tp->repair))
268 err = tcp_connect(sk);
269 else
270 err = tcp_repair_connect(sk);
271
266 rt = NULL; 272 rt = NULL;
267 if (err) 273 if (err)
268 goto failure; 274 goto failure;
@@ -601,6 +607,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
601 struct ip_reply_arg arg; 607 struct ip_reply_arg arg;
602#ifdef CONFIG_TCP_MD5SIG 608#ifdef CONFIG_TCP_MD5SIG
603 struct tcp_md5sig_key *key; 609 struct tcp_md5sig_key *key;
610 const __u8 *hash_location = NULL;
611 unsigned char newhash[16];
612 int genhash;
613 struct sock *sk1 = NULL;
604#endif 614#endif
605 struct net *net; 615 struct net *net;
606 616
@@ -631,7 +641,36 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
631 arg.iov[0].iov_len = sizeof(rep.th); 641 arg.iov[0].iov_len = sizeof(rep.th);
632 642
633#ifdef CONFIG_TCP_MD5SIG 643#ifdef CONFIG_TCP_MD5SIG
634 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; 644 hash_location = tcp_parse_md5sig_option(th);
645 if (!sk && hash_location) {
646 /*
647 * active side is lost. Try to find listening socket through
648 * source port, and then find md5 key through listening socket.
649 * we are not loose security here:
650 * Incoming packet is checked with md5 hash with finding key,
651 * no RST generated if md5 hash doesn't match.
652 */
653 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
654 &tcp_hashinfo, ip_hdr(skb)->daddr,
655 ntohs(th->source), inet_iif(skb));
656 /* don't send rst if it can't find key */
657 if (!sk1)
658 return;
659 rcu_read_lock();
660 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 &ip_hdr(skb)->saddr, AF_INET);
662 if (!key)
663 goto release_sk1;
664
665 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
666 if (genhash || memcmp(hash_location, newhash, 16) != 0)
667 goto release_sk1;
668 } else {
669 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
670 &ip_hdr(skb)->saddr,
671 AF_INET) : NULL;
672 }
673
635 if (key) { 674 if (key) {
636 rep.opt[0] = htonl((TCPOPT_NOP << 24) | 675 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
637 (TCPOPT_NOP << 16) | 676 (TCPOPT_NOP << 16) |
@@ -664,6 +703,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
664 703
665 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
666 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
706
707#ifdef CONFIG_TCP_MD5SIG
708release_sk1:
709 if (sk1) {
710 rcu_read_unlock();
711 sock_put(sk1);
712 }
713#endif
667} 714}
668 715
669/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states 716/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
@@ -764,7 +811,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
764 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 811 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
765 req->ts_recent, 812 req->ts_recent,
766 0, 813 0,
767 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr), 814 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
815 AF_INET),
768 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 816 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
769 ip_hdr(skb)->tos); 817 ip_hdr(skb)->tos);
770} 818}
@@ -776,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
776 */ 824 */
777static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
778 struct request_sock *req, 826 struct request_sock *req,
779 struct request_values *rvp) 827 struct request_values *rvp,
828 u16 queue_mapping)
780{ 829{
781 const struct inet_request_sock *ireq = inet_rsk(req); 830 const struct inet_request_sock *ireq = inet_rsk(req);
782 struct flowi4 fl4; 831 struct flowi4 fl4;
@@ -792,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
792 if (skb) { 841 if (skb) {
793 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 842 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
794 843
844 skb_set_queue_mapping(skb, queue_mapping);
795 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 845 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
796 ireq->rmt_addr, 846 ireq->rmt_addr,
797 ireq->opt); 847 ireq->opt);
@@ -806,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
806 struct request_values *rvp) 856 struct request_values *rvp)
807{ 857{
808 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
809 return tcp_v4_send_synack(sk, NULL, req, rvp); 859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
810} 860}
811 861
812/* 862/*
@@ -818,14 +868,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
818} 868}
819 869
820/* 870/*
821 * Return 1 if a syncookie should be sent 871 * Return true if a syncookie should be sent
822 */ 872 */
823int tcp_syn_flood_action(struct sock *sk, 873bool tcp_syn_flood_action(struct sock *sk,
824 const struct sk_buff *skb, 874 const struct sk_buff *skb,
825 const char *proto) 875 const char *proto)
826{ 876{
827 const char *msg = "Dropping request"; 877 const char *msg = "Dropping request";
828 int want_cookie = 0; 878 bool want_cookie = false;
829 struct listen_sock *lopt; 879 struct listen_sock *lopt;
830 880
831 881
@@ -833,7 +883,7 @@ int tcp_syn_flood_action(struct sock *sk,
833#ifdef CONFIG_SYN_COOKIES 883#ifdef CONFIG_SYN_COOKIES
834 if (sysctl_tcp_syncookies) { 884 if (sysctl_tcp_syncookies) {
835 msg = "Sending cookies"; 885 msg = "Sending cookies";
836 want_cookie = 1; 886 want_cookie = true;
837 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); 887 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
838 } else 888 } else
839#endif 889#endif
@@ -842,8 +892,7 @@ int tcp_syn_flood_action(struct sock *sk,
842 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; 892 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
843 if (!lopt->synflood_warned) { 893 if (!lopt->synflood_warned) {
844 lopt->synflood_warned = 1; 894 lopt->synflood_warned = 1;
845 pr_info("%s: Possible SYN flooding on port %d. %s. " 895 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
846 " Check SNMP counters.\n",
847 proto, ntohs(tcp_hdr(skb)->dest), msg); 896 proto, ntohs(tcp_hdr(skb)->dest), msg);
848 } 897 }
849 return want_cookie; 898 return want_cookie;
@@ -881,153 +930,138 @@ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
881 */ 930 */
882 931
883/* Find the Key structure for an address. */ 932/* Find the Key structure for an address. */
884static struct tcp_md5sig_key * 933struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
885 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) 934 const union tcp_md5_addr *addr,
935 int family)
886{ 936{
887 struct tcp_sock *tp = tcp_sk(sk); 937 struct tcp_sock *tp = tcp_sk(sk);
888 int i; 938 struct tcp_md5sig_key *key;
889 939 struct hlist_node *pos;
890 if (!tp->md5sig_info || !tp->md5sig_info->entries4) 940 unsigned int size = sizeof(struct in_addr);
941 struct tcp_md5sig_info *md5sig;
942
943 /* caller either holds rcu_read_lock() or socket lock */
944 md5sig = rcu_dereference_check(tp->md5sig_info,
945 sock_owned_by_user(sk) ||
946 lockdep_is_held(&sk->sk_lock.slock));
947 if (!md5sig)
891 return NULL; 948 return NULL;
892 for (i = 0; i < tp->md5sig_info->entries4; i++) { 949#if IS_ENABLED(CONFIG_IPV6)
893 if (tp->md5sig_info->keys4[i].addr == addr) 950 if (family == AF_INET6)
894 return &tp->md5sig_info->keys4[i].base; 951 size = sizeof(struct in6_addr);
952#endif
953 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
954 if (key->family != family)
955 continue;
956 if (!memcmp(&key->addr, addr, size))
957 return key;
895 } 958 }
896 return NULL; 959 return NULL;
897} 960}
961EXPORT_SYMBOL(tcp_md5_do_lookup);
898 962
899struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 963struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
900 struct sock *addr_sk) 964 struct sock *addr_sk)
901{ 965{
902 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); 966 union tcp_md5_addr *addr;
967
968 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
969 return tcp_md5_do_lookup(sk, addr, AF_INET);
903} 970}
904EXPORT_SYMBOL(tcp_v4_md5_lookup); 971EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 972
906static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, 973static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
907 struct request_sock *req) 974 struct request_sock *req)
908{ 975{
909 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); 976 union tcp_md5_addr *addr;
977
978 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
979 return tcp_md5_do_lookup(sk, addr, AF_INET);
910} 980}
911 981
912/* This can be called on a newly created socket, from other files */ 982/* This can be called on a newly created socket, from other files */
913int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, 983int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
914 u8 *newkey, u8 newkeylen) 984 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
915{ 985{
916 /* Add Key to the list */ 986 /* Add Key to the list */
917 struct tcp_md5sig_key *key; 987 struct tcp_md5sig_key *key;
918 struct tcp_sock *tp = tcp_sk(sk); 988 struct tcp_sock *tp = tcp_sk(sk);
919 struct tcp4_md5sig_key *keys; 989 struct tcp_md5sig_info *md5sig;
920 990
921 key = tcp_v4_md5_do_lookup(sk, addr); 991 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
922 if (key) { 992 if (key) {
923 /* Pre-existing entry - just update that one. */ 993 /* Pre-existing entry - just update that one. */
924 kfree(key->key); 994 memcpy(key->key, newkey, newkeylen);
925 key->key = newkey;
926 key->keylen = newkeylen; 995 key->keylen = newkeylen;
927 } else { 996 return 0;
928 struct tcp_md5sig_info *md5sig; 997 }
929
930 if (!tp->md5sig_info) {
931 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
932 GFP_ATOMIC);
933 if (!tp->md5sig_info) {
934 kfree(newkey);
935 return -ENOMEM;
936 }
937 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
938 }
939 998
940 md5sig = tp->md5sig_info; 999 md5sig = rcu_dereference_protected(tp->md5sig_info,
941 if (md5sig->entries4 == 0 && 1000 sock_owned_by_user(sk));
942 tcp_alloc_md5sig_pool(sk) == NULL) { 1001 if (!md5sig) {
943 kfree(newkey); 1002 md5sig = kmalloc(sizeof(*md5sig), gfp);
1003 if (!md5sig)
944 return -ENOMEM; 1004 return -ENOMEM;
945 }
946 1005
947 if (md5sig->alloced4 == md5sig->entries4) { 1006 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
948 keys = kmalloc((sizeof(*keys) * 1007 INIT_HLIST_HEAD(&md5sig->head);
949 (md5sig->entries4 + 1)), GFP_ATOMIC); 1008 rcu_assign_pointer(tp->md5sig_info, md5sig);
950 if (!keys) { 1009 }
951 kfree(newkey);
952 if (md5sig->entries4 == 0)
953 tcp_free_md5sig_pool();
954 return -ENOMEM;
955 }
956
957 if (md5sig->entries4)
958 memcpy(keys, md5sig->keys4,
959 sizeof(*keys) * md5sig->entries4);
960 1010
961 /* Free old key list, and reference new one */ 1011 key = sock_kmalloc(sk, sizeof(*key), gfp);
962 kfree(md5sig->keys4); 1012 if (!key)
963 md5sig->keys4 = keys; 1013 return -ENOMEM;
964 md5sig->alloced4++; 1014 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
965 } 1015 sock_kfree_s(sk, key, sizeof(*key));
966 md5sig->entries4++; 1016 return -ENOMEM;
967 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
968 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
969 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
970 } 1017 }
971 return 0;
972}
973EXPORT_SYMBOL(tcp_v4_md5_do_add);
974 1018
975static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, 1019 memcpy(key->key, newkey, newkeylen);
976 u8 *newkey, u8 newkeylen) 1020 key->keylen = newkeylen;
977{ 1021 key->family = family;
978 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, 1022 memcpy(&key->addr, addr,
979 newkey, newkeylen); 1023 (family == AF_INET6) ? sizeof(struct in6_addr) :
1024 sizeof(struct in_addr));
1025 hlist_add_head_rcu(&key->node, &md5sig->head);
1026 return 0;
980} 1027}
1028EXPORT_SYMBOL(tcp_md5_do_add);
981 1029
982int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) 1030int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
983{ 1031{
984 struct tcp_sock *tp = tcp_sk(sk); 1032 struct tcp_sock *tp = tcp_sk(sk);
985 int i; 1033 struct tcp_md5sig_key *key;
986 1034 struct tcp_md5sig_info *md5sig;
987 for (i = 0; i < tp->md5sig_info->entries4; i++) { 1035
988 if (tp->md5sig_info->keys4[i].addr == addr) { 1036 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
989 /* Free the key */ 1037 if (!key)
990 kfree(tp->md5sig_info->keys4[i].base.key); 1038 return -ENOENT;
991 tp->md5sig_info->entries4--; 1039 hlist_del_rcu(&key->node);
992 1040 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
993 if (tp->md5sig_info->entries4 == 0) { 1041 kfree_rcu(key, rcu);
994 kfree(tp->md5sig_info->keys4); 1042 md5sig = rcu_dereference_protected(tp->md5sig_info,
995 tp->md5sig_info->keys4 = NULL; 1043 sock_owned_by_user(sk));
996 tp->md5sig_info->alloced4 = 0; 1044 if (hlist_empty(&md5sig->head))
997 tcp_free_md5sig_pool(); 1045 tcp_free_md5sig_pool();
998 } else if (tp->md5sig_info->entries4 != i) { 1046 return 0;
999 /* Need to do some manipulation */
1000 memmove(&tp->md5sig_info->keys4[i],
1001 &tp->md5sig_info->keys4[i+1],
1002 (tp->md5sig_info->entries4 - i) *
1003 sizeof(struct tcp4_md5sig_key));
1004 }
1005 return 0;
1006 }
1007 }
1008 return -ENOENT;
1009} 1047}
1010EXPORT_SYMBOL(tcp_v4_md5_do_del); 1048EXPORT_SYMBOL(tcp_md5_do_del);
1011 1049
1012static void tcp_v4_clear_md5_list(struct sock *sk) 1050void tcp_clear_md5_list(struct sock *sk)
1013{ 1051{
1014 struct tcp_sock *tp = tcp_sk(sk); 1052 struct tcp_sock *tp = tcp_sk(sk);
1053 struct tcp_md5sig_key *key;
1054 struct hlist_node *pos, *n;
1055 struct tcp_md5sig_info *md5sig;
1015 1056
1016 /* Free each key, then the set of key keys, 1057 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1017 * the crypto element, and then decrement our 1058
1018 * hold on the last resort crypto. 1059 if (!hlist_empty(&md5sig->head))
1019 */
1020 if (tp->md5sig_info->entries4) {
1021 int i;
1022 for (i = 0; i < tp->md5sig_info->entries4; i++)
1023 kfree(tp->md5sig_info->keys4[i].base.key);
1024 tp->md5sig_info->entries4 = 0;
1025 tcp_free_md5sig_pool(); 1060 tcp_free_md5sig_pool();
1026 } 1061 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1027 if (tp->md5sig_info->keys4) { 1062 hlist_del_rcu(&key->node);
1028 kfree(tp->md5sig_info->keys4); 1063 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1029 tp->md5sig_info->keys4 = NULL; 1064 kfree_rcu(key, rcu);
1030 tp->md5sig_info->alloced4 = 0;
1031 } 1065 }
1032} 1066}
1033 1067
@@ -1036,7 +1070,6 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1036{ 1070{
1037 struct tcp_md5sig cmd; 1071 struct tcp_md5sig cmd;
1038 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; 1072 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1039 u8 *newkey;
1040 1073
1041 if (optlen < sizeof(cmd)) 1074 if (optlen < sizeof(cmd))
1042 return -EINVAL; 1075 return -EINVAL;
@@ -1047,32 +1080,16 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1047 if (sin->sin_family != AF_INET) 1080 if (sin->sin_family != AF_INET)
1048 return -EINVAL; 1081 return -EINVAL;
1049 1082
1050 if (!cmd.tcpm_key || !cmd.tcpm_keylen) { 1083 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1051 if (!tcp_sk(sk)->md5sig_info) 1084 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1052 return -ENOENT; 1085 AF_INET);
1053 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1054 }
1055 1086
1056 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 1087 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1057 return -EINVAL; 1088 return -EINVAL;
1058 1089
1059 if (!tcp_sk(sk)->md5sig_info) { 1090 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1060 struct tcp_sock *tp = tcp_sk(sk); 1091 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1061 struct tcp_md5sig_info *p; 1092 GFP_KERNEL);
1062
1063 p = kzalloc(sizeof(*p), sk->sk_allocation);
1064 if (!p)
1065 return -EINVAL;
1066
1067 tp->md5sig_info = p;
1068 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1069 }
1070
1071 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1072 if (!newkey)
1073 return -ENOMEM;
1074 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1075 newkey, cmd.tcpm_keylen);
1076} 1093}
1077 1094
1078static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 1095static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1098,7 +1115,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1098 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); 1115 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1099} 1116}
1100 1117
1101static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 1118static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1102 __be32 daddr, __be32 saddr, const struct tcphdr *th) 1119 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1103{ 1120{
1104 struct tcp_md5sig_pool *hp; 1121 struct tcp_md5sig_pool *hp;
@@ -1181,7 +1198,7 @@ clear_hash_noput:
1181} 1198}
1182EXPORT_SYMBOL(tcp_v4_md5_hash_skb); 1199EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1183 1200
1184static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) 1201static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1185{ 1202{
1186 /* 1203 /*
1187 * This gets called for each TCP segment that arrives 1204 * This gets called for each TCP segment that arrives
@@ -1198,21 +1215,22 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1198 int genhash; 1215 int genhash;
1199 unsigned char newhash[16]; 1216 unsigned char newhash[16];
1200 1217
1201 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1218 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1219 AF_INET);
1202 hash_location = tcp_parse_md5sig_option(th); 1220 hash_location = tcp_parse_md5sig_option(th);
1203 1221
1204 /* We've parsed the options - do we have a hash? */ 1222 /* We've parsed the options - do we have a hash? */
1205 if (!hash_expected && !hash_location) 1223 if (!hash_expected && !hash_location)
1206 return 0; 1224 return false;
1207 1225
1208 if (hash_expected && !hash_location) { 1226 if (hash_expected && !hash_location) {
1209 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 1227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1210 return 1; 1228 return true;
1211 } 1229 }
1212 1230
1213 if (!hash_expected && hash_location) { 1231 if (!hash_expected && hash_location) {
1214 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 1232 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1215 return 1; 1233 return true;
1216 } 1234 }
1217 1235
1218 /* Okay, so this is hash_expected and hash_location - 1236 /* Okay, so this is hash_expected and hash_location -
@@ -1223,15 +1241,14 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1223 NULL, NULL, skb); 1241 NULL, NULL, skb);
1224 1242
1225 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1243 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1226 if (net_ratelimit()) { 1244 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1227 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", 1245 &iph->saddr, ntohs(th->source),
1228 &iph->saddr, ntohs(th->source), 1246 &iph->daddr, ntohs(th->dest),
1229 &iph->daddr, ntohs(th->dest), 1247 genhash ? " tcp_v4_calc_md5_hash failed"
1230 genhash ? " tcp_v4_calc_md5_hash failed" : ""); 1248 : "");
1231 } 1249 return true;
1232 return 1;
1233 } 1250 }
1234 return 0; 1251 return false;
1235} 1252}
1236 1253
1237#endif 1254#endif
@@ -1265,7 +1282,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1265 __be32 saddr = ip_hdr(skb)->saddr; 1282 __be32 saddr = ip_hdr(skb)->saddr;
1266 __be32 daddr = ip_hdr(skb)->daddr; 1283 __be32 daddr = ip_hdr(skb)->daddr;
1267 __u32 isn = TCP_SKB_CB(skb)->when; 1284 __u32 isn = TCP_SKB_CB(skb)->when;
1268 int want_cookie = 0; 1285 bool want_cookie = false;
1269 1286
1270 /* Never answer to SYNs send to broadcast or multicast */ 1287 /* Never answer to SYNs send to broadcast or multicast */
1271 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1288 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1324,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1324 while (l-- > 0) 1341 while (l-- > 0)
1325 *c++ ^= *hash_location++; 1342 *c++ ^= *hash_location++;
1326 1343
1327 want_cookie = 0; /* not our kind of cookie */ 1344 want_cookie = false; /* not our kind of cookie */
1328 tmp_ext.cookie_out_never = 0; /* false */ 1345 tmp_ext.cookie_out_never = 0; /* false */
1329 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1346 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1330 } else if (!tp->rx_opt.cookie_in_always) { 1347 } else if (!tp->rx_opt.cookie_in_always) {
@@ -1352,7 +1369,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1352 goto drop_and_free; 1369 goto drop_and_free;
1353 1370
1354 if (!want_cookie || tmp_opt.tstamp_ok) 1371 if (!want_cookie || tmp_opt.tstamp_ok)
1355 TCP_ECN_create_request(req, tcp_hdr(skb)); 1372 TCP_ECN_create_request(req, skb);
1356 1373
1357 if (want_cookie) { 1374 if (want_cookie) {
1358 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1375 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1396,7 +1413,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1396 * to destinations, already remembered 1413 * to destinations, already remembered
1397 * to the moment of synflood. 1414 * to the moment of synflood.
1398 */ 1415 */
1399 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n", 1416 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1400 &saddr, ntohs(tcp_hdr(skb)->source)); 1417 &saddr, ntohs(tcp_hdr(skb)->source));
1401 goto drop_and_release; 1418 goto drop_and_release;
1402 } 1419 }
@@ -1407,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1407 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1424 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1408 1425
1409 if (tcp_v4_send_synack(sk, dst, req, 1426 if (tcp_v4_send_synack(sk, dst, req,
1410 (struct request_values *)&tmp_ext) || 1427 (struct request_values *)&tmp_ext,
1428 skb_get_queue_mapping(skb)) ||
1411 want_cookie) 1429 want_cookie)
1412 goto drop_and_free; 1430 goto drop_and_free;
1413 1431
@@ -1461,6 +1479,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1461 ireq->opt = NULL; 1479 ireq->opt = NULL;
1462 newinet->mc_index = inet_iif(skb); 1480 newinet->mc_index = inet_iif(skb);
1463 newinet->mc_ttl = ip_hdr(skb)->ttl; 1481 newinet->mc_ttl = ip_hdr(skb)->ttl;
1482 newinet->rcv_tos = ip_hdr(skb)->tos;
1464 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1483 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1465 if (inet_opt) 1484 if (inet_opt)
1466 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1485 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -1490,7 +1509,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1490 1509
1491#ifdef CONFIG_TCP_MD5SIG 1510#ifdef CONFIG_TCP_MD5SIG
1492 /* Copy over the MD5 key from the original socket */ 1511 /* Copy over the MD5 key from the original socket */
1493 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); 1512 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1513 AF_INET);
1494 if (key != NULL) { 1514 if (key != NULL) {
1495 /* 1515 /*
1496 * We're using one, so create a matching key 1516 * We're using one, so create a matching key
@@ -1498,10 +1518,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1498 * memory, then we end up not copying the key 1518 * memory, then we end up not copying the key
1499 * across. Shucks. 1519 * across. Shucks.
1500 */ 1520 */
1501 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1521 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1502 if (newkey != NULL) 1522 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1503 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1504 newkey, key->keylen);
1505 sk_nocaps_add(newsk, NETIF_F_GSO_MASK); 1523 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1506 } 1524 }
1507#endif 1525#endif
@@ -1727,7 +1745,7 @@ process:
1727#ifdef CONFIG_NET_DMA 1745#ifdef CONFIG_NET_DMA
1728 struct tcp_sock *tp = tcp_sk(sk); 1746 struct tcp_sock *tp = tcp_sk(sk);
1729 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1747 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1730 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1748 tp->ucopy.dma_chan = net_dma_find_channel();
1731 if (tp->ucopy.dma_chan) 1749 if (tp->ucopy.dma_chan)
1732 ret = tcp_v4_do_rcv(sk, skb); 1750 ret = tcp_v4_do_rcv(sk, skb);
1733 else 1751 else
@@ -1736,7 +1754,8 @@ process:
1736 if (!tcp_prequeue(sk, skb)) 1754 if (!tcp_prequeue(sk, skb))
1737 ret = tcp_v4_do_rcv(sk, skb); 1755 ret = tcp_v4_do_rcv(sk, skb);
1738 } 1756 }
1739 } else if (unlikely(sk_add_backlog(sk, skb))) { 1757 } else if (unlikely(sk_add_backlog(sk, skb,
1758 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1740 bh_unlock_sock(sk); 1759 bh_unlock_sock(sk);
1741 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); 1760 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1742 goto discard_and_relse; 1761 goto discard_and_relse;
@@ -1862,7 +1881,6 @@ EXPORT_SYMBOL(ipv4_specific);
1862static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1881static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1863 .md5_lookup = tcp_v4_md5_lookup, 1882 .md5_lookup = tcp_v4_md5_lookup,
1864 .calc_md5_hash = tcp_v4_md5_hash_skb, 1883 .calc_md5_hash = tcp_v4_md5_hash_skb,
1865 .md5_add = tcp_v4_md5_add_func,
1866 .md5_parse = tcp_v4_parse_md5_keys, 1884 .md5_parse = tcp_v4_parse_md5_keys,
1867}; 1885};
1868#endif 1886#endif
@@ -1873,64 +1891,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1873static int tcp_v4_init_sock(struct sock *sk) 1891static int tcp_v4_init_sock(struct sock *sk)
1874{ 1892{
1875 struct inet_connection_sock *icsk = inet_csk(sk); 1893 struct inet_connection_sock *icsk = inet_csk(sk);
1876 struct tcp_sock *tp = tcp_sk(sk);
1877
1878 skb_queue_head_init(&tp->out_of_order_queue);
1879 tcp_init_xmit_timers(sk);
1880 tcp_prequeue_init(tp);
1881 1894
1882 icsk->icsk_rto = TCP_TIMEOUT_INIT; 1895 tcp_init_sock(sk);
1883 tp->mdev = TCP_TIMEOUT_INIT;
1884
1885 /* So many TCP implementations out there (incorrectly) count the
1886 * initial SYN frame in their delayed-ACK and congestion control
1887 * algorithms that we must have the following bandaid to talk
1888 * efficiently to them. -DaveM
1889 */
1890 tp->snd_cwnd = TCP_INIT_CWND;
1891
1892 /* See draft-stevens-tcpca-spec-01 for discussion of the
1893 * initialization of these values.
1894 */
1895 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1896 tp->snd_cwnd_clamp = ~0;
1897 tp->mss_cache = TCP_MSS_DEFAULT;
1898
1899 tp->reordering = sysctl_tcp_reordering;
1900 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1901
1902 sk->sk_state = TCP_CLOSE;
1903
1904 sk->sk_write_space = sk_stream_write_space;
1905 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1906 1896
1907 icsk->icsk_af_ops = &ipv4_specific; 1897 icsk->icsk_af_ops = &ipv4_specific;
1908 icsk->icsk_sync_mss = tcp_sync_mss; 1898
1909#ifdef CONFIG_TCP_MD5SIG 1899#ifdef CONFIG_TCP_MD5SIG
1910 tp->af_specific = &tcp_sock_ipv4_specific; 1900 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1911#endif 1901#endif
1912 1902
1913 /* TCP Cookie Transactions */
1914 if (sysctl_tcp_cookie_size > 0) {
1915 /* Default, cookies without s_data_payload. */
1916 tp->cookie_values =
1917 kzalloc(sizeof(*tp->cookie_values),
1918 sk->sk_allocation);
1919 if (tp->cookie_values != NULL)
1920 kref_init(&tp->cookie_values->kref);
1921 }
1922 /* Presumed zeroed, in order of appearance:
1923 * cookie_in_always, cookie_out_never,
1924 * s_data_constant, s_data_in, s_data_out
1925 */
1926 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1927 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1928
1929 local_bh_disable();
1930 sock_update_memcg(sk);
1931 sk_sockets_allocated_inc(sk);
1932 local_bh_enable();
1933
1934 return 0; 1903 return 0;
1935} 1904}
1936 1905
@@ -1951,8 +1920,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
1951#ifdef CONFIG_TCP_MD5SIG 1920#ifdef CONFIG_TCP_MD5SIG
1952 /* Clean up the MD5 key list, if any */ 1921 /* Clean up the MD5 key list, if any */
1953 if (tp->md5sig_info) { 1922 if (tp->md5sig_info) {
1954 tcp_v4_clear_md5_list(sk); 1923 tcp_clear_md5_list(sk);
1955 kfree(tp->md5sig_info); 1924 kfree_rcu(tp->md5sig_info, rcu);
1956 tp->md5sig_info = NULL; 1925 tp->md5sig_info = NULL;
1957 } 1926 }
1958#endif 1927#endif
@@ -2107,7 +2076,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2107 return rc; 2076 return rc;
2108} 2077}
2109 2078
2110static inline int empty_bucket(struct tcp_iter_state *st) 2079static inline bool empty_bucket(struct tcp_iter_state *st)
2111{ 2080{
2112 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && 2081 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2113 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); 2082 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 49978788a9dc..b6f3583ddfe8 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,37 +6,6 @@
6#include <linux/memcontrol.h> 6#include <linux/memcontrol.h>
7#include <linux/module.h> 7#include <linux/module.h>
8 8
9static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
10static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
11 const char *buffer);
12static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
13
14static struct cftype tcp_files[] = {
15 {
16 .name = "kmem.tcp.limit_in_bytes",
17 .write_string = tcp_cgroup_write,
18 .read_u64 = tcp_cgroup_read,
19 .private = RES_LIMIT,
20 },
21 {
22 .name = "kmem.tcp.usage_in_bytes",
23 .read_u64 = tcp_cgroup_read,
24 .private = RES_USAGE,
25 },
26 {
27 .name = "kmem.tcp.failcnt",
28 .private = RES_FAILCNT,
29 .trigger = tcp_cgroup_reset,
30 .read_u64 = tcp_cgroup_read,
31 },
32 {
33 .name = "kmem.tcp.max_usage_in_bytes",
34 .private = RES_MAX_USAGE,
35 .trigger = tcp_cgroup_reset,
36 .read_u64 = tcp_cgroup_read,
37 },
38};
39
40static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto) 9static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
41{ 10{
42 return container_of(cg_proto, struct tcp_memcontrol, cg_proto); 11 return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
@@ -49,7 +18,7 @@ static void memcg_tcp_enter_memory_pressure(struct sock *sk)
49} 18}
50EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure); 19EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
51 20
52int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) 21int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
53{ 22{
54 /* 23 /*
55 * The root cgroup does not use res_counters, but rather, 24 * The root cgroup does not use res_counters, but rather,
@@ -59,13 +28,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
59 struct res_counter *res_parent = NULL; 28 struct res_counter *res_parent = NULL;
60 struct cg_proto *cg_proto, *parent_cg; 29 struct cg_proto *cg_proto, *parent_cg;
61 struct tcp_memcontrol *tcp; 30 struct tcp_memcontrol *tcp;
62 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
63 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 31 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
64 struct net *net = current->nsproxy->net_ns; 32 struct net *net = current->nsproxy->net_ns;
65 33
66 cg_proto = tcp_prot.proto_cgroup(memcg); 34 cg_proto = tcp_prot.proto_cgroup(memcg);
67 if (!cg_proto) 35 if (!cg_proto)
68 goto create_files; 36 return 0;
69 37
70 tcp = tcp_from_cgproto(cg_proto); 38 tcp = tcp_from_cgproto(cg_proto);
71 39
@@ -88,15 +56,12 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
88 cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated; 56 cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
89 cg_proto->memcg = memcg; 57 cg_proto->memcg = memcg;
90 58
91create_files: 59 return 0;
92 return cgroup_add_files(cgrp, ss, tcp_files,
93 ARRAY_SIZE(tcp_files));
94} 60}
95EXPORT_SYMBOL(tcp_init_cgroup); 61EXPORT_SYMBOL(tcp_init_cgroup);
96 62
97void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) 63void tcp_destroy_cgroup(struct mem_cgroup *memcg)
98{ 64{
99 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
100 struct cg_proto *cg_proto; 65 struct cg_proto *cg_proto;
101 struct tcp_memcontrol *tcp; 66 struct tcp_memcontrol *tcp;
102 u64 val; 67 u64 val;
@@ -109,9 +74,6 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
109 percpu_counter_destroy(&tcp->tcp_sockets_allocated); 74 percpu_counter_destroy(&tcp->tcp_sockets_allocated);
110 75
111 val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); 76 val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
112
113 if (val != RESOURCE_MAX)
114 jump_label_dec(&memcg_socket_limit_enabled);
115} 77}
116EXPORT_SYMBOL(tcp_destroy_cgroup); 78EXPORT_SYMBOL(tcp_destroy_cgroup);
117 79
@@ -142,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
142 tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT, 104 tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
143 net->ipv4.sysctl_tcp_mem[i]); 105 net->ipv4.sysctl_tcp_mem[i]);
144 106
145 if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) 107 if (val == RESOURCE_MAX)
146 jump_label_dec(&memcg_socket_limit_enabled); 108 clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
147 else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) 109 else if (val != RESOURCE_MAX) {
148 jump_label_inc(&memcg_socket_limit_enabled); 110 /*
111 * The active bit needs to be written after the static_key
112 * update. This is what guarantees that the socket activation
113 * function is the last one to run. See sock_update_memcg() for
114 * details, and note that we don't mark any socket as belonging
115 * to this memcg until that flag is up.
116 *
117 * We need to do this, because static_keys will span multiple
118 * sites, but we can't control their order. If we mark a socket
119 * as accounted, but the accounting functions are not patched in
120 * yet, we'll lose accounting.
121 *
122 * We never race with the readers in sock_update_memcg(),
123 * because when this value change, the code to process it is not
124 * patched in yet.
125 *
126 * The activated bit is used to guarantee that no two writers
127 * will do the update in the same memcg. Without that, we can't
128 * properly shutdown the static key.
129 */
130 if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
131 static_key_slow_inc(&memcg_socket_limit_enabled);
132 set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
133 }
149 134
150 return 0; 135 return 0;
151} 136}
@@ -270,3 +255,37 @@ void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
270 255
271 tcp->tcp_prot_mem[idx] = val; 256 tcp->tcp_prot_mem[idx] = val;
272} 257}
258
259static struct cftype tcp_files[] = {
260 {
261 .name = "kmem.tcp.limit_in_bytes",
262 .write_string = tcp_cgroup_write,
263 .read_u64 = tcp_cgroup_read,
264 .private = RES_LIMIT,
265 },
266 {
267 .name = "kmem.tcp.usage_in_bytes",
268 .read_u64 = tcp_cgroup_read,
269 .private = RES_USAGE,
270 },
271 {
272 .name = "kmem.tcp.failcnt",
273 .private = RES_FAILCNT,
274 .trigger = tcp_cgroup_reset,
275 .read_u64 = tcp_cgroup_read,
276 },
277 {
278 .name = "kmem.tcp.max_usage_in_bytes",
279 .private = RES_MAX_USAGE,
280 .trigger = tcp_cgroup_reset,
281 .read_u64 = tcp_cgroup_read,
282 },
283 { } /* terminate */
284};
285
286static int __init tcp_memcontrol_init(void)
287{
288 WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
289 return 0;
290}
291__initcall(tcp_memcontrol_init);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 550e755747e0..b85d9fe7d663 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
55 * state. 55 * state.
56 */ 56 */
57 57
58static int tcp_remember_stamp(struct sock *sk) 58static bool tcp_remember_stamp(struct sock *sk)
59{ 59{
60 const struct inet_connection_sock *icsk = inet_csk(sk); 60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct tcp_sock *tp = tcp_sk(sk); 61 struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
72 } 72 }
73 if (release_it) 73 if (release_it)
74 inet_putpeer(peer); 74 inet_putpeer(peer);
75 return 1; 75 return true;
76 } 76 }
77 77
78 return 0; 78 return false;
79} 79}
80 80
81static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw) 81static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
82{ 82{
83 struct sock *sk = (struct sock *) tw; 83 struct sock *sk = (struct sock *) tw;
84 struct inet_peer *peer; 84 struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
94 peer->tcp_ts = tcptw->tw_ts_recent; 94 peer->tcp_ts = tcptw->tw_ts_recent;
95 } 95 }
96 inet_putpeer(peer); 96 inet_putpeer(peer);
97 return 1; 97 return true;
98 } 98 }
99 return 0; 99 return false;
100} 100}
101 101
102static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 102static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
103{ 103{
104 if (seq == s_win) 104 if (seq == s_win)
105 return 1; 105 return true;
106 if (after(end_seq, s_win) && before(seq, e_win)) 106 if (after(end_seq, s_win) && before(seq, e_win))
107 return 1; 107 return true;
108 return seq == e_win && seq == end_seq; 108 return seq == e_win && seq == end_seq;
109} 109}
110 110
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
143 struct tcp_options_received tmp_opt; 143 struct tcp_options_received tmp_opt;
144 const u8 *hash_location; 144 const u8 *hash_location;
145 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 145 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
146 int paws_reject = 0; 146 bool paws_reject = false;
147 147
148 tmp_opt.saw_tstamp = 0; 148 tmp_opt.saw_tstamp = 0;
149 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 149 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
316 struct inet_timewait_sock *tw = NULL; 316 struct inet_timewait_sock *tw = NULL;
317 const struct inet_connection_sock *icsk = inet_csk(sk); 317 const struct inet_connection_sock *icsk = inet_csk(sk);
318 const struct tcp_sock *tp = tcp_sk(sk); 318 const struct tcp_sock *tp = tcp_sk(sk);
319 int recycle_ok = 0; 319 bool recycle_ok = false;
320 320
321 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) 321 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
322 recycle_ok = tcp_remember_stamp(sk); 322 recycle_ok = tcp_remember_stamp(sk);
@@ -359,13 +359,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
359 */ 359 */
360 do { 360 do {
361 struct tcp_md5sig_key *key; 361 struct tcp_md5sig_key *key;
362 memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); 362 tcptw->tw_md5_key = NULL;
363 tcptw->tw_md5_keylen = 0;
364 key = tp->af_specific->md5_lookup(sk, sk); 363 key = tp->af_specific->md5_lookup(sk, sk);
365 if (key != NULL) { 364 if (key != NULL) {
366 memcpy(&tcptw->tw_md5_key, key->key, key->keylen); 365 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
367 tcptw->tw_md5_keylen = key->keylen; 366 if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
368 if (tcp_alloc_md5sig_pool(sk) == NULL)
369 BUG(); 367 BUG();
370 } 368 }
371 } while (0); 369 } while (0);
@@ -405,8 +403,10 @@ void tcp_twsk_destructor(struct sock *sk)
405{ 403{
406#ifdef CONFIG_TCP_MD5SIG 404#ifdef CONFIG_TCP_MD5SIG
407 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 405 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
408 if (twsk->tw_md5_keylen) 406 if (twsk->tw_md5_key) {
409 tcp_free_md5sig_pool(); 407 tcp_free_md5sig_pool();
408 kfree_rcu(twsk->tw_md5_key, rcu);
409 }
410#endif 410#endif
411} 411}
412EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 412EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
@@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
482 newtp->sacked_out = 0; 482 newtp->sacked_out = 0;
483 newtp->fackets_out = 0; 483 newtp->fackets_out = 0;
484 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 484 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
485 tcp_enable_early_retrans(newtp);
485 486
486 /* So many TCP implementations out there (incorrectly) count the 487 /* So many TCP implementations out there (incorrectly) count the
487 * initial SYN frame in their delayed-ACK and congestion control 488 * initial SYN frame in their delayed-ACK and congestion control
@@ -574,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
574 struct sock *child; 575 struct sock *child;
575 const struct tcphdr *th = tcp_hdr(skb); 576 const struct tcphdr *th = tcp_hdr(skb);
576 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 577 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
577 int paws_reject = 0; 578 bool paws_reject = false;
578 579
579 tmp_opt.saw_tstamp = 0; 580 tmp_opt.saw_tstamp = 0;
580 if (th->doff > (sizeof(struct tcphdr)>>2)) { 581 if (th->doff > (sizeof(struct tcphdr)>>2)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4ff3b6dc74fc..803cbfe82fbc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -34,6 +34,8 @@
34 * 34 *
35 */ 35 */
36 36
37#define pr_fmt(fmt) "TCP: " fmt
38
37#include <net/tcp.h> 39#include <net/tcp.h>
38 40
39#include <linux/compiler.h> 41#include <linux/compiler.h>
@@ -78,9 +80,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
78 tp->frto_counter = 3; 80 tp->frto_counter = 3;
79 81
80 tp->packets_out += tcp_skb_pcount(skb); 82 tp->packets_out += tcp_skb_pcount(skb);
81 if (!prior_packets) 83 if (!prior_packets || tp->early_retrans_delayed)
82 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 84 tcp_rearm_rto(sk);
83 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
84} 85}
85 86
86/* SND.NXT, if window was not shrunk. 87/* SND.NXT, if window was not shrunk.
@@ -369,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
369 TCP_SKB_CB(skb)->end_seq = seq; 370 TCP_SKB_CB(skb)->end_seq = seq;
370} 371}
371 372
372static inline int tcp_urg_mode(const struct tcp_sock *tp) 373static inline bool tcp_urg_mode(const struct tcp_sock *tp)
373{ 374{
374 return tp->snd_una != tp->snd_up; 375 return tp->snd_una != tp->snd_up;
375} 376}
@@ -563,13 +564,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
563/* Compute TCP options for SYN packets. This is not the final 564/* Compute TCP options for SYN packets. This is not the final
564 * network wire format yet. 565 * network wire format yet.
565 */ 566 */
566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 567static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
567 struct tcp_out_options *opts, 568 struct tcp_out_options *opts,
568 struct tcp_md5sig_key **md5) 569 struct tcp_md5sig_key **md5)
569{ 570{
570 struct tcp_sock *tp = tcp_sk(sk); 571 struct tcp_sock *tp = tcp_sk(sk);
571 struct tcp_cookie_values *cvp = tp->cookie_values; 572 struct tcp_cookie_values *cvp = tp->cookie_values;
572 unsigned remaining = MAX_TCP_OPTION_SPACE; 573 unsigned int remaining = MAX_TCP_OPTION_SPACE;
573 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 574 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
574 tcp_cookie_size_check(cvp->cookie_desired) : 575 tcp_cookie_size_check(cvp->cookie_desired) :
575 0; 576 0;
@@ -663,15 +664,15 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
663} 664}
664 665
665/* Set up TCP options for SYN-ACKs. */ 666/* Set up TCP options for SYN-ACKs. */
666static unsigned tcp_synack_options(struct sock *sk, 667static unsigned int tcp_synack_options(struct sock *sk,
667 struct request_sock *req, 668 struct request_sock *req,
668 unsigned mss, struct sk_buff *skb, 669 unsigned int mss, struct sk_buff *skb,
669 struct tcp_out_options *opts, 670 struct tcp_out_options *opts,
670 struct tcp_md5sig_key **md5, 671 struct tcp_md5sig_key **md5,
671 struct tcp_extend_values *xvp) 672 struct tcp_extend_values *xvp)
672{ 673{
673 struct inet_request_sock *ireq = inet_rsk(req); 674 struct inet_request_sock *ireq = inet_rsk(req);
674 unsigned remaining = MAX_TCP_OPTION_SPACE; 675 unsigned int remaining = MAX_TCP_OPTION_SPACE;
675 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? 676 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
676 xvp->cookie_plus : 677 xvp->cookie_plus :
677 0; 678 0;
@@ -742,13 +743,13 @@ static unsigned tcp_synack_options(struct sock *sk,
742/* Compute TCP options for ESTABLISHED sockets. This is not the 743/* Compute TCP options for ESTABLISHED sockets. This is not the
743 * final wire format yet. 744 * final wire format yet.
744 */ 745 */
745static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 746static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
746 struct tcp_out_options *opts, 747 struct tcp_out_options *opts,
747 struct tcp_md5sig_key **md5) 748 struct tcp_md5sig_key **md5)
748{ 749{
749 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 750 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
750 struct tcp_sock *tp = tcp_sk(sk); 751 struct tcp_sock *tp = tcp_sk(sk);
751 unsigned size = 0; 752 unsigned int size = 0;
752 unsigned int eff_sacks; 753 unsigned int eff_sacks;
753 754
754#ifdef CONFIG_TCP_MD5SIG 755#ifdef CONFIG_TCP_MD5SIG
@@ -770,9 +771,9 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
770 771
771 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 772 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
772 if (unlikely(eff_sacks)) { 773 if (unlikely(eff_sacks)) {
773 const unsigned remaining = MAX_TCP_OPTION_SPACE - size; 774 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
774 opts->num_sack_blocks = 775 opts->num_sack_blocks =
775 min_t(unsigned, eff_sacks, 776 min_t(unsigned int, eff_sacks,
776 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 777 (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
777 TCPOLEN_SACK_PERBLOCK); 778 TCPOLEN_SACK_PERBLOCK);
778 size += TCPOLEN_SACK_BASE_ALIGNED + 779 size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -801,7 +802,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
801 struct tcp_sock *tp; 802 struct tcp_sock *tp;
802 struct tcp_skb_cb *tcb; 803 struct tcp_skb_cb *tcb;
803 struct tcp_out_options opts; 804 struct tcp_out_options opts;
804 unsigned tcp_options_size, tcp_header_size; 805 unsigned int tcp_options_size, tcp_header_size;
805 struct tcp_md5sig_key *md5; 806 struct tcp_md5sig_key *md5;
806 struct tcphdr *th; 807 struct tcphdr *th;
807 int err; 808 int err;
@@ -1096,6 +1097,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1096 eat = min_t(int, len, skb_headlen(skb)); 1097 eat = min_t(int, len, skb_headlen(skb));
1097 if (eat) { 1098 if (eat) {
1098 __skb_pull(skb, eat); 1099 __skb_pull(skb, eat);
1100 skb->avail_size -= eat;
1099 len -= eat; 1101 len -= eat;
1100 if (!len) 1102 if (!len)
1101 return; 1103 return;
@@ -1149,7 +1151,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1149} 1151}
1150 1152
1151/* Calculate MSS. Not accounting for SACKs here. */ 1153/* Calculate MSS. Not accounting for SACKs here. */
1152int tcp_mtu_to_mss(const struct sock *sk, int pmtu) 1154int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1153{ 1155{
1154 const struct tcp_sock *tp = tcp_sk(sk); 1156 const struct tcp_sock *tp = tcp_sk(sk);
1155 const struct inet_connection_sock *icsk = inet_csk(sk); 1157 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1160,6 +1162,14 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
1160 */ 1162 */
1161 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 1163 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1162 1164
1165 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1166 if (icsk->icsk_af_ops->net_frag_header_len) {
1167 const struct dst_entry *dst = __sk_dst_get(sk);
1168
1169 if (dst && dst_allfrag(dst))
1170 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1171 }
1172
1163 /* Clamp it (mss_clamp does not include tcp options) */ 1173 /* Clamp it (mss_clamp does not include tcp options) */
1164 if (mss_now > tp->rx_opt.mss_clamp) 1174 if (mss_now > tp->rx_opt.mss_clamp)
1165 mss_now = tp->rx_opt.mss_clamp; 1175 mss_now = tp->rx_opt.mss_clamp;
@@ -1178,7 +1188,7 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
1178} 1188}
1179 1189
1180/* Inverse of above */ 1190/* Inverse of above */
1181int tcp_mss_to_mtu(const struct sock *sk, int mss) 1191int tcp_mss_to_mtu(struct sock *sk, int mss)
1182{ 1192{
1183 const struct tcp_sock *tp = tcp_sk(sk); 1193 const struct tcp_sock *tp = tcp_sk(sk);
1184 const struct inet_connection_sock *icsk = inet_csk(sk); 1194 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1189,6 +1199,13 @@ int tcp_mss_to_mtu(const struct sock *sk, int mss)
1189 icsk->icsk_ext_hdr_len + 1199 icsk->icsk_ext_hdr_len +
1190 icsk->icsk_af_ops->net_header_len; 1200 icsk->icsk_af_ops->net_header_len;
1191 1201
1202 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1203 if (icsk->icsk_af_ops->net_frag_header_len) {
1204 const struct dst_entry *dst = __sk_dst_get(sk);
1205
1206 if (dst && dst_allfrag(dst))
1207 mtu += icsk->icsk_af_ops->net_frag_header_len;
1208 }
1192 return mtu; 1209 return mtu;
1193} 1210}
1194 1211
@@ -1258,7 +1275,7 @@ unsigned int tcp_current_mss(struct sock *sk)
1258 const struct tcp_sock *tp = tcp_sk(sk); 1275 const struct tcp_sock *tp = tcp_sk(sk);
1259 const struct dst_entry *dst = __sk_dst_get(sk); 1276 const struct dst_entry *dst = __sk_dst_get(sk);
1260 u32 mss_now; 1277 u32 mss_now;
1261 unsigned header_len; 1278 unsigned int header_len;
1262 struct tcp_out_options opts; 1279 struct tcp_out_options opts;
1263 struct tcp_md5sig_key *md5; 1280 struct tcp_md5sig_key *md5;
1264 1281
@@ -1374,33 +1391,33 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1374} 1391}
1375 1392
1376/* Minshall's variant of the Nagle send check. */ 1393/* Minshall's variant of the Nagle send check. */
1377static inline int tcp_minshall_check(const struct tcp_sock *tp) 1394static inline bool tcp_minshall_check(const struct tcp_sock *tp)
1378{ 1395{
1379 return after(tp->snd_sml, tp->snd_una) && 1396 return after(tp->snd_sml, tp->snd_una) &&
1380 !after(tp->snd_sml, tp->snd_nxt); 1397 !after(tp->snd_sml, tp->snd_nxt);
1381} 1398}
1382 1399
1383/* Return 0, if packet can be sent now without violation Nagle's rules: 1400/* Return false, if packet can be sent now without violation Nagle's rules:
1384 * 1. It is full sized. 1401 * 1. It is full sized.
1385 * 2. Or it contains FIN. (already checked by caller) 1402 * 2. Or it contains FIN. (already checked by caller)
1386 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1403 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1387 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1404 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1388 * With Minshall's modification: all sent small packets are ACKed. 1405 * With Minshall's modification: all sent small packets are ACKed.
1389 */ 1406 */
1390static inline int tcp_nagle_check(const struct tcp_sock *tp, 1407static inline bool tcp_nagle_check(const struct tcp_sock *tp,
1391 const struct sk_buff *skb, 1408 const struct sk_buff *skb,
1392 unsigned mss_now, int nonagle) 1409 unsigned int mss_now, int nonagle)
1393{ 1410{
1394 return skb->len < mss_now && 1411 return skb->len < mss_now &&
1395 ((nonagle & TCP_NAGLE_CORK) || 1412 ((nonagle & TCP_NAGLE_CORK) ||
1396 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1413 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1397} 1414}
1398 1415
1399/* Return non-zero if the Nagle test allows this packet to be 1416/* Return true if the Nagle test allows this packet to be
1400 * sent now. 1417 * sent now.
1401 */ 1418 */
1402static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1419static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1403 unsigned int cur_mss, int nonagle) 1420 unsigned int cur_mss, int nonagle)
1404{ 1421{
1405 /* Nagle rule does not apply to frames, which sit in the middle of the 1422 /* Nagle rule does not apply to frames, which sit in the middle of the
1406 * write_queue (they have no chances to get new data). 1423 * write_queue (they have no chances to get new data).
@@ -1409,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
1409 * argument based upon the location of SKB in the send queue. 1426 * argument based upon the location of SKB in the send queue.
1410 */ 1427 */
1411 if (nonagle & TCP_NAGLE_PUSH) 1428 if (nonagle & TCP_NAGLE_PUSH)
1412 return 1; 1429 return true;
1413 1430
1414 /* Don't use the nagle rule for urgent data (or for the final FIN). 1431 /* Don't use the nagle rule for urgent data (or for the final FIN).
1415 * Nagle can be ignored during F-RTO too (see RFC4138). 1432 * Nagle can be ignored during F-RTO too (see RFC4138).
1416 */ 1433 */
1417 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1434 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1418 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1435 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1419 return 1; 1436 return true;
1420 1437
1421 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1438 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1422 return 1; 1439 return true;
1423 1440
1424 return 0; 1441 return false;
1425} 1442}
1426 1443
1427/* Does at least the first segment of SKB fit into the send window? */ 1444/* Does at least the first segment of SKB fit into the send window? */
1428static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1445static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1429 unsigned int cur_mss) 1446 const struct sk_buff *skb,
1447 unsigned int cur_mss)
1430{ 1448{
1431 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1449 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1432 1450
@@ -1459,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1459} 1477}
1460 1478
1461/* Test if sending is allowed right now. */ 1479/* Test if sending is allowed right now. */
1462int tcp_may_send_now(struct sock *sk) 1480bool tcp_may_send_now(struct sock *sk)
1463{ 1481{
1464 const struct tcp_sock *tp = tcp_sk(sk); 1482 const struct tcp_sock *tp = tcp_sk(sk);
1465 struct sk_buff *skb = tcp_send_head(sk); 1483 struct sk_buff *skb = tcp_send_head(sk);
@@ -1529,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1529 * 1547 *
1530 * This algorithm is from John Heffner. 1548 * This algorithm is from John Heffner.
1531 */ 1549 */
1532static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1550static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1533{ 1551{
1534 struct tcp_sock *tp = tcp_sk(sk); 1552 struct tcp_sock *tp = tcp_sk(sk);
1535 const struct inet_connection_sock *icsk = inet_csk(sk); 1553 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1589,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1589 /* Ok, it looks like it is advisable to defer. */ 1607 /* Ok, it looks like it is advisable to defer. */
1590 tp->tso_deferred = 1 | (jiffies << 1); 1608 tp->tso_deferred = 1 | (jiffies << 1);
1591 1609
1592 return 1; 1610 return true;
1593 1611
1594send_now: 1612send_now:
1595 tp->tso_deferred = 0; 1613 tp->tso_deferred = 0;
1596 return 0; 1614 return false;
1597} 1615}
1598 1616
1599/* Create a new MTU probe if we are ready. 1617/* Create a new MTU probe if we are ready.
@@ -1735,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
1735 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1753 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1736 * account rare use of URG, this is not a big flaw. 1754 * account rare use of URG, this is not a big flaw.
1737 * 1755 *
1738 * Returns 1, if no segments are in flight and we have queued segments, but 1756 * Returns true, if no segments are in flight and we have queued segments,
1739 * cannot send anything now because of SWS or another problem. 1757 * but cannot send anything now because of SWS or another problem.
1740 */ 1758 */
1741static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1759static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1742 int push_one, gfp_t gfp) 1760 int push_one, gfp_t gfp)
1743{ 1761{
1744 struct tcp_sock *tp = tcp_sk(sk); 1762 struct tcp_sock *tp = tcp_sk(sk);
1745 struct sk_buff *skb; 1763 struct sk_buff *skb;
@@ -1753,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1753 /* Do MTU probing. */ 1771 /* Do MTU probing. */
1754 result = tcp_mtu_probe(sk); 1772 result = tcp_mtu_probe(sk);
1755 if (!result) { 1773 if (!result) {
1756 return 0; 1774 return false;
1757 } else if (result > 0) { 1775 } else if (result > 0) {
1758 sent_pkts = 1; 1776 sent_pkts = 1;
1759 } 1777 }
@@ -1812,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1812 1830
1813 if (likely(sent_pkts)) { 1831 if (likely(sent_pkts)) {
1814 tcp_cwnd_validate(sk); 1832 tcp_cwnd_validate(sk);
1815 return 0; 1833 return false;
1816 } 1834 }
1817 return !tp->packets_out && tcp_send_head(sk); 1835 return !tp->packets_out && tcp_send_head(sk);
1818} 1836}
@@ -2011,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2011} 2029}
2012 2030
2013/* Check if coalescing SKBs is legal. */ 2031/* Check if coalescing SKBs is legal. */
2014static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2032static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2015{ 2033{
2016 if (tcp_skb_pcount(skb) > 1) 2034 if (tcp_skb_pcount(skb) > 1)
2017 return 0; 2035 return false;
2018 /* TODO: SACK collapsing could be used to remove this condition */ 2036 /* TODO: SACK collapsing could be used to remove this condition */
2019 if (skb_shinfo(skb)->nr_frags != 0) 2037 if (skb_shinfo(skb)->nr_frags != 0)
2020 return 0; 2038 return false;
2021 if (skb_cloned(skb)) 2039 if (skb_cloned(skb))
2022 return 0; 2040 return false;
2023 if (skb == tcp_send_head(sk)) 2041 if (skb == tcp_send_head(sk))
2024 return 0; 2042 return false;
2025 /* Some heurestics for collapsing over SACK'd could be invented */ 2043 /* Some heurestics for collapsing over SACK'd could be invented */
2026 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2044 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2027 return 0; 2045 return false;
2028 2046
2029 return 1; 2047 return true;
2030} 2048}
2031 2049
2032/* Collapse packets in the retransmit queue to make to create 2050/* Collapse packets in the retransmit queue to make to create
@@ -2037,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2037{ 2055{
2038 struct tcp_sock *tp = tcp_sk(sk); 2056 struct tcp_sock *tp = tcp_sk(sk);
2039 struct sk_buff *skb = to, *tmp; 2057 struct sk_buff *skb = to, *tmp;
2040 int first = 1; 2058 bool first = true;
2041 2059
2042 if (!sysctl_tcp_retrans_collapse) 2060 if (!sysctl_tcp_retrans_collapse)
2043 return; 2061 return;
@@ -2051,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2051 space -= skb->len; 2069 space -= skb->len;
2052 2070
2053 if (first) { 2071 if (first) {
2054 first = 0; 2072 first = false;
2055 continue; 2073 continue;
2056 } 2074 }
2057 2075
@@ -2060,7 +2078,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2060 /* Punt if not enough space exists in the first SKB for 2078 /* Punt if not enough space exists in the first SKB for
2061 * the data in the second 2079 * the data in the second
2062 */ 2080 */
2063 if (skb->len > skb_tailroom(to)) 2081 if (skb->len > skb_availroom(to))
2064 break; 2082 break;
2065 2083
2066 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2084 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
@@ -2166,8 +2184,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2166 2184
2167#if FASTRETRANS_DEBUG > 0 2185#if FASTRETRANS_DEBUG > 0
2168 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2186 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2169 if (net_ratelimit()) 2187 net_dbg_ratelimited("retrans_out leaked\n");
2170 printk(KERN_DEBUG "retrans_out leaked.\n");
2171 } 2188 }
2172#endif 2189#endif
2173 if (!tp->retrans_out) 2190 if (!tp->retrans_out)
@@ -2192,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2192/* Check if we forward retransmits are possible in the current 2209/* Check if we forward retransmits are possible in the current
2193 * window/congestion state. 2210 * window/congestion state.
2194 */ 2211 */
2195static int tcp_can_forward_retransmit(struct sock *sk) 2212static bool tcp_can_forward_retransmit(struct sock *sk)
2196{ 2213{
2197 const struct inet_connection_sock *icsk = inet_csk(sk); 2214 const struct inet_connection_sock *icsk = inet_csk(sk);
2198 const struct tcp_sock *tp = tcp_sk(sk); 2215 const struct tcp_sock *tp = tcp_sk(sk);
2199 2216
2200 /* Forward retransmissions are possible only during Recovery. */ 2217 /* Forward retransmissions are possible only during Recovery. */
2201 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2218 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2202 return 0; 2219 return false;
2203 2220
2204 /* No forward retransmissions in Reno are possible. */ 2221 /* No forward retransmissions in Reno are possible. */
2205 if (tcp_is_reno(tp)) 2222 if (tcp_is_reno(tp))
2206 return 0; 2223 return false;
2207 2224
2208 /* Yeah, we have to make difficult choice between forward transmission 2225 /* Yeah, we have to make difficult choice between forward transmission
2209 * and retransmission... Both ways have their merits... 2226 * and retransmission... Both ways have their merits...
@@ -2214,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
2214 */ 2231 */
2215 2232
2216 if (tcp_may_send_now(sk)) 2233 if (tcp_may_send_now(sk))
2217 return 0; 2234 return false;
2218 2235
2219 return 1; 2236 return true;
2220} 2237}
2221 2238
2222/* This gets called after a retransmit timeout, and the initially 2239/* This gets called after a retransmit timeout, and the initially
@@ -2306,8 +2323,10 @@ begin_fwd:
2306 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2323 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2307 continue; 2324 continue;
2308 2325
2309 if (tcp_retransmit_skb(sk, skb)) 2326 if (tcp_retransmit_skb(sk, skb)) {
2327 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2310 return; 2328 return;
2329 }
2311 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2330 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2312 2331
2313 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 2332 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
@@ -2399,7 +2418,7 @@ int tcp_send_synack(struct sock *sk)
2399 2418
2400 skb = tcp_write_queue_head(sk); 2419 skb = tcp_write_queue_head(sk);
2401 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2420 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2402 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2421 pr_debug("%s: wrong queue state\n", __func__);
2403 return -EFAULT; 2422 return -EFAULT;
2404 } 2423 }
2405 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2424 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
@@ -2559,7 +2578,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2559EXPORT_SYMBOL(tcp_make_synack); 2578EXPORT_SYMBOL(tcp_make_synack);
2560 2579
2561/* Do all connect socket setups that can be done AF independent. */ 2580/* Do all connect socket setups that can be done AF independent. */
2562static void tcp_connect_init(struct sock *sk) 2581void tcp_connect_init(struct sock *sk)
2563{ 2582{
2564 const struct dst_entry *dst = __sk_dst_get(sk); 2583 const struct dst_entry *dst = __sk_dst_get(sk);
2565 struct tcp_sock *tp = tcp_sk(sk); 2584 struct tcp_sock *tp = tcp_sk(sk);
@@ -2614,9 +2633,12 @@ static void tcp_connect_init(struct sock *sk)
2614 tp->snd_una = tp->write_seq; 2633 tp->snd_una = tp->write_seq;
2615 tp->snd_sml = tp->write_seq; 2634 tp->snd_sml = tp->write_seq;
2616 tp->snd_up = tp->write_seq; 2635 tp->snd_up = tp->write_seq;
2617 tp->rcv_nxt = 0; 2636 tp->snd_nxt = tp->write_seq;
2618 tp->rcv_wup = 0; 2637
2619 tp->copied_seq = 0; 2638 if (likely(!tp->repair))
2639 tp->rcv_nxt = 0;
2640 tp->rcv_wup = tp->rcv_nxt;
2641 tp->copied_seq = tp->rcv_nxt;
2620 2642
2621 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2643 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2622 inet_csk(sk)->icsk_retransmits = 0; 2644 inet_csk(sk)->icsk_retransmits = 0;
@@ -2639,7 +2661,6 @@ int tcp_connect(struct sock *sk)
2639 /* Reserve space for headers. */ 2661 /* Reserve space for headers. */
2640 skb_reserve(buff, MAX_TCP_HEADER); 2662 skb_reserve(buff, MAX_TCP_HEADER);
2641 2663
2642 tp->snd_nxt = tp->write_seq;
2643 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2664 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2644 TCP_ECN_send_syn(sk, buff); 2665 TCP_ECN_send_syn(sk, buff);
2645 2666
@@ -2788,6 +2809,15 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2788 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2809 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2789} 2810}
2790 2811
2812void tcp_send_window_probe(struct sock *sk)
2813{
2814 if (sk->sk_state == TCP_ESTABLISHED) {
2815 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
2816 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
2817 tcp_xmit_probe_skb(sk, 0);
2818 }
2819}
2820
2791/* Initiate keepalive or window probe from timer. */ 2821/* Initiate keepalive or window probe from timer. */
2792int tcp_write_wakeup(struct sock *sk) 2822int tcp_write_wakeup(struct sock *sk)
2793{ 2823{
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 85ee7eb7e38e..4526fe68e60e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -18,6 +18,8 @@
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/kprobes.h> 24#include <linux/kprobes.h>
23#include <linux/socket.h> 25#include <linux/socket.h>
@@ -89,7 +91,7 @@ static inline int tcp_probe_avail(void)
89 * Note: arguments must match tcp_rcv_established()! 91 * Note: arguments must match tcp_rcv_established()!
90 */ 92 */
91static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, 93static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
92 struct tcphdr *th, unsigned len) 94 struct tcphdr *th, unsigned int len)
93{ 95{
94 const struct tcp_sock *tp = tcp_sk(sk); 96 const struct tcp_sock *tp = tcp_sk(sk);
95 const struct inet_sock *inet = inet_sk(sk); 97 const struct inet_sock *inet = inet_sk(sk);
@@ -136,7 +138,7 @@ static struct jprobe tcp_jprobe = {
136 .entry = jtcp_rcv_established, 138 .entry = jtcp_rcv_established,
137}; 139};
138 140
139static int tcpprobe_open(struct inode * inode, struct file * file) 141static int tcpprobe_open(struct inode *inode, struct file *file)
140{ 142{
141 /* Reset (empty) log */ 143 /* Reset (empty) log */
142 spin_lock_bh(&tcp_probe.lock); 144 spin_lock_bh(&tcp_probe.lock);
@@ -239,7 +241,7 @@ static __init int tcpprobe_init(void)
239 if (ret) 241 if (ret)
240 goto err1; 242 goto err1;
241 243
242 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); 244 pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
243 return 0; 245 return 0;
244 err1: 246 err1:
245 proc_net_remove(&init_net, procname); 247 proc_net_remove(&init_net, procname);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index cd2e0723266d..e911e6c523ec 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk)
319 struct tcp_sock *tp = tcp_sk(sk); 319 struct tcp_sock *tp = tcp_sk(sk);
320 struct inet_connection_sock *icsk = inet_csk(sk); 320 struct inet_connection_sock *icsk = inet_csk(sk);
321 321
322 if (tp->early_retrans_delayed) {
323 tcp_resume_early_retransmit(sk);
324 return;
325 }
326
322 if (!tp->packets_out) 327 if (!tp->packets_out)
323 goto out; 328 goto out;
324 329
@@ -333,16 +338,18 @@ void tcp_retransmit_timer(struct sock *sk)
333 */ 338 */
334 struct inet_sock *inet = inet_sk(sk); 339 struct inet_sock *inet = inet_sk(sk);
335 if (sk->sk_family == AF_INET) { 340 if (sk->sk_family == AF_INET) {
336 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 341 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
337 &inet->inet_daddr, ntohs(inet->inet_dport), 342 &inet->inet_daddr,
338 inet->inet_num, tp->snd_una, tp->snd_nxt); 343 ntohs(inet->inet_dport), inet->inet_num,
344 tp->snd_una, tp->snd_nxt);
339 } 345 }
340#if IS_ENABLED(CONFIG_IPV6) 346#if IS_ENABLED(CONFIG_IPV6)
341 else if (sk->sk_family == AF_INET6) { 347 else if (sk->sk_family == AF_INET6) {
342 struct ipv6_pinfo *np = inet6_sk(sk); 348 struct ipv6_pinfo *np = inet6_sk(sk);
343 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 349 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
344 &np->daddr, ntohs(inet->inet_dport), 350 &np->daddr,
345 inet->inet_num, tp->snd_una, tp->snd_nxt); 351 ntohs(inet->inet_dport), inet->inet_num,
352 tp->snd_una, tp->snd_nxt);
346 } 353 }
347#endif 354#endif
348 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 355 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 01775983b997..0d0171830620 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -164,12 +164,12 @@ static const struct net_protocol tunnel64_protocol = {
164static int __init tunnel4_init(void) 164static int __init tunnel4_init(void)
165{ 165{
166 if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) { 166 if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
167 printk(KERN_ERR "tunnel4 init: can't add protocol\n"); 167 pr_err("%s: can't add protocol\n", __func__);
168 return -EAGAIN; 168 return -EAGAIN;
169 } 169 }
170#if IS_ENABLED(CONFIG_IPV6) 170#if IS_ENABLED(CONFIG_IPV6)
171 if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) { 171 if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
172 printk(KERN_ERR "tunnel64 init: can't add protocol\n"); 172 pr_err("tunnel64 init: can't add protocol\n");
173 inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); 173 inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
174 return -EAGAIN; 174 return -EAGAIN;
175 } 175 }
@@ -181,10 +181,10 @@ static void __exit tunnel4_fini(void)
181{ 181{
182#if IS_ENABLED(CONFIG_IPV6) 182#if IS_ENABLED(CONFIG_IPV6)
183 if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6)) 183 if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
184 printk(KERN_ERR "tunnel64 close: can't remove protocol\n"); 184 pr_err("tunnel64 close: can't remove protocol\n");
185#endif 185#endif
186 if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP)) 186 if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
187 printk(KERN_ERR "tunnel4 close: can't remove protocol\n"); 187 pr_err("tunnel4 close: can't remove protocol\n");
188} 188}
189 189
190module_init(tunnel4_init); 190module_init(tunnel4_init);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5d075b5f70fc..eaca73644e79 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -77,7 +77,8 @@
77 * 2 of the License, or (at your option) any later version. 77 * 2 of the License, or (at your option) any later version.
78 */ 78 */
79 79
80#include <asm/system.h> 80#define pr_fmt(fmt) "UDP: " fmt
81
81#include <asm/uaccess.h> 82#include <asm/uaccess.h>
82#include <asm/ioctls.h> 83#include <asm/ioctls.h>
83#include <linux/bootmem.h> 84#include <linux/bootmem.h>
@@ -106,6 +107,7 @@
106#include <net/checksum.h> 107#include <net/checksum.h>
107#include <net/xfrm.h> 108#include <net/xfrm.h>
108#include <trace/events/udp.h> 109#include <trace/events/udp.h>
110#include <linux/static_key.h>
109#include "udp_impl.h" 111#include "udp_impl.h"
110 112
111struct udp_table udp_table __read_mostly; 113struct udp_table udp_table __read_mostly;
@@ -205,7 +207,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
205 207
206 if (!snum) { 208 if (!snum) {
207 int low, high, remaining; 209 int low, high, remaining;
208 unsigned rand; 210 unsigned int rand;
209 unsigned short first, last; 211 unsigned short first, last;
210 DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); 212 DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
211 213
@@ -845,7 +847,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
845 * Get and verify the address. 847 * Get and verify the address.
846 */ 848 */
847 if (msg->msg_name) { 849 if (msg->msg_name) {
848 struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; 850 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
849 if (msg->msg_namelen < sizeof(*usin)) 851 if (msg->msg_namelen < sizeof(*usin))
850 return -EINVAL; 852 return -EINVAL;
851 if (usin->sin_family != AF_INET) { 853 if (usin->sin_family != AF_INET) {
@@ -917,7 +919,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
917 if (!saddr) 919 if (!saddr)
918 saddr = inet->mc_addr; 920 saddr = inet->mc_addr;
919 connected = 0; 921 connected = 0;
920 } 922 } else if (!ipc.oif)
923 ipc.oif = inet->uc_index;
921 924
922 if (connected) 925 if (connected)
923 rt = (struct rtable *)sk_dst_check(sk, 0); 926 rt = (struct rtable *)sk_dst_check(sk, 0);
@@ -974,7 +977,7 @@ back_from_confirm:
974 /* ... which is an evident application bug. --ANK */ 977 /* ... which is an evident application bug. --ANK */
975 release_sock(sk); 978 release_sock(sk);
976 979
977 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); 980 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
978 err = -EINVAL; 981 err = -EINVAL;
979 goto out; 982 goto out;
980 } 983 }
@@ -1053,7 +1056,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1053 if (unlikely(!up->pending)) { 1056 if (unlikely(!up->pending)) {
1054 release_sock(sk); 1057 release_sock(sk);
1055 1058
1056 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); 1059 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
1057 return -EINVAL; 1060 return -EINVAL;
1058 } 1061 }
1059 1062
@@ -1166,7 +1169,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1166 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1169 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1167 struct sk_buff *skb; 1170 struct sk_buff *skb;
1168 unsigned int ulen, copied; 1171 unsigned int ulen, copied;
1169 int peeked; 1172 int peeked, off = 0;
1170 int err; 1173 int err;
1171 int is_udplite = IS_UDPLITE(sk); 1174 int is_udplite = IS_UDPLITE(sk);
1172 bool slow; 1175 bool slow;
@@ -1182,7 +1185,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1182 1185
1183try_again: 1186try_again:
1184 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 1187 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
1185 &peeked, &err); 1188 &peeked, &off, &err);
1186 if (!skb) 1189 if (!skb)
1187 goto out; 1190 goto out;
1188 1191
@@ -1377,6 +1380,14 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1377 1380
1378} 1381}
1379 1382
1383static struct static_key udp_encap_needed __read_mostly;
1384void udp_encap_enable(void)
1385{
1386 if (!static_key_enabled(&udp_encap_needed))
1387 static_key_slow_inc(&udp_encap_needed);
1388}
1389EXPORT_SYMBOL(udp_encap_enable);
1390
1380/* returns: 1391/* returns:
1381 * -1: error 1392 * -1: error
1382 * 0: success 1393 * 0: success
@@ -1398,7 +1409,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1398 goto drop; 1409 goto drop;
1399 nf_reset(skb); 1410 nf_reset(skb);
1400 1411
1401 if (up->encap_type) { 1412 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1402 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 1413 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
1403 1414
1404 /* 1415 /*
@@ -1446,9 +1457,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1446 * provided by the application." 1457 * provided by the application."
1447 */ 1458 */
1448 if (up->pcrlen == 0) { /* full coverage was set */ 1459 if (up->pcrlen == 0) { /* full coverage was set */
1449 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " 1460 LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
1450 "%d while full coverage %d requested\n", 1461 UDP_SKB_CB(skb)->cscov, skb->len);
1451 UDP_SKB_CB(skb)->cscov, skb->len);
1452 goto drop; 1462 goto drop;
1453 } 1463 }
1454 /* The next case involves violating the min. coverage requested 1464 /* The next case involves violating the min. coverage requested
@@ -1458,9 +1468,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1458 * Therefore the above ...()->partial_cov statement is essential. 1468 * Therefore the above ...()->partial_cov statement is essential.
1459 */ 1469 */
1460 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 1470 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
1461 LIMIT_NETDEBUG(KERN_WARNING 1471 LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
1462 "UDPLITE: coverage %d too small, need min %d\n", 1472 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1463 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1464 goto drop; 1473 goto drop;
1465 } 1474 }
1466 } 1475 }
@@ -1470,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1470 goto drop; 1479 goto drop;
1471 1480
1472 1481
1473 if (sk_rcvqueues_full(sk, skb)) 1482 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
1474 goto drop; 1483 goto drop;
1475 1484
1476 rc = 0; 1485 rc = 0;
@@ -1479,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1479 bh_lock_sock(sk); 1488 bh_lock_sock(sk);
1480 if (!sock_owned_by_user(sk)) 1489 if (!sock_owned_by_user(sk))
1481 rc = __udp_queue_rcv_skb(sk, skb); 1490 rc = __udp_queue_rcv_skb(sk, skb);
1482 else if (sk_add_backlog(sk, skb)) { 1491 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
1483 bh_unlock_sock(sk); 1492 bh_unlock_sock(sk);
1484 goto drop; 1493 goto drop;
1485 } 1494 }
@@ -1688,13 +1697,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1688 1697
1689short_packet: 1698short_packet:
1690 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", 1699 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
1691 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1700 proto == IPPROTO_UDPLITE ? "Lite" : "",
1692 &saddr, 1701 &saddr, ntohs(uh->source),
1693 ntohs(uh->source), 1702 ulen, skb->len,
1694 ulen, 1703 &daddr, ntohs(uh->dest));
1695 skb->len,
1696 &daddr,
1697 ntohs(uh->dest));
1698 goto drop; 1704 goto drop;
1699 1705
1700csum_error: 1706csum_error:
@@ -1703,11 +1709,8 @@ csum_error:
1703 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1709 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1704 */ 1710 */
1705 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", 1711 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
1706 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1712 proto == IPPROTO_UDPLITE ? "Lite" : "",
1707 &saddr, 1713 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
1708 ntohs(uh->source),
1709 &daddr,
1710 ntohs(uh->dest),
1711 ulen); 1714 ulen);
1712drop: 1715drop:
1713 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1716 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
@@ -1766,6 +1769,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1766 /* FALLTHROUGH */ 1769 /* FALLTHROUGH */
1767 case UDP_ENCAP_L2TPINUDP: 1770 case UDP_ENCAP_L2TPINUDP:
1768 up->encap_type = val; 1771 up->encap_type = val;
1772 udp_encap_enable();
1769 break; 1773 break;
1770 default: 1774 default:
1771 err = -ENOPROTOOPT; 1775 err = -ENOPROTOOPT;
@@ -2169,9 +2173,15 @@ void udp4_proc_exit(void)
2169static __initdata unsigned long uhash_entries; 2173static __initdata unsigned long uhash_entries;
2170static int __init set_uhash_entries(char *str) 2174static int __init set_uhash_entries(char *str)
2171{ 2175{
2176 ssize_t ret;
2177
2172 if (!str) 2178 if (!str)
2173 return 0; 2179 return 0;
2174 uhash_entries = simple_strtoul(str, &str, 0); 2180
2181 ret = kstrtoul(str, 0, &uhash_entries);
2182 if (ret)
2183 return 0;
2184
2175 if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) 2185 if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
2176 uhash_entries = UDP_HTABLE_SIZE_MIN; 2186 uhash_entries = UDP_HTABLE_SIZE_MIN;
2177 return 1; 2187 return 1;
@@ -2182,26 +2192,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
2182{ 2192{
2183 unsigned int i; 2193 unsigned int i;
2184 2194
2185 if (!CONFIG_BASE_SMALL) 2195 table->hash = alloc_large_system_hash(name,
2186 table->hash = alloc_large_system_hash(name, 2196 2 * sizeof(struct udp_hslot),
2187 2 * sizeof(struct udp_hslot), 2197 uhash_entries,
2188 uhash_entries, 2198 21, /* one slot per 2 MB */
2189 21, /* one slot per 2 MB */ 2199 0,
2190 0, 2200 &table->log,
2191 &table->log, 2201 &table->mask,
2192 &table->mask, 2202 UDP_HTABLE_SIZE_MIN,
2193 64 * 1024); 2203 64 * 1024);
2194 /* 2204
2195 * Make sure hash table has the minimum size
2196 */
2197 if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
2198 table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
2199 2 * sizeof(struct udp_hslot), GFP_KERNEL);
2200 if (!table->hash)
2201 panic(name);
2202 table->log = ilog2(UDP_HTABLE_SIZE_MIN);
2203 table->mask = UDP_HTABLE_SIZE_MIN - 1;
2204 }
2205 table->hash2 = table->hash + (table->mask + 1); 2205 table->hash2 = table->hash + (table->mask + 1);
2206 for (i = 0; i <= table->mask; i++) { 2206 for (i = 0; i <= table->mask; i++) {
2207 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); 2207 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
146 return udp_dump_one(&udp_table, in_skb, nlh, req); 146 return udp_dump_one(&udp_table, in_skb, nlh, req);
147} 147}
148 148
149static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
150 void *info)
151{
152 r->idiag_rqueue = sk_rmem_alloc_get(sk);
153 r->idiag_wqueue = sk_wmem_alloc_get(sk);
154}
155
149static const struct inet_diag_handler udp_diag_handler = { 156static const struct inet_diag_handler udp_diag_handler = {
150 .dump = udp_diag_dump, 157 .dump = udp_diag_dump,
151 .dump_one = udp_diag_dump_one, 158 .dump_one = udp_diag_dump_one,
159 .idiag_get_info = udp_diag_get_info,
152 .idiag_type = IPPROTO_UDP, 160 .idiag_type = IPPROTO_UDP,
153}; 161};
154 162
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
167static const struct inet_diag_handler udplite_diag_handler = { 175static const struct inet_diag_handler udplite_diag_handler = {
168 .dump = udplite_diag_dump, 176 .dump = udplite_diag_dump,
169 .dump_one = udplite_diag_dump_one, 177 .dump_one = udplite_diag_dump_one,
178 .idiag_get_info = udp_diag_get_info,
170 .idiag_type = IPPROTO_UDPLITE, 179 .idiag_type = IPPROTO_UDPLITE,
171}; 180};
172 181
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index aaad650d47d9..5a681e298b90 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
25 size_t len, int noblock, int flags, int *addr_len); 25 size_t len, int noblock, int flags, int *addr_len);
26extern int udp_sendpage(struct sock *sk, struct page *page, int offset, 26extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
27 size_t size, int flags); 27 size_t size, int flags);
28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 28extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29extern void udp_destroy_sock(struct sock *sk); 29extern void udp_destroy_sock(struct sock *sk);
30 30
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 12e9499a1a6c..2c46acd4cc36 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -10,6 +10,9 @@
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13
14#define pr_fmt(fmt) "UDPLite: " fmt
15
13#include <linux/export.h> 16#include <linux/export.h>
14#include "udp_impl.h" 17#include "udp_impl.h"
15 18
@@ -129,11 +132,11 @@ void __init udplite4_register(void)
129 inet_register_protosw(&udplite4_protosw); 132 inet_register_protosw(&udplite4_protosw);
130 133
131 if (udplite4_proc_init()) 134 if (udplite4_proc_init())
132 printk(KERN_ERR "%s: Cannot register /proc!\n", __func__); 135 pr_err("%s: Cannot register /proc!\n", __func__);
133 return; 136 return;
134 137
135out_unregister_proto: 138out_unregister_proto:
136 proto_unregister(&udplite_prot); 139 proto_unregister(&udplite_prot);
137out_register_err: 140out_register_err:
138 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); 141 pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__);
139} 142}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index a0b4c5da8d43..0d3426cb5c4f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -152,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
152 152
153 case IPPROTO_AH: 153 case IPPROTO_AH:
154 if (pskb_may_pull(skb, xprth + 8 - skb->data)) { 154 if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
155 __be32 *ah_hdr = (__be32*)xprth; 155 __be32 *ah_hdr = (__be32 *)xprth;
156 156
157 fl4->fl4_ipsec_spi = ah_hdr[1]; 157 fl4->fl4_ipsec_spi = ah_hdr[1];
158 } 158 }
@@ -298,8 +298,8 @@ void __init xfrm4_init(int rt_max_size)
298 xfrm4_state_init(); 298 xfrm4_state_init();
299 xfrm4_policy_init(); 299 xfrm4_policy_init();
300#ifdef CONFIG_SYSCTL 300#ifdef CONFIG_SYSCTL
301 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 301 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
302 xfrm4_policy_table); 302 xfrm4_policy_table);
303#endif 303#endif
304} 304}
305 305
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 9247d9d70e9d..05a5df2febc9 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -3,6 +3,8 @@
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define pr_fmt(fmt) "IPsec: " fmt
7
6#include <linux/skbuff.h> 8#include <linux/skbuff.h>
7#include <linux/module.h> 9#include <linux/module.h>
8#include <linux/mutex.h> 10#include <linux/mutex.h>
@@ -75,18 +77,18 @@ static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
75static int __init ipip_init(void) 77static int __init ipip_init(void)
76{ 78{
77 if (xfrm_register_type(&ipip_type, AF_INET) < 0) { 79 if (xfrm_register_type(&ipip_type, AF_INET) < 0) {
78 printk(KERN_INFO "ipip init: can't add xfrm type\n"); 80 pr_info("%s: can't add xfrm type\n", __func__);
79 return -EAGAIN; 81 return -EAGAIN;
80 } 82 }
81 83
82 if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) { 84 if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) {
83 printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET\n"); 85 pr_info("%s: can't add xfrm handler for AF_INET\n", __func__);
84 xfrm_unregister_type(&ipip_type, AF_INET); 86 xfrm_unregister_type(&ipip_type, AF_INET);
85 return -EAGAIN; 87 return -EAGAIN;
86 } 88 }
87#if IS_ENABLED(CONFIG_IPV6) 89#if IS_ENABLED(CONFIG_IPV6)
88 if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { 90 if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
89 printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n"); 91 pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__);
90 xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); 92 xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
91 xfrm_unregister_type(&ipip_type, AF_INET); 93 xfrm_unregister_type(&ipip_type, AF_INET);
92 return -EAGAIN; 94 return -EAGAIN;
@@ -99,12 +101,14 @@ static void __exit ipip_fini(void)
99{ 101{
100#if IS_ENABLED(CONFIG_IPV6) 102#if IS_ENABLED(CONFIG_IPV6)
101 if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) 103 if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
102 printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n"); 104 pr_info("%s: can't remove xfrm handler for AF_INET6\n",
105 __func__);
103#endif 106#endif
104 if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET)) 107 if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET))
105 printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET\n"); 108 pr_info("%s: can't remove xfrm handler for AF_INET\n",
109 __func__);
106 if (xfrm_unregister_type(&ipip_type, AF_INET) < 0) 110 if (xfrm_unregister_type(&ipip_type, AF_INET) < 0)
107 printk(KERN_INFO "ipip close: can't remove xfrm type\n"); 111 pr_info("%s: can't remove xfrm type\n", __func__);
108} 112}
109 113
110module_init(ipip_init); 114module_init(ipip_init);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 36d7437ac054..5728695b5449 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,7 +69,7 @@ config IPV6_OPTIMISTIC_DAD
69 69
70config INET6_AH 70config INET6_AH
71 tristate "IPv6: AH transformation" 71 tristate "IPv6: AH transformation"
72 select XFRM 72 select XFRM_ALGO
73 select CRYPTO 73 select CRYPTO
74 select CRYPTO_HMAC 74 select CRYPTO_HMAC
75 select CRYPTO_MD5 75 select CRYPTO_MD5
@@ -81,7 +81,7 @@ config INET6_AH
81 81
82config INET6_ESP 82config INET6_ESP
83 tristate "IPv6: ESP transformation" 83 tristate "IPv6: ESP transformation"
84 select XFRM 84 select XFRM_ALGO
85 select CRYPTO 85 select CRYPTO
86 select CRYPTO_AUTHENC 86 select CRYPTO_AUTHENC
87 select CRYPTO_HMAC 87 select CRYPTO_HMAC
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6b8ebc5da0e1..8f6411c97189 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -38,6 +38,8 @@
38 * status etc. 38 * status etc.
39 */ 39 */
40 40
41#define pr_fmt(fmt) "IPv6: " fmt
42
41#include <linux/errno.h> 43#include <linux/errno.h>
42#include <linux/types.h> 44#include <linux/types.h>
43#include <linux/kernel.h> 45#include <linux/kernel.h>
@@ -66,6 +68,7 @@
66#include <net/sock.h> 68#include <net/sock.h>
67#include <net/snmp.h> 69#include <net/snmp.h>
68 70
71#include <net/af_ieee802154.h>
69#include <net/ipv6.h> 72#include <net/ipv6.h>
70#include <net/protocol.h> 73#include <net/protocol.h>
71#include <net/ndisc.h> 74#include <net/ndisc.h>
@@ -149,7 +152,7 @@ static void addrconf_type_change(struct net_device *dev,
149 unsigned long event); 152 unsigned long event);
150static int addrconf_ifdown(struct net_device *dev, int how); 153static int addrconf_ifdown(struct net_device *dev, int how);
151 154
152static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); 155static void addrconf_dad_start(struct inet6_ifaddr *ifp);
153static void addrconf_dad_timer(unsigned long data); 156static void addrconf_dad_timer(unsigned long data);
154static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 157static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
155static void addrconf_dad_run(struct inet6_dev *idev); 158static void addrconf_dad_run(struct inet6_dev *idev);
@@ -326,20 +329,19 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
326 WARN_ON(idev->mc_list != NULL); 329 WARN_ON(idev->mc_list != NULL);
327 330
328#ifdef NET_REFCNT_DEBUG 331#ifdef NET_REFCNT_DEBUG
329 printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL"); 332 pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
330#endif 333#endif
331 dev_put(dev); 334 dev_put(dev);
332 if (!idev->dead) { 335 if (!idev->dead) {
333 pr_warning("Freeing alive inet6 device %p\n", idev); 336 pr_warn("Freeing alive inet6 device %p\n", idev);
334 return; 337 return;
335 } 338 }
336 snmp6_free_dev(idev); 339 snmp6_free_dev(idev);
337 kfree_rcu(idev, rcu); 340 kfree_rcu(idev, rcu);
338} 341}
339
340EXPORT_SYMBOL(in6_dev_finish_destroy); 342EXPORT_SYMBOL(in6_dev_finish_destroy);
341 343
342static struct inet6_dev * ipv6_add_dev(struct net_device *dev) 344static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
343{ 345{
344 struct inet6_dev *ndev; 346 struct inet6_dev *ndev;
345 347
@@ -372,7 +374,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
372 374
373 if (snmp6_alloc_dev(ndev) < 0) { 375 if (snmp6_alloc_dev(ndev) < 0) {
374 ADBG((KERN_WARNING 376 ADBG((KERN_WARNING
375 "%s(): cannot allocate memory for statistics; dev=%s.\n", 377 "%s: cannot allocate memory for statistics; dev=%s.\n",
376 __func__, dev->name)); 378 __func__, dev->name));
377 neigh_parms_release(&nd_tbl, ndev->nd_parms); 379 neigh_parms_release(&nd_tbl, ndev->nd_parms);
378 dev_put(dev); 380 dev_put(dev);
@@ -382,7 +384,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
382 384
383 if (snmp6_register_dev(ndev) < 0) { 385 if (snmp6_register_dev(ndev) < 0) {
384 ADBG((KERN_WARNING 386 ADBG((KERN_WARNING
385 "%s(): cannot create /proc/net/dev_snmp6/%s\n", 387 "%s: cannot create /proc/net/dev_snmp6/%s\n",
386 __func__, dev->name)); 388 __func__, dev->name));
387 neigh_parms_release(&nd_tbl, ndev->nd_parms); 389 neigh_parms_release(&nd_tbl, ndev->nd_parms);
388 ndev->dead = 1; 390 ndev->dead = 1;
@@ -400,9 +402,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
400 402
401#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 403#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
402 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { 404 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
403 printk(KERN_INFO 405 pr_info("%s: Disabled Multicast RS\n", dev->name);
404 "%s: Disabled Multicast RS\n",
405 dev->name);
406 ndev->cnf.rtr_solicits = 0; 406 ndev->cnf.rtr_solicits = 0;
407 } 407 }
408#endif 408#endif
@@ -435,13 +435,13 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
436 436
437 /* Join all-router multicast group if forwarding is set */ 437 /* Join all-router multicast group if forwarding is set */
438 if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST)) 438 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
439 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 439 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
440 440
441 return ndev; 441 return ndev;
442} 442}
443 443
444static struct inet6_dev * ipv6_find_idev(struct net_device *dev) 444static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
445{ 445{
446 struct inet6_dev *idev; 446 struct inet6_dev *idev;
447 447
@@ -542,7 +542,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
542 WARN_ON(!hlist_unhashed(&ifp->addr_lst)); 542 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
543 543
544#ifdef NET_REFCNT_DEBUG 544#ifdef NET_REFCNT_DEBUG
545 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); 545 pr_debug("%s\n", __func__);
546#endif 546#endif
547 547
548 in6_dev_put(ifp->idev); 548 in6_dev_put(ifp->idev);
@@ -551,7 +551,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
551 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); 551 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
552 552
553 if (ifp->state != INET6_IFADDR_STATE_DEAD) { 553 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
554 pr_warning("Freeing alive inet6 address %p\n", ifp); 554 pr_warn("Freeing alive inet6 address %p\n", ifp);
555 return; 555 return;
556 } 556 }
557 dst_release(&ifp->rt->dst); 557 dst_release(&ifp->rt->dst);
@@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
803 ip6_del_rt(rt); 803 ip6_del_rt(rt);
804 rt = NULL; 804 rt = NULL;
805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { 805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
806 rt->dst.expires = expires; 806 rt6_set_expires(rt, expires);
807 rt->rt6i_flags |= RTF_EXPIRES;
808 } 807 }
809 } 808 }
810 dst_release(&rt->dst); 809 dst_release(&rt->dst);
@@ -842,8 +841,7 @@ retry:
842 in6_dev_hold(idev); 841 in6_dev_hold(idev);
843 if (idev->cnf.use_tempaddr <= 0) { 842 if (idev->cnf.use_tempaddr <= 0) {
844 write_unlock(&idev->lock); 843 write_unlock(&idev->lock);
845 printk(KERN_INFO 844 pr_info("%s: use_tempaddr is disabled\n", __func__);
846 "ipv6_create_tempaddr(): use_tempaddr is disabled.\n");
847 in6_dev_put(idev); 845 in6_dev_put(idev);
848 ret = -1; 846 ret = -1;
849 goto out; 847 goto out;
@@ -853,8 +851,8 @@ retry:
853 idev->cnf.use_tempaddr = -1; /*XXX*/ 851 idev->cnf.use_tempaddr = -1; /*XXX*/
854 spin_unlock_bh(&ifp->lock); 852 spin_unlock_bh(&ifp->lock);
855 write_unlock(&idev->lock); 853 write_unlock(&idev->lock);
856 printk(KERN_WARNING 854 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
857 "ipv6_create_tempaddr(): regeneration time exceeded. disabled temporary address support.\n"); 855 __func__);
858 in6_dev_put(idev); 856 in6_dev_put(idev);
859 ret = -1; 857 ret = -1;
860 goto out; 858 goto out;
@@ -864,8 +862,8 @@ retry:
864 if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) { 862 if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
865 spin_unlock_bh(&ifp->lock); 863 spin_unlock_bh(&ifp->lock);
866 write_unlock(&idev->lock); 864 write_unlock(&idev->lock);
867 printk(KERN_WARNING 865 pr_warn("%s: regeneration of randomized interface id failed\n",
868 "ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n"); 866 __func__);
869 in6_ifa_put(ifp); 867 in6_ifa_put(ifp);
870 in6_dev_put(idev); 868 in6_dev_put(idev);
871 ret = -1; 869 ret = -1;
@@ -915,8 +913,7 @@ retry:
915 if (!ift || IS_ERR(ift)) { 913 if (!ift || IS_ERR(ift)) {
916 in6_ifa_put(ifp); 914 in6_ifa_put(ifp);
917 in6_dev_put(idev); 915 in6_dev_put(idev);
918 printk(KERN_INFO 916 pr_info("%s: retry temporary address regeneration\n", __func__);
919 "ipv6_create_tempaddr(): retry temporary address regeneration.\n");
920 tmpaddr = &addr; 917 tmpaddr = &addr;
921 write_lock(&idev->lock); 918 write_lock(&idev->lock);
922 goto retry; 919 goto retry;
@@ -930,7 +927,7 @@ retry:
930 ift->tstamp = tmp_tstamp; 927 ift->tstamp = tmp_tstamp;
931 spin_unlock_bh(&ift->lock); 928 spin_unlock_bh(&ift->lock);
932 929
933 addrconf_dad_start(ift, 0); 930 addrconf_dad_start(ift);
934 in6_ifa_put(ift); 931 in6_ifa_put(ift);
935 in6_dev_put(idev); 932 in6_dev_put(idev);
936out: 933out:
@@ -1333,7 +1330,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1333 rcu_read_unlock(); 1330 rcu_read_unlock();
1334 return onlink; 1331 return onlink;
1335} 1332}
1336
1337EXPORT_SYMBOL(ipv6_chk_prefix); 1333EXPORT_SYMBOL(ipv6_chk_prefix);
1338 1334
1339struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, 1335struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1417,9 +1413,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1417 return; 1413 return;
1418 } 1414 }
1419 1415
1420 if (net_ratelimit()) 1416 net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
1421 printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", 1417 ifp->idev->dev->name, &ifp->addr);
1422 ifp->idev->dev->name, &ifp->addr);
1423 1418
1424 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { 1419 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
1425 struct in6_addr addr; 1420 struct in6_addr addr;
@@ -1432,7 +1427,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1432 /* DAD failed for link-local based on MAC address */ 1427 /* DAD failed for link-local based on MAC address */
1433 idev->cnf.disable_ipv6 = 1; 1428 idev->cnf.disable_ipv6 = 1;
1434 1429
1435 printk(KERN_INFO "%s: IPv6 being disabled!\n", 1430 pr_info("%s: IPv6 being disabled!\n",
1436 ifp->idev->dev->name); 1431 ifp->idev->dev->name);
1437 } 1432 }
1438 } 1433 }
@@ -1517,13 +1512,21 @@ static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
1517 return 0; 1512 return 0;
1518} 1513}
1519 1514
1515static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1516{
1517 if (dev->addr_len != IEEE802154_ADDR_LEN)
1518 return -1;
1519 memcpy(eui, dev->dev_addr, 8);
1520 return 0;
1521}
1522
1520static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) 1523static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
1521{ 1524{
1522 /* XXX: inherit EUI-64 from other interface -- yoshfuji */ 1525 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
1523 if (dev->addr_len != ARCNET_ALEN) 1526 if (dev->addr_len != ARCNET_ALEN)
1524 return -1; 1527 return -1;
1525 memset(eui, 0, 7); 1528 memset(eui, 0, 7);
1526 eui[7] = *(u8*)dev->dev_addr; 1529 eui[7] = *(u8 *)dev->dev_addr;
1527 return 0; 1530 return 0;
1528} 1531}
1529 1532
@@ -1570,7 +1573,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1570 switch (dev->type) { 1573 switch (dev->type) {
1571 case ARPHRD_ETHER: 1574 case ARPHRD_ETHER:
1572 case ARPHRD_FDDI: 1575 case ARPHRD_FDDI:
1573 case ARPHRD_IEEE802_TR:
1574 return addrconf_ifid_eui48(eui, dev); 1576 return addrconf_ifid_eui48(eui, dev);
1575 case ARPHRD_ARCNET: 1577 case ARPHRD_ARCNET:
1576 return addrconf_ifid_arcnet(eui, dev); 1578 return addrconf_ifid_arcnet(eui, dev);
@@ -1580,6 +1582,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1580 return addrconf_ifid_sit(eui, dev); 1582 return addrconf_ifid_sit(eui, dev);
1581 case ARPHRD_IPGRE: 1583 case ARPHRD_IPGRE:
1582 return addrconf_ifid_gre(eui, dev); 1584 return addrconf_ifid_gre(eui, dev);
1585 case ARPHRD_IEEE802154:
1586 return addrconf_ifid_eui64(eui, dev);
1583 } 1587 }
1584 return -1; 1588 return -1;
1585} 1589}
@@ -1653,9 +1657,8 @@ static void ipv6_regen_rndid(unsigned long data)
1653 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - 1657 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
1654 idev->cnf.max_desync_factor * HZ; 1658 idev->cnf.max_desync_factor * HZ;
1655 if (time_before(expires, jiffies)) { 1659 if (time_before(expires, jiffies)) {
1656 printk(KERN_WARNING 1660 pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
1657 "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n", 1661 __func__, idev->dev->name);
1658 idev->dev->name);
1659 goto out; 1662 goto out;
1660 } 1663 }
1661 1664
@@ -1668,7 +1671,8 @@ out:
1668 in6_dev_put(idev); 1671 in6_dev_put(idev);
1669} 1672}
1670 1673
1671static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) { 1674static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
1675{
1672 int ret = 0; 1676 int ret = 0;
1673 1677
1674 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) 1678 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1838,16 +1842,15 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1838 prefered_lft = ntohl(pinfo->prefered); 1842 prefered_lft = ntohl(pinfo->prefered);
1839 1843
1840 if (prefered_lft > valid_lft) { 1844 if (prefered_lft > valid_lft) {
1841 if (net_ratelimit()) 1845 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
1842 printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n");
1843 return; 1846 return;
1844 } 1847 }
1845 1848
1846 in6_dev = in6_dev_get(dev); 1849 in6_dev = in6_dev_get(dev);
1847 1850
1848 if (in6_dev == NULL) { 1851 if (in6_dev == NULL) {
1849 if (net_ratelimit()) 1852 net_dbg_ratelimited("addrconf: device %s not configured\n",
1850 printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name); 1853 dev->name);
1851 return; 1854 return;
1852 } 1855 }
1853 1856
@@ -1887,11 +1890,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1887 rt = NULL; 1890 rt = NULL;
1888 } else if (addrconf_finite_timeout(rt_expires)) { 1891 } else if (addrconf_finite_timeout(rt_expires)) {
1889 /* not infinity */ 1892 /* not infinity */
1890 rt->dst.expires = jiffies + rt_expires; 1893 rt6_set_expires(rt, jiffies + rt_expires);
1891 rt->rt6i_flags |= RTF_EXPIRES;
1892 } else { 1894 } else {
1893 rt->rt6i_flags &= ~RTF_EXPIRES; 1895 rt6_clean_expires(rt);
1894 rt->dst.expires = 0;
1895 } 1896 }
1896 } else if (valid_lft) { 1897 } else if (valid_lft) {
1897 clock_t expires = 0; 1898 clock_t expires = 0;
@@ -1911,7 +1912,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1911 /* Try to figure out our local address for this prefix */ 1912 /* Try to figure out our local address for this prefix */
1912 1913
1913 if (pinfo->autoconf && in6_dev->cnf.autoconf) { 1914 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
1914 struct inet6_ifaddr * ifp; 1915 struct inet6_ifaddr *ifp;
1915 struct in6_addr addr; 1916 struct in6_addr addr;
1916 int create = 0, update_lft = 0; 1917 int create = 0, update_lft = 0;
1917 1918
@@ -1924,9 +1925,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1924 } 1925 }
1925 goto ok; 1926 goto ok;
1926 } 1927 }
1927 if (net_ratelimit()) 1928 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
1928 printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n", 1929 pinfo->prefix_len);
1929 pinfo->prefix_len);
1930 in6_dev_put(in6_dev); 1930 in6_dev_put(in6_dev);
1931 return; 1931 return;
1932 1932
@@ -1960,7 +1960,7 @@ ok:
1960 1960
1961 update_lft = create = 1; 1961 update_lft = create = 1;
1962 ifp->cstamp = jiffies; 1962 ifp->cstamp = jiffies;
1963 addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT); 1963 addrconf_dad_start(ifp);
1964 } 1964 }
1965 1965
1966 if (ifp) { 1966 if (ifp) {
@@ -2239,7 +2239,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
2239 * that the Optimistic flag should not be set for 2239 * that the Optimistic flag should not be set for
2240 * manually configured addresses 2240 * manually configured addresses
2241 */ 2241 */
2242 addrconf_dad_start(ifp, 0); 2242 addrconf_dad_start(ifp);
2243 in6_ifa_put(ifp); 2243 in6_ifa_put(ifp);
2244 addrconf_verify(0); 2244 addrconf_verify(0);
2245 return 0; 2245 return 0;
@@ -2365,9 +2365,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2365 } 2365 }
2366 2366
2367 for_each_netdev(net, dev) { 2367 for_each_netdev(net, dev) {
2368 struct in_device * in_dev = __in_dev_get_rtnl(dev); 2368 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2369 if (in_dev && (dev->flags & IFF_UP)) { 2369 if (in_dev && (dev->flags & IFF_UP)) {
2370 struct in_ifaddr * ifa; 2370 struct in_ifaddr *ifa;
2371 2371
2372 int flag = scope; 2372 int flag = scope;
2373 2373
@@ -2404,7 +2404,7 @@ static void init_loopback(struct net_device *dev)
2404 ASSERT_RTNL(); 2404 ASSERT_RTNL();
2405 2405
2406 if ((idev = ipv6_find_idev(dev)) == NULL) { 2406 if ((idev = ipv6_find_idev(dev)) == NULL) {
2407 printk(KERN_DEBUG "init loopback: add_dev failed\n"); 2407 pr_debug("%s: add_dev failed\n", __func__);
2408 return; 2408 return;
2409 } 2409 }
2410 2410
@@ -2413,7 +2413,7 @@ static void init_loopback(struct net_device *dev)
2413 2413
2414static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) 2414static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2415{ 2415{
2416 struct inet6_ifaddr * ifp; 2416 struct inet6_ifaddr *ifp;
2417 u32 addr_flags = IFA_F_PERMANENT; 2417 u32 addr_flags = IFA_F_PERMANENT;
2418 2418
2419#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2419#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2426,7 +2426,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
2426 ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags); 2426 ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
2427 if (!IS_ERR(ifp)) { 2427 if (!IS_ERR(ifp)) {
2428 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); 2428 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
2429 addrconf_dad_start(ifp, 0); 2429 addrconf_dad_start(ifp);
2430 in6_ifa_put(ifp); 2430 in6_ifa_put(ifp);
2431 } 2431 }
2432} 2432}
@@ -2434,15 +2434,15 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
2434static void addrconf_dev_config(struct net_device *dev) 2434static void addrconf_dev_config(struct net_device *dev)
2435{ 2435{
2436 struct in6_addr addr; 2436 struct in6_addr addr;
2437 struct inet6_dev * idev; 2437 struct inet6_dev *idev;
2438 2438
2439 ASSERT_RTNL(); 2439 ASSERT_RTNL();
2440 2440
2441 if ((dev->type != ARPHRD_ETHER) && 2441 if ((dev->type != ARPHRD_ETHER) &&
2442 (dev->type != ARPHRD_FDDI) && 2442 (dev->type != ARPHRD_FDDI) &&
2443 (dev->type != ARPHRD_IEEE802_TR) &&
2444 (dev->type != ARPHRD_ARCNET) && 2443 (dev->type != ARPHRD_ARCNET) &&
2445 (dev->type != ARPHRD_INFINIBAND)) { 2444 (dev->type != ARPHRD_INFINIBAND) &&
2445 (dev->type != ARPHRD_IEEE802154)) {
2446 /* Alas, we support only Ethernet autoconfiguration. */ 2446 /* Alas, we support only Ethernet autoconfiguration. */
2447 return; 2447 return;
2448 } 2448 }
@@ -2472,7 +2472,7 @@ static void addrconf_sit_config(struct net_device *dev)
2472 */ 2472 */
2473 2473
2474 if ((idev = ipv6_find_idev(dev)) == NULL) { 2474 if ((idev = ipv6_find_idev(dev)) == NULL) {
2475 printk(KERN_DEBUG "init sit: add_dev failed\n"); 2475 pr_debug("%s: add_dev failed\n", __func__);
2476 return; 2476 return;
2477 } 2477 }
2478 2478
@@ -2502,12 +2502,12 @@ static void addrconf_gre_config(struct net_device *dev)
2502 struct inet6_dev *idev; 2502 struct inet6_dev *idev;
2503 struct in6_addr addr; 2503 struct in6_addr addr;
2504 2504
2505 pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name); 2505 pr_info("%s(%s)\n", __func__, dev->name);
2506 2506
2507 ASSERT_RTNL(); 2507 ASSERT_RTNL();
2508 2508
2509 if ((idev = ipv6_find_idev(dev)) == NULL) { 2509 if ((idev = ipv6_find_idev(dev)) == NULL) {
2510 printk(KERN_DEBUG "init gre: add_dev failed\n"); 2510 pr_debug("%s: add_dev failed\n", __func__);
2511 return; 2511 return;
2512 } 2512 }
2513 2513
@@ -2547,7 +2547,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
2547 if (!ipv6_inherit_linklocal(idev, link_dev)) 2547 if (!ipv6_inherit_linklocal(idev, link_dev))
2548 return; 2548 return;
2549 } 2549 }
2550 printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n"); 2550 pr_debug("init ip6-ip6: add_linklocal failed\n");
2551} 2551}
2552 2552
2553/* 2553/*
@@ -2563,14 +2563,14 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2563 2563
2564 idev = addrconf_add_dev(dev); 2564 idev = addrconf_add_dev(dev);
2565 if (IS_ERR(idev)) { 2565 if (IS_ERR(idev)) {
2566 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); 2566 pr_debug("init ip6-ip6: add_dev failed\n");
2567 return; 2567 return;
2568 } 2568 }
2569 ip6_tnl_add_linklocal(idev); 2569 ip6_tnl_add_linklocal(idev);
2570} 2570}
2571 2571
2572static int addrconf_notify(struct notifier_block *this, unsigned long event, 2572static int addrconf_notify(struct notifier_block *this, unsigned long event,
2573 void * data) 2573 void *data)
2574{ 2574{
2575 struct net_device *dev = (struct net_device *) data; 2575 struct net_device *dev = (struct net_device *) data;
2576 struct inet6_dev *idev = __in6_dev_get(dev); 2576 struct inet6_dev *idev = __in6_dev_get(dev);
@@ -2594,9 +2594,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2594 if (event == NETDEV_UP) { 2594 if (event == NETDEV_UP) {
2595 if (!addrconf_qdisc_ok(dev)) { 2595 if (!addrconf_qdisc_ok(dev)) {
2596 /* device is not ready yet. */ 2596 /* device is not ready yet. */
2597 printk(KERN_INFO 2597 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
2598 "ADDRCONF(NETDEV_UP): %s: "
2599 "link is not ready\n",
2600 dev->name); 2598 dev->name);
2601 break; 2599 break;
2602 } 2600 }
@@ -2621,10 +2619,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2621 idev->if_flags |= IF_READY; 2619 idev->if_flags |= IF_READY;
2622 } 2620 }
2623 2621
2624 printk(KERN_INFO 2622 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
2625 "ADDRCONF(NETDEV_CHANGE): %s: " 2623 dev->name);
2626 "link becomes ready\n",
2627 dev->name);
2628 2624
2629 run_pending = 1; 2625 run_pending = 1;
2630 } 2626 }
@@ -2895,8 +2891,7 @@ static void addrconf_rs_timer(unsigned long data)
2895 * Note: we do not support deprecated "all on-link" 2891 * Note: we do not support deprecated "all on-link"
2896 * assumption any longer. 2892 * assumption any longer.
2897 */ 2893 */
2898 printk(KERN_DEBUG "%s: no IPv6 routers present\n", 2894 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
2899 idev->dev->name);
2900 } 2895 }
2901 2896
2902out: 2897out:
@@ -2921,7 +2916,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
2921 addrconf_mod_timer(ifp, AC_DAD, rand_num); 2916 addrconf_mod_timer(ifp, AC_DAD, rand_num);
2922} 2917}
2923 2918
2924static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) 2919static void addrconf_dad_start(struct inet6_ifaddr *ifp)
2925{ 2920{
2926 struct inet6_dev *idev = ifp->idev; 2921 struct inet6_dev *idev = ifp->idev;
2927 struct net_device *dev = idev->dev; 2922 struct net_device *dev = idev->dev;
@@ -3794,7 +3789,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3794 return inet6_dump_addr(skb, cb, type); 3789 return inet6_dump_addr(skb, cb, type);
3795} 3790}
3796 3791
3797static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, 3792static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3798 void *arg) 3793 void *arg)
3799{ 3794{
3800 struct net *net = sock_net(in_skb->sk); 3795 struct net *net = sock_net(in_skb->sk);
@@ -3989,14 +3984,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
3989 struct nlattr *nla; 3984 struct nlattr *nla;
3990 struct ifla_cacheinfo ci; 3985 struct ifla_cacheinfo ci;
3991 3986
3992 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); 3987 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
3993 3988 goto nla_put_failure;
3994 ci.max_reasm_len = IPV6_MAXPLEN; 3989 ci.max_reasm_len = IPV6_MAXPLEN;
3995 ci.tstamp = cstamp_delta(idev->tstamp); 3990 ci.tstamp = cstamp_delta(idev->tstamp);
3996 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); 3991 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3997 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); 3992 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3998 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); 3993 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
3999 3994 goto nla_put_failure;
4000 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); 3995 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
4001 if (nla == NULL) 3996 if (nla == NULL)
4002 goto nla_put_failure; 3997 goto nla_put_failure;
@@ -4061,15 +4056,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
4061 hdr->ifi_flags = dev_get_flags(dev); 4056 hdr->ifi_flags = dev_get_flags(dev);
4062 hdr->ifi_change = 0; 4057 hdr->ifi_change = 0;
4063 4058
4064 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 4059 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4065 4060 (dev->addr_len &&
4066 if (dev->addr_len) 4061 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4067 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 4062 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4068 4063 (dev->ifindex != dev->iflink &&
4069 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 4064 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
4070 if (dev->ifindex != dev->iflink) 4065 goto nla_put_failure;
4071 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
4072
4073 protoinfo = nla_nest_start(skb, IFLA_PROTINFO); 4066 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
4074 if (protoinfo == NULL) 4067 if (protoinfo == NULL)
4075 goto nla_put_failure; 4068 goto nla_put_failure;
@@ -4182,12 +4175,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
4182 if (pinfo->autoconf) 4175 if (pinfo->autoconf)
4183 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; 4176 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
4184 4177
4185 NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); 4178 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
4186 4179 goto nla_put_failure;
4187 ci.preferred_time = ntohl(pinfo->prefered); 4180 ci.preferred_time = ntohl(pinfo->prefered);
4188 ci.valid_time = ntohl(pinfo->valid); 4181 ci.valid_time = ntohl(pinfo->valid);
4189 NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); 4182 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
4190 4183 goto nla_put_failure;
4191 return nlmsg_end(skb, nlh); 4184 return nlmsg_end(skb, nlh);
4192 4185
4193nla_put_failure: 4186nla_put_failure:
@@ -4371,7 +4364,6 @@ static struct addrconf_sysctl_table
4371{ 4364{
4372 struct ctl_table_header *sysctl_header; 4365 struct ctl_table_header *sysctl_header;
4373 ctl_table addrconf_vars[DEVCONF_MAX+1]; 4366 ctl_table addrconf_vars[DEVCONF_MAX+1];
4374 char *dev_name;
4375} addrconf_sysctl __read_mostly = { 4367} addrconf_sysctl __read_mostly = {
4376 .sysctl_header = NULL, 4368 .sysctl_header = NULL,
4377 .addrconf_vars = { 4369 .addrconf_vars = {
@@ -4600,17 +4592,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4600{ 4592{
4601 int i; 4593 int i;
4602 struct addrconf_sysctl_table *t; 4594 struct addrconf_sysctl_table *t;
4603 4595 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
4604#define ADDRCONF_CTL_PATH_DEV 3
4605
4606 struct ctl_path addrconf_ctl_path[] = {
4607 { .procname = "net", },
4608 { .procname = "ipv6", },
4609 { .procname = "conf", },
4610 { /* to be set */ },
4611 { },
4612 };
4613
4614 4596
4615 t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); 4597 t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
4616 if (t == NULL) 4598 if (t == NULL)
@@ -4622,27 +4604,15 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4622 t->addrconf_vars[i].extra2 = net; 4604 t->addrconf_vars[i].extra2 = net;
4623 } 4605 }
4624 4606
4625 /* 4607 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
4626 * Make a copy of dev_name, because '.procname' is regarded as const
4627 * by sysctl and we wouldn't want anyone to change it under our feet
4628 * (see SIOCSIFNAME).
4629 */
4630 t->dev_name = kstrdup(dev_name, GFP_KERNEL);
4631 if (!t->dev_name)
4632 goto free;
4633
4634 addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;
4635 4608
4636 t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path, 4609 t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
4637 t->addrconf_vars);
4638 if (t->sysctl_header == NULL) 4610 if (t->sysctl_header == NULL)
4639 goto free_procname; 4611 goto free;
4640 4612
4641 p->sysctl = t; 4613 p->sysctl = t;
4642 return 0; 4614 return 0;
4643 4615
4644free_procname:
4645 kfree(t->dev_name);
4646free: 4616free:
4647 kfree(t); 4617 kfree(t);
4648out: 4618out:
@@ -4659,7 +4629,6 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4659 t = p->sysctl; 4629 t = p->sysctl;
4660 p->sysctl = NULL; 4630 p->sysctl = NULL;
4661 unregister_net_sysctl_table(t->sysctl_header); 4631 unregister_net_sysctl_table(t->sysctl_header);
4662 kfree(t->dev_name);
4663 kfree(t); 4632 kfree(t);
4664} 4633}
4665 4634
@@ -4778,8 +4747,8 @@ int __init addrconf_init(void)
4778 4747
4779 err = ipv6_addr_label_init(); 4748 err = ipv6_addr_label_init();
4780 if (err < 0) { 4749 if (err < 0) {
4781 printk(KERN_CRIT "IPv6 Addrconf:" 4750 pr_crit("%s: cannot initialize default policy table: %d\n",
4782 " cannot initialize default policy table: %d.\n", err); 4751 __func__, err);
4783 goto out; 4752 goto out;
4784 } 4753 }
4785 4754
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e595d7..d051e5f4bf34 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,9 +8,9 @@
8 8
9#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) 9#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
10 10
11static inline unsigned ipv6_addr_scope2type(unsigned scope) 11static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
12{ 12{
13 switch(scope) { 13 switch (scope) {
14 case IPV6_ADDR_SCOPE_NODELOCAL: 14 case IPV6_ADDR_SCOPE_NODELOCAL:
15 return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | 15 return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
16 IPV6_ADDR_LOOPBACK); 16 IPV6_ADDR_LOOPBACK);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 2d8ddba9ee58..eb6a63632d3c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -129,7 +129,7 @@ static void ip6addrlbl_free_rcu(struct rcu_head *h)
129 ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu)); 129 ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu));
130} 130}
131 131
132static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p) 132static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p)
133{ 133{
134 return atomic_inc_not_zero(&p->refcnt); 134 return atomic_inc_not_zero(&p->refcnt);
135} 135}
@@ -141,20 +141,20 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
141} 141}
142 142
143/* Find label */ 143/* Find label */
144static int __ip6addrlbl_match(struct net *net, 144static bool __ip6addrlbl_match(struct net *net,
145 struct ip6addrlbl_entry *p, 145 const struct ip6addrlbl_entry *p,
146 const struct in6_addr *addr, 146 const struct in6_addr *addr,
147 int addrtype, int ifindex) 147 int addrtype, int ifindex)
148{ 148{
149 if (!net_eq(ip6addrlbl_net(p), net)) 149 if (!net_eq(ip6addrlbl_net(p), net))
150 return 0; 150 return false;
151 if (p->ifindex && p->ifindex != ifindex) 151 if (p->ifindex && p->ifindex != ifindex)
152 return 0; 152 return false;
153 if (p->addrtype && p->addrtype != addrtype) 153 if (p->addrtype && p->addrtype != addrtype)
154 return 0; 154 return false;
155 if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen)) 155 if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen))
156 return 0; 156 return false;
157 return 1; 157 return true;
158} 158}
159 159
160static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, 160static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
@@ -350,7 +350,7 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
350 int err = 0; 350 int err = 0;
351 int i; 351 int i;
352 352
353 ADDRLABEL(KERN_DEBUG "%s()\n", __func__); 353 ADDRLABEL(KERN_DEBUG "%s\n", __func__);
354 354
355 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { 355 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
356 int ret = ip6addrlbl_add(net, 356 int ret = ip6addrlbl_add(net,
@@ -456,8 +456,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
456 return err; 456 return err;
457} 457}
458 458
459static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh, 459static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
460 int prefixlen, int ifindex, u32 lseq) 460 int prefixlen, int ifindex, u32 lseq)
461{ 461{
462 struct ifaddrlblmsg *ifal = nlmsg_data(nlh); 462 struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
463 ifal->ifal_family = AF_INET6; 463 ifal->ifal_family = AF_INET6;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 273f48d1df2e..e22e6d88bac6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -18,6 +18,7 @@
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21#define pr_fmt(fmt) "IPv6: " fmt
21 22
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/capability.h> 24#include <linux/capability.h>
@@ -60,7 +61,6 @@
60#endif 61#endif
61 62
62#include <asm/uaccess.h> 63#include <asm/uaccess.h>
63#include <asm/system.h>
64#include <linux/mroute6.h> 64#include <linux/mroute6.h>
65 65
66MODULE_AUTHOR("Cast of dozens"); 66MODULE_AUTHOR("Cast of dozens");
@@ -78,7 +78,7 @@ struct ipv6_params ipv6_defaults = {
78 .autoconf = 1, 78 .autoconf = 1,
79}; 79};
80 80
81static int disable_ipv6_mod = 0; 81static int disable_ipv6_mod;
82 82
83module_param_named(disable, disable_ipv6_mod, int, 0444); 83module_param_named(disable, disable_ipv6_mod, int, 0444);
84MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); 84MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
@@ -181,7 +181,7 @@ lookup_protocol:
181 err = 0; 181 err = 0;
182 sk->sk_no_check = answer_no_check; 182 sk->sk_no_check = answer_no_check;
183 if (INET_PROTOSW_REUSE & answer_flags) 183 if (INET_PROTOSW_REUSE & answer_flags)
184 sk->sk_reuse = 1; 184 sk->sk_reuse = SK_CAN_REUSE;
185 185
186 inet = inet_sk(sk); 186 inet = inet_sk(sk);
187 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 187 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -214,6 +214,7 @@ lookup_protocol:
214 inet->mc_ttl = 1; 214 inet->mc_ttl = 1;
215 inet->mc_index = 0; 215 inet->mc_index = 0;
216 inet->mc_list = NULL; 216 inet->mc_list = NULL;
217 inet->rcv_tos = 0;
217 218
218 if (ipv4_config.no_pmtu_disc) 219 if (ipv4_config.no_pmtu_disc)
219 inet->pmtudisc = IP_PMTUDISC_DONT; 220 inet->pmtudisc = IP_PMTUDISC_DONT;
@@ -256,7 +257,7 @@ out_rcu_unlock:
256/* bind for INET6 API */ 257/* bind for INET6 API */
257int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 258int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
258{ 259{
259 struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr; 260 struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr;
260 struct sock *sk = sock->sk; 261 struct sock *sk = sock->sk;
261 struct inet_sock *inet = inet_sk(sk); 262 struct inet_sock *inet = inet_sk(sk);
262 struct ipv6_pinfo *np = inet6_sk(sk); 263 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -390,7 +391,6 @@ out_unlock:
390 rcu_read_unlock(); 391 rcu_read_unlock();
391 goto out; 392 goto out;
392} 393}
393
394EXPORT_SYMBOL(inet6_bind); 394EXPORT_SYMBOL(inet6_bind);
395 395
396int inet6_release(struct socket *sock) 396int inet6_release(struct socket *sock)
@@ -408,7 +408,6 @@ int inet6_release(struct socket *sock)
408 408
409 return inet_release(sock); 409 return inet_release(sock);
410} 410}
411
412EXPORT_SYMBOL(inet6_release); 411EXPORT_SYMBOL(inet6_release);
413 412
414void inet6_destroy_sock(struct sock *sk) 413void inet6_destroy_sock(struct sock *sk)
@@ -419,10 +418,12 @@ void inet6_destroy_sock(struct sock *sk)
419 418
420 /* Release rx options */ 419 /* Release rx options */
421 420
422 if ((skb = xchg(&np->pktoptions, NULL)) != NULL) 421 skb = xchg(&np->pktoptions, NULL);
422 if (skb != NULL)
423 kfree_skb(skb); 423 kfree_skb(skb);
424 424
425 if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) 425 skb = xchg(&np->rxpmtu, NULL);
426 if (skb != NULL)
426 kfree_skb(skb); 427 kfree_skb(skb);
427 428
428 /* Free flowlabels */ 429 /* Free flowlabels */
@@ -430,10 +431,10 @@ void inet6_destroy_sock(struct sock *sk)
430 431
431 /* Free tx options */ 432 /* Free tx options */
432 433
433 if ((opt = xchg(&np->opt, NULL)) != NULL) 434 opt = xchg(&np->opt, NULL);
435 if (opt != NULL)
434 sock_kfree_s(sk, opt, opt->tot_len); 436 sock_kfree_s(sk, opt, opt->tot_len);
435} 437}
436
437EXPORT_SYMBOL_GPL(inet6_destroy_sock); 438EXPORT_SYMBOL_GPL(inet6_destroy_sock);
438 439
439/* 440/*
@@ -443,7 +444,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
443int inet6_getname(struct socket *sock, struct sockaddr *uaddr, 444int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
444 int *uaddr_len, int peer) 445 int *uaddr_len, int peer)
445{ 446{
446 struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr; 447 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
447 struct sock *sk = sock->sk; 448 struct sock *sk = sock->sk;
448 struct inet_sock *inet = inet_sk(sk); 449 struct inet_sock *inet = inet_sk(sk);
449 struct ipv6_pinfo *np = inet6_sk(sk); 450 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +475,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
474 *uaddr_len = sizeof(*sin); 475 *uaddr_len = sizeof(*sin);
475 return 0; 476 return 0;
476} 477}
477
478EXPORT_SYMBOL(inet6_getname); 478EXPORT_SYMBOL(inet6_getname);
479 479
480int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 480int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -482,8 +482,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
482 struct sock *sk = sock->sk; 482 struct sock *sk = sock->sk;
483 struct net *net = sock_net(sk); 483 struct net *net = sock_net(sk);
484 484
485 switch(cmd) 485 switch (cmd) {
486 {
487 case SIOCGSTAMP: 486 case SIOCGSTAMP:
488 return sock_get_timestamp(sk, (struct timeval __user *)arg); 487 return sock_get_timestamp(sk, (struct timeval __user *)arg);
489 488
@@ -509,7 +508,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
509 /*NOTREACHED*/ 508 /*NOTREACHED*/
510 return 0; 509 return 0;
511} 510}
512
513EXPORT_SYMBOL(inet6_ioctl); 511EXPORT_SYMBOL(inet6_ioctl);
514 512
515const struct proto_ops inet6_stream_ops = { 513const struct proto_ops inet6_stream_ops = {
@@ -615,25 +613,21 @@ out:
615 return ret; 613 return ret;
616 614
617out_permanent: 615out_permanent:
618 printk(KERN_ERR "Attempt to override permanent protocol %d.\n", 616 pr_err("Attempt to override permanent protocol %d\n", protocol);
619 protocol);
620 goto out; 617 goto out;
621 618
622out_illegal: 619out_illegal:
623 printk(KERN_ERR 620 pr_err("Ignoring attempt to register invalid socket type %d\n",
624 "Ignoring attempt to register invalid socket type %d.\n",
625 p->type); 621 p->type);
626 goto out; 622 goto out;
627} 623}
628
629EXPORT_SYMBOL(inet6_register_protosw); 624EXPORT_SYMBOL(inet6_register_protosw);
630 625
631void 626void
632inet6_unregister_protosw(struct inet_protosw *p) 627inet6_unregister_protosw(struct inet_protosw *p)
633{ 628{
634 if (INET_PROTOSW_PERMANENT & p->flags) { 629 if (INET_PROTOSW_PERMANENT & p->flags) {
635 printk(KERN_ERR 630 pr_err("Attempt to unregister permanent protocol %d\n",
636 "Attempt to unregister permanent protocol %d.\n",
637 p->protocol); 631 p->protocol);
638 } else { 632 } else {
639 spin_lock_bh(&inetsw6_lock); 633 spin_lock_bh(&inetsw6_lock);
@@ -643,7 +637,6 @@ inet6_unregister_protosw(struct inet_protosw *p)
643 synchronize_net(); 637 synchronize_net();
644 } 638 }
645} 639}
646
647EXPORT_SYMBOL(inet6_unregister_protosw); 640EXPORT_SYMBOL(inet6_unregister_protosw);
648 641
649int inet6_sk_rebuild_header(struct sock *sk) 642int inet6_sk_rebuild_header(struct sock *sk)
@@ -683,13 +676,12 @@ int inet6_sk_rebuild_header(struct sock *sk)
683 676
684 return 0; 677 return 0;
685} 678}
686
687EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); 679EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
688 680
689int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) 681bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
690{ 682{
691 struct ipv6_pinfo *np = inet6_sk(sk); 683 const struct ipv6_pinfo *np = inet6_sk(sk);
692 struct inet6_skb_parm *opt = IP6CB(skb); 684 const struct inet6_skb_parm *opt = IP6CB(skb);
693 685
694 if (np->rxopt.all) { 686 if (np->rxopt.all) {
695 if ((opt->hop && (np->rxopt.bits.hopopts || 687 if ((opt->hop && (np->rxopt.bits.hopopts ||
@@ -701,11 +693,10 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
701 np->rxopt.bits.osrcrt)) || 693 np->rxopt.bits.osrcrt)) ||
702 ((opt->dst1 || opt->dst0) && 694 ((opt->dst1 || opt->dst0) &&
703 (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts))) 695 (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
704 return 1; 696 return true;
705 } 697 }
706 return 0; 698 return false;
707} 699}
708
709EXPORT_SYMBOL_GPL(ipv6_opt_accepted); 700EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
710 701
711static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) 702static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
@@ -1070,13 +1061,11 @@ static int __init inet6_init(void)
1070 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); 1061 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
1071 1062
1072 /* Register the socket-side information for inet6_create. */ 1063 /* Register the socket-side information for inet6_create. */
1073 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) 1064 for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
1074 INIT_LIST_HEAD(r); 1065 INIT_LIST_HEAD(r);
1075 1066
1076 if (disable_ipv6_mod) { 1067 if (disable_ipv6_mod) {
1077 printk(KERN_INFO 1068 pr_info("Loaded, but administratively disabled, reboot required to enable\n");
1078 "IPv6: Loaded, but administratively disabled, "
1079 "reboot required to enable\n");
1080 goto out; 1069 goto out;
1081 } 1070 }
1082 1071
@@ -1111,11 +1100,6 @@ static int __init inet6_init(void)
1111 if (err) 1100 if (err)
1112 goto out_sock_register_fail; 1101 goto out_sock_register_fail;
1113 1102
1114#ifdef CONFIG_SYSCTL
1115 err = ipv6_static_sysctl_register();
1116 if (err)
1117 goto static_sysctl_fail;
1118#endif
1119 tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; 1103 tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
1120 1104
1121 /* 1105 /*
@@ -1242,10 +1226,6 @@ ipmr_fail:
1242icmp_fail: 1226icmp_fail:
1243 unregister_pernet_subsys(&inet6_net_ops); 1227 unregister_pernet_subsys(&inet6_net_ops);
1244register_pernet_fail: 1228register_pernet_fail:
1245#ifdef CONFIG_SYSCTL
1246 ipv6_static_sysctl_unregister();
1247static_sysctl_fail:
1248#endif
1249 sock_unregister(PF_INET6); 1229 sock_unregister(PF_INET6);
1250 rtnl_unregister_all(PF_INET6); 1230 rtnl_unregister_all(PF_INET6);
1251out_sock_register_fail: 1231out_sock_register_fail:
@@ -1272,9 +1252,6 @@ static void __exit inet6_exit(void)
1272 /* Disallow any further netlink messages */ 1252 /* Disallow any further netlink messages */
1273 rtnl_unregister_all(PF_INET6); 1253 rtnl_unregister_all(PF_INET6);
1274 1254
1275#ifdef CONFIG_SYSCTL
1276 ipv6_sysctl_unregister();
1277#endif
1278 udpv6_exit(); 1255 udpv6_exit();
1279 udplitev6_exit(); 1256 udplitev6_exit();
1280 tcpv6_exit(); 1257 tcpv6_exit();
@@ -1302,9 +1279,6 @@ static void __exit inet6_exit(void)
1302 rawv6_exit(); 1279 rawv6_exit();
1303 1280
1304 unregister_pernet_subsys(&inet6_net_ops); 1281 unregister_pernet_subsys(&inet6_net_ops);
1305#ifdef CONFIG_SYSCTL
1306 ipv6_static_sysctl_unregister();
1307#endif
1308 proto_unregister(&rawv6_prot); 1282 proto_unregister(&rawv6_prot);
1309 proto_unregister(&udplitev6_prot); 1283 proto_unregister(&udplitev6_prot);
1310 proto_unregister(&udpv6_prot); 1284 proto_unregister(&udpv6_prot);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2ae79dbeec2f..f1a4a2c28ed3 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -24,6 +24,8 @@
24 * This file is derived from net/ipv4/ah.c. 24 * This file is derived from net/ipv4/ah.c.
25 */ 25 */
26 26
27#define pr_fmt(fmt) "IPv6: " fmt
28
27#include <crypto/hash.h> 29#include <crypto/hash.h>
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/slab.h> 31#include <linux/slab.h>
@@ -111,7 +113,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
111 __alignof__(struct scatterlist)); 113 __alignof__(struct scatterlist));
112} 114}
113 115
114static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) 116static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
115{ 117{
116 u8 *opt = (u8 *)opthdr; 118 u8 *opt = (u8 *)opthdr;
117 int len = ipv6_optlen(opthdr); 119 int len = ipv6_optlen(opthdr);
@@ -125,7 +127,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
125 127
126 switch (opt[off]) { 128 switch (opt[off]) {
127 129
128 case IPV6_TLV_PAD0: 130 case IPV6_TLV_PAD1:
129 optlen = 1; 131 optlen = 1;
130 break; 132 break;
131 default: 133 default:
@@ -143,10 +145,10 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
143 len -= optlen; 145 len -= optlen;
144 } 146 }
145 if (len == 0) 147 if (len == 0)
146 return 1; 148 return true;
147 149
148bad: 150bad:
149 return 0; 151 return false;
150} 152}
151 153
152#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 154#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -169,7 +171,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
169 171
170 switch (opt[off]) { 172 switch (opt[off]) {
171 173
172 case IPV6_TLV_PAD0: 174 case IPV6_TLV_PAD1:
173 optlen = 1; 175 optlen = 1;
174 break; 176 break;
175 default: 177 default:
@@ -189,8 +191,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
189 191
190 hao = (struct ipv6_destopt_hao *)&opt[off]; 192 hao = (struct ipv6_destopt_hao *)&opt[off];
191 if (hao->length != sizeof(hao->addr)) { 193 if (hao->length != sizeof(hao->addr)) {
192 if (net_ratelimit()) 194 net_warn_ratelimited("destopt hao: invalid header length: %u\n",
193 printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length); 195 hao->length);
194 goto bad; 196 goto bad;
195 } 197 }
196 final_addr = hao->addr; 198 final_addr = hao->addr;
@@ -659,9 +661,9 @@ static int ah6_init_state(struct xfrm_state *x)
659 661
660 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 662 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
661 crypto_ahash_digestsize(ahash)) { 663 crypto_ahash_digestsize(ahash)) {
662 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 664 pr_info("AH: %s digestsize %u != %hu\n",
663 x->aalg->alg_name, crypto_ahash_digestsize(ahash), 665 x->aalg->alg_name, crypto_ahash_digestsize(ahash),
664 aalg_desc->uinfo.auth.icv_fullbits/8); 666 aalg_desc->uinfo.auth.icv_fullbits/8);
665 goto error; 667 goto error;
666 } 668 }
667 669
@@ -727,12 +729,12 @@ static const struct inet6_protocol ah6_protocol = {
727static int __init ah6_init(void) 729static int __init ah6_init(void)
728{ 730{
729 if (xfrm_register_type(&ah6_type, AF_INET6) < 0) { 731 if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
730 printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n"); 732 pr_info("%s: can't add xfrm type\n", __func__);
731 return -EAGAIN; 733 return -EAGAIN;
732 } 734 }
733 735
734 if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) { 736 if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
735 printk(KERN_INFO "ipv6 ah init: can't add protocol\n"); 737 pr_info("%s: can't add protocol\n", __func__);
736 xfrm_unregister_type(&ah6_type, AF_INET6); 738 xfrm_unregister_type(&ah6_type, AF_INET6);
737 return -EAGAIN; 739 return -EAGAIN;
738 } 740 }
@@ -743,10 +745,10 @@ static int __init ah6_init(void)
743static void __exit ah6_fini(void) 745static void __exit ah6_fini(void)
744{ 746{
745 if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0) 747 if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
746 printk(KERN_INFO "ipv6 ah close: can't remove protocol\n"); 748 pr_info("%s: can't remove protocol\n", __func__);
747 749
748 if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0) 750 if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
749 printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n"); 751 pr_info("%s: can't remove xfrm type\n", __func__);
750 752
751} 753}
752 754
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 59402b4637f9..cdf02be5f191 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -211,35 +211,6 @@ void ipv6_sock_ac_close(struct sock *sk)
211 rcu_read_unlock(); 211 rcu_read_unlock();
212} 212}
213 213
214#if 0
215/* The function is not used, which is funny. Apparently, author
216 * supposed to use it to filter out datagrams inside udp/raw but forgot.
217 *
218 * It is OK, anycasts are not special comparing to delivery to unicasts.
219 */
220
221int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex)
222{
223 struct ipv6_ac_socklist *pac;
224 struct ipv6_pinfo *np = inet6_sk(sk);
225 int found;
226
227 found = 0;
228 read_lock(&ipv6_sk_ac_lock);
229 for (pac=np->ipv6_ac_list; pac; pac=pac->acl_next) {
230 if (ifindex && pac->acl_ifindex != ifindex)
231 continue;
232 found = ipv6_addr_equal(&pac->acl_addr, addr);
233 if (found)
234 break;
235 }
236 read_unlock(&ipv6_sk_ac_lock);
237
238 return found;
239}
240
241#endif
242
243static void aca_put(struct ifacaddr6 *ac) 214static void aca_put(struct ifacaddr6 *ac)
244{ 215{
245 if (atomic_dec_and_test(&ac->aca_refcnt)) { 216 if (atomic_dec_and_test(&ac->aca_refcnt)) {
@@ -371,7 +342,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
371 * check if the interface has this anycast address 342 * check if the interface has this anycast address
372 * called with rcu_read_lock() 343 * called with rcu_read_lock()
373 */ 344 */
374static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr) 345static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
375{ 346{
376 struct inet6_dev *idev; 347 struct inet6_dev *idev;
377 struct ifacaddr6 *aca; 348 struct ifacaddr6 *aca;
@@ -385,16 +356,16 @@ static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *add
385 read_unlock_bh(&idev->lock); 356 read_unlock_bh(&idev->lock);
386 return aca != NULL; 357 return aca != NULL;
387 } 358 }
388 return 0; 359 return false;
389} 360}
390 361
391/* 362/*
392 * check if given interface (or any, if dev==0) has this anycast address 363 * check if given interface (or any, if dev==0) has this anycast address
393 */ 364 */
394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 365bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
395 const struct in6_addr *addr) 366 const struct in6_addr *addr)
396{ 367{
397 int found = 0; 368 bool found = false;
398 369
399 rcu_read_lock(); 370 rcu_read_lock();
400 if (dev) 371 if (dev)
@@ -402,7 +373,7 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
402 else 373 else
403 for_each_netdev_rcu(net, dev) 374 for_each_netdev_rcu(net, dev)
404 if (ipv6_chk_acast_dev(dev, addr)) { 375 if (ipv6_chk_acast_dev(dev, addr)) {
405 found = 1; 376 found = true;
406 break; 377 break;
407 } 378 }
408 rcu_read_unlock(); 379 rcu_read_unlock();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 251e7cd75e89..be2b67d631e5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -22,6 +22,7 @@
22#include <linux/ipv6.h> 22#include <linux/ipv6.h>
23#include <linux/route.h> 23#include <linux/route.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/export.h>
25 26
26#include <net/ipv6.h> 27#include <net/ipv6.h>
27#include <net/ndisc.h> 28#include <net/ndisc.h>
@@ -33,9 +34,9 @@
33#include <linux/errqueue.h> 34#include <linux/errqueue.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35 36
36static inline int ipv6_mapped_addr_any(const struct in6_addr *a) 37static bool ipv6_mapped_addr_any(const struct in6_addr *a)
37{ 38{
38 return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0)); 39 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
39} 40}
40 41
41int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 42int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -98,7 +99,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
99 100
100 err = ip4_datagram_connect(sk, 101 err = ip4_datagram_connect(sk,
101 (struct sockaddr*) &sin, 102 (struct sockaddr *) &sin,
102 sizeof(sin)); 103 sizeof(sin));
103 104
104ipv4_connected: 105ipv4_connected:
@@ -202,6 +203,7 @@ out:
202 fl6_sock_release(flowlabel); 203 fl6_sock_release(flowlabel);
203 return err; 204 return err;
204} 205}
206EXPORT_SYMBOL_GPL(ip6_datagram_connect);
205 207
206void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 208void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
207 __be16 port, u32 info, u8 *payload) 209 __be16 port, u32 info, u8 *payload)
@@ -414,6 +416,7 @@ out_free_skb:
414out: 416out:
415 return err; 417 return err;
416} 418}
419EXPORT_SYMBOL_GPL(ipv6_recv_error);
417 420
418/* 421/*
419 * Handle IPV6_RECVPATHMTU 422 * Handle IPV6_RECVPATHMTU
@@ -485,7 +488,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
485 } 488 }
486 489
487 if (np->rxopt.bits.rxtclass) { 490 if (np->rxopt.bits.rxtclass) {
488 int tclass = (ntohl(*(__be32 *)ipv6_hdr(skb)) >> 20) & 0xff; 491 int tclass = ipv6_tclass(ipv6_hdr(skb));
489 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); 492 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
490 } 493 }
491 494
@@ -515,10 +518,10 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
515 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 518 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
516 519
517 while (off <= opt->lastopt) { 520 while (off <= opt->lastopt) {
518 unsigned len; 521 unsigned int len;
519 u8 *ptr = nh + off; 522 u8 *ptr = nh + off;
520 523
521 switch(nexthdr) { 524 switch (nexthdr) {
522 case IPPROTO_DSTOPTS: 525 case IPPROTO_DSTOPTS:
523 nexthdr = ptr[0]; 526 nexthdr = ptr[0];
524 len = (ptr[1] + 1) << 3; 527 len = (ptr[1] + 1) << 3;
@@ -827,9 +830,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
827 int tc; 830 int tc;
828 831
829 err = -EINVAL; 832 err = -EINVAL;
830 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { 833 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
831 goto exit_f; 834 goto exit_f;
832 }
833 835
834 tc = *(int *)CMSG_DATA(cmsg); 836 tc = *(int *)CMSG_DATA(cmsg);
835 if (tc < -1 || tc > 0xff) 837 if (tc < -1 || tc > 0xff)
@@ -846,9 +848,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
846 int df; 848 int df;
847 849
848 err = -EINVAL; 850 err = -EINVAL;
849 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { 851 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
850 goto exit_f; 852 goto exit_f;
851 }
852 853
853 df = *(int *)CMSG_DATA(cmsg); 854 df = *(int *)CMSG_DATA(cmsg);
854 if (df < 0 || df > 1) 855 if (df < 0 || df > 1)
@@ -870,3 +871,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
870exit_f: 871exit_f:
871 return err; 872 return err;
872} 873}
874EXPORT_SYMBOL_GPL(datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1ac7938dd9ec..db1521fcda5b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,8 @@
24 * This file is derived from net/ipv4/esp.c 24 * This file is derived from net/ipv4/esp.c
25 */ 25 */
26 26
27#define pr_fmt(fmt) "IPv6: " fmt
28
27#include <crypto/aead.h> 29#include <crypto/aead.h>
28#include <crypto/authenc.h> 30#include <crypto/authenc.h>
29#include <linux/err.h> 31#include <linux/err.h>
@@ -411,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
411 struct esp_data *esp = x->data; 413 struct esp_data *esp = x->data;
412 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); 414 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
413 u32 align = max_t(u32, blksize, esp->padlen); 415 u32 align = max_t(u32, blksize, esp->padlen);
414 u32 rem; 416 unsigned int net_adj;
415
416 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
417 rem = mtu & (align - 1);
418 mtu &= ~(align - 1);
419 417
420 if (x->props.mode != XFRM_MODE_TUNNEL) { 418 if (x->props.mode != XFRM_MODE_TUNNEL)
421 u32 padsize = ((blksize - 1) & 7) + 1; 419 net_adj = sizeof(struct ipv6hdr);
422 mtu -= blksize - padsize; 420 else
423 mtu += min_t(u32, blksize - padsize, rem); 421 net_adj = 0;
424 }
425 422
426 return mtu - 2; 423 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
424 net_adj) & ~(align - 1)) + (net_adj - 2);
427} 425}
428 426
429static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 427static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -442,8 +440,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
442 esph->spi, IPPROTO_ESP, AF_INET6); 440 esph->spi, IPPROTO_ESP, AF_INET6);
443 if (!x) 441 if (!x)
444 return; 442 return;
445 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 443 pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
446 ntohl(esph->spi), &iph->daddr); 444 ntohl(esph->spi), &iph->daddr);
447 xfrm_state_put(x); 445 xfrm_state_put(x);
448} 446}
449 447
@@ -651,11 +649,11 @@ static const struct inet6_protocol esp6_protocol = {
651static int __init esp6_init(void) 649static int __init esp6_init(void)
652{ 650{
653 if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { 651 if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
654 printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n"); 652 pr_info("%s: can't add xfrm type\n", __func__);
655 return -EAGAIN; 653 return -EAGAIN;
656 } 654 }
657 if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) { 655 if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
658 printk(KERN_INFO "ipv6 esp init: can't add protocol\n"); 656 pr_info("%s: can't add protocol\n", __func__);
659 xfrm_unregister_type(&esp6_type, AF_INET6); 657 xfrm_unregister_type(&esp6_type, AF_INET6);
660 return -EAGAIN; 658 return -EAGAIN;
661 } 659 }
@@ -666,9 +664,9 @@ static int __init esp6_init(void)
666static void __exit esp6_fini(void) 664static void __exit esp6_fini(void)
667{ 665{
668 if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0) 666 if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
669 printk(KERN_INFO "ipv6 esp close: can't remove protocol\n"); 667 pr_info("%s: can't remove protocol\n", __func__);
670 if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) 668 if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
671 printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n"); 669 pr_info("%s: can't remove xfrm type\n", __func__);
672} 670}
673 671
674module_init(esp6_init); 672module_init(esp6_init);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3d641b6e9b09..6447dc49429f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -75,7 +75,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
75 return offset; 75 return offset;
76 76
77 switch (opttype) { 77 switch (opttype) {
78 case IPV6_TLV_PAD0: 78 case IPV6_TLV_PAD1:
79 optlen = 1; 79 optlen = 1;
80 break; 80 break;
81 default: 81 default:
@@ -96,14 +96,14 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv);
96/* 96/*
97 * Parsing tlv encoded headers. 97 * Parsing tlv encoded headers.
98 * 98 *
99 * Parsing function "func" returns 1, if parsing succeed 99 * Parsing function "func" returns true, if parsing succeed
100 * and 0, if it failed. 100 * and false, if it failed.
101 * It MUST NOT touch skb->h. 101 * It MUST NOT touch skb->h.
102 */ 102 */
103 103
104struct tlvtype_proc { 104struct tlvtype_proc {
105 int type; 105 int type;
106 int (*func)(struct sk_buff *skb, int offset); 106 bool (*func)(struct sk_buff *skb, int offset);
107}; 107};
108 108
109/********************* 109/*********************
@@ -112,11 +112,11 @@ struct tlvtype_proc {
112 112
113/* An unknown option is detected, decide what to do */ 113/* An unknown option is detected, decide what to do */
114 114
115static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) 115static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
116{ 116{
117 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { 117 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
118 case 0: /* ignore */ 118 case 0: /* ignore */
119 return 1; 119 return true;
120 120
121 case 1: /* drop packet */ 121 case 1: /* drop packet */
122 break; 122 break;
@@ -129,21 +129,22 @@ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
129 break; 129 break;
130 case 2: /* send ICMP PARM PROB regardless and drop packet */ 130 case 2: /* send ICMP PARM PROB regardless and drop packet */
131 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); 131 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
132 return 0; 132 return false;
133 } 133 }
134 134
135 kfree_skb(skb); 135 kfree_skb(skb);
136 return 0; 136 return false;
137} 137}
138 138
139/* Parse tlv encoded option header (hop-by-hop or destination) */ 139/* Parse tlv encoded option header (hop-by-hop or destination) */
140 140
141static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) 141static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
142{ 142{
143 struct tlvtype_proc *curr; 143 const struct tlvtype_proc *curr;
144 const unsigned char *nh = skb_network_header(skb); 144 const unsigned char *nh = skb_network_header(skb);
145 int off = skb_network_header_len(skb); 145 int off = skb_network_header_len(skb);
146 int len = (skb_transport_header(skb)[1] + 1) << 3; 146 int len = (skb_transport_header(skb)[1] + 1) << 3;
147 int padlen = 0;
147 148
148 if (skb_transport_offset(skb) + len > skb_headlen(skb)) 149 if (skb_transport_offset(skb) + len > skb_headlen(skb))
149 goto bad; 150 goto bad;
@@ -153,13 +154,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
153 154
154 while (len > 0) { 155 while (len > 0) {
155 int optlen = nh[off + 1] + 2; 156 int optlen = nh[off + 1] + 2;
157 int i;
156 158
157 switch (nh[off]) { 159 switch (nh[off]) {
158 case IPV6_TLV_PAD0: 160 case IPV6_TLV_PAD1:
159 optlen = 1; 161 optlen = 1;
162 padlen++;
163 if (padlen > 7)
164 goto bad;
160 break; 165 break;
161 166
162 case IPV6_TLV_PADN: 167 case IPV6_TLV_PADN:
168 /* RFC 2460 states that the purpose of PadN is
169 * to align the containing header to multiples
170 * of 8. 7 is therefore the highest valid value.
171 * See also RFC 4942, Section 2.1.9.5.
172 */
173 padlen += optlen;
174 if (padlen > 7)
175 goto bad;
176 /* RFC 4942 recommends receiving hosts to
177 * actively check PadN payload to contain
178 * only zeroes.
179 */
180 for (i = 2; i < optlen; i++) {
181 if (nh[off + i] != 0)
182 goto bad;
183 }
163 break; 184 break;
164 185
165 default: /* Other TLV code so scan list */ 186 default: /* Other TLV code so scan list */
@@ -170,25 +191,33 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
170 /* type specific length/alignment 191 /* type specific length/alignment
171 checks will be performed in the 192 checks will be performed in the
172 func(). */ 193 func(). */
173 if (curr->func(skb, off) == 0) 194 if (curr->func(skb, off) == false)
174 return 0; 195 return false;
175 break; 196 break;
176 } 197 }
177 } 198 }
178 if (curr->type < 0) { 199 if (curr->type < 0) {
179 if (ip6_tlvopt_unknown(skb, off) == 0) 200 if (ip6_tlvopt_unknown(skb, off) == 0)
180 return 0; 201 return false;
181 } 202 }
203 padlen = 0;
182 break; 204 break;
183 } 205 }
184 off += optlen; 206 off += optlen;
185 len -= optlen; 207 len -= optlen;
186 } 208 }
209 /* This case will not be caught by above check since its padding
210 * length is smaller than 7:
211 * 1 byte NH + 1 byte Length + 6 bytes Padding
212 */
213 if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
214 goto bad;
215
187 if (len == 0) 216 if (len == 0)
188 return 1; 217 return true;
189bad: 218bad:
190 kfree_skb(skb); 219 kfree_skb(skb);
191 return 0; 220 return false;
192} 221}
193 222
194/***************************** 223/*****************************
@@ -196,7 +225,7 @@ bad:
196 *****************************/ 225 *****************************/
197 226
198#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 227#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
199static int ipv6_dest_hao(struct sk_buff *skb, int optoff) 228static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
200{ 229{
201 struct ipv6_destopt_hao *hao; 230 struct ipv6_destopt_hao *hao;
202 struct inet6_skb_parm *opt = IP6CB(skb); 231 struct inet6_skb_parm *opt = IP6CB(skb);
@@ -250,15 +279,15 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
250 if (skb->tstamp.tv64 == 0) 279 if (skb->tstamp.tv64 == 0)
251 __net_timestamp(skb); 280 __net_timestamp(skb);
252 281
253 return 1; 282 return true;
254 283
255 discard: 284 discard:
256 kfree_skb(skb); 285 kfree_skb(skb);
257 return 0; 286 return false;
258} 287}
259#endif 288#endif
260 289
261static struct tlvtype_proc tlvprocdestopt_lst[] = { 290static const struct tlvtype_proc tlvprocdestopt_lst[] = {
262#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 291#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
263 { 292 {
264 .type = IPV6_TLV_HAO, 293 .type = IPV6_TLV_HAO,
@@ -563,23 +592,23 @@ static inline struct net *ipv6_skb_net(struct sk_buff *skb)
563 592
564/* Router Alert as of RFC 2711 */ 593/* Router Alert as of RFC 2711 */
565 594
566static int ipv6_hop_ra(struct sk_buff *skb, int optoff) 595static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
567{ 596{
568 const unsigned char *nh = skb_network_header(skb); 597 const unsigned char *nh = skb_network_header(skb);
569 598
570 if (nh[optoff + 1] == 2) { 599 if (nh[optoff + 1] == 2) {
571 IP6CB(skb)->ra = optoff; 600 IP6CB(skb)->ra = optoff;
572 return 1; 601 return true;
573 } 602 }
574 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", 603 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
575 nh[optoff + 1]); 604 nh[optoff + 1]);
576 kfree_skb(skb); 605 kfree_skb(skb);
577 return 0; 606 return false;
578} 607}
579 608
580/* Jumbo payload */ 609/* Jumbo payload */
581 610
582static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) 611static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
583{ 612{
584 const unsigned char *nh = skb_network_header(skb); 613 const unsigned char *nh = skb_network_header(skb);
585 struct net *net = ipv6_skb_net(skb); 614 struct net *net = ipv6_skb_net(skb);
@@ -598,13 +627,13 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
598 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), 627 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
599 IPSTATS_MIB_INHDRERRORS); 628 IPSTATS_MIB_INHDRERRORS);
600 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); 629 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
601 return 0; 630 return false;
602 } 631 }
603 if (ipv6_hdr(skb)->payload_len) { 632 if (ipv6_hdr(skb)->payload_len) {
604 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), 633 IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
605 IPSTATS_MIB_INHDRERRORS); 634 IPSTATS_MIB_INHDRERRORS);
606 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); 635 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
607 return 0; 636 return false;
608 } 637 }
609 638
610 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { 639 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
@@ -616,14 +645,14 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
616 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) 645 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
617 goto drop; 646 goto drop;
618 647
619 return 1; 648 return true;
620 649
621drop: 650drop:
622 kfree_skb(skb); 651 kfree_skb(skb);
623 return 0; 652 return false;
624} 653}
625 654
626static struct tlvtype_proc tlvprochopopt_lst[] = { 655static const struct tlvtype_proc tlvprochopopt_lst[] = {
627 { 656 {
628 .type = IPV6_TLV_ROUTERALERT, 657 .type = IPV6_TLV_ROUTERALERT,
629 .func = ipv6_hop_ra, 658 .func = ipv6_hop_ra,
@@ -722,7 +751,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
722 if (opt->hopopt) 751 if (opt->hopopt)
723 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); 752 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
724} 753}
725
726EXPORT_SYMBOL(ipv6_push_nfrag_opts); 754EXPORT_SYMBOL(ipv6_push_nfrag_opts);
727 755
728void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) 756void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +766,19 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
738 766
739 opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); 767 opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
740 if (opt2) { 768 if (opt2) {
741 long dif = (char*)opt2 - (char*)opt; 769 long dif = (char *)opt2 - (char *)opt;
742 memcpy(opt2, opt, opt->tot_len); 770 memcpy(opt2, opt, opt->tot_len);
743 if (opt2->hopopt) 771 if (opt2->hopopt)
744 *((char**)&opt2->hopopt) += dif; 772 *((char **)&opt2->hopopt) += dif;
745 if (opt2->dst0opt) 773 if (opt2->dst0opt)
746 *((char**)&opt2->dst0opt) += dif; 774 *((char **)&opt2->dst0opt) += dif;
747 if (opt2->dst1opt) 775 if (opt2->dst1opt)
748 *((char**)&opt2->dst1opt) += dif; 776 *((char **)&opt2->dst1opt) += dif;
749 if (opt2->srcrt) 777 if (opt2->srcrt)
750 *((char**)&opt2->srcrt) += dif; 778 *((char **)&opt2->srcrt) += dif;
751 } 779 }
752 return opt2; 780 return opt2;
753} 781}
754
755EXPORT_SYMBOL_GPL(ipv6_dup_options); 782EXPORT_SYMBOL_GPL(ipv6_dup_options);
756 783
757static int ipv6_renew_option(void *ohdr, 784static int ipv6_renew_option(void *ohdr,
@@ -869,6 +896,7 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
869 896
870 return opt; 897 return opt;
871} 898}
899EXPORT_SYMBOL_GPL(ipv6_fixup_options);
872 900
873/** 901/**
874 * fl6_update_dst - update flowi destination address with info given 902 * fl6_update_dst - update flowi destination address with info given
@@ -892,5 +920,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
892 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; 920 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
893 return orig; 921 return orig;
894} 922}
895
896EXPORT_SYMBOL_GPL(fl6_update_dst); 923EXPORT_SYMBOL_GPL(fl6_update_dst);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 72957f4a7c6c..f73d59a14131 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -9,7 +9,7 @@
9 * find out if nexthdr is a well-known extension header or a protocol 9 * find out if nexthdr is a well-known extension header or a protocol
10 */ 10 */
11 11
12int ipv6_ext_hdr(u8 nexthdr) 12bool ipv6_ext_hdr(u8 nexthdr)
13{ 13{
14 /* 14 /*
15 * find out if nexthdr is an extension header or a protocol 15 * find out if nexthdr is an extension header or a protocol
@@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr)
21 (nexthdr == NEXTHDR_NONE) || 21 (nexthdr == NEXTHDR_NONE) ||
22 (nexthdr == NEXTHDR_DEST); 22 (nexthdr == NEXTHDR_DEST);
23} 23}
24EXPORT_SYMBOL(ipv6_ext_hdr);
24 25
25/* 26/*
26 * Skip any extension headers. This is used by the ICMP module. 27 * Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
109 *nexthdrp = nexthdr; 110 *nexthdrp = nexthdr;
110 return start; 111 return start;
111} 112}
112
113EXPORT_SYMBOL(ipv6_ext_hdr);
114EXPORT_SYMBOL(ipv6_skip_exthdr); 113EXPORT_SYMBOL(ipv6_skip_exthdr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b6c573152067..0ff1cfd55bc4 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -22,8 +22,7 @@
22#include <net/ip6_route.h> 22#include <net/ip6_route.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24 24
25struct fib6_rule 25struct fib6_rule {
26{
27 struct fib_rule common; 26 struct fib_rule common;
28 struct rt6key src; 27 struct rt6key src;
29 struct rt6key dst; 28 struct rt6key dst;
@@ -215,14 +214,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
215 frh->src_len = rule6->src.plen; 214 frh->src_len = rule6->src.plen;
216 frh->tos = rule6->tclass; 215 frh->tos = rule6->tclass;
217 216
218 if (rule6->dst.plen) 217 if ((rule6->dst.plen &&
219 NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr), 218 nla_put(skb, FRA_DST, sizeof(struct in6_addr),
220 &rule6->dst.addr); 219 &rule6->dst.addr)) ||
221 220 (rule6->src.plen &&
222 if (rule6->src.plen) 221 nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
223 NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), 222 &rule6->src.addr)))
224 &rule6->src.addr); 223 goto nla_put_failure;
225
226 return 0; 224 return 0;
227 225
228nla_put_failure: 226nla_put_failure:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 01d46bff63c3..091a2971c7b7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -29,6 +29,8 @@
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data 29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
30 */ 30 */
31 31
32#define pr_fmt(fmt) "IPv6: " fmt
33
32#include <linux/module.h> 34#include <linux/module.h>
33#include <linux/errno.h> 35#include <linux/errno.h>
34#include <linux/types.h> 36#include <linux/types.h>
@@ -66,7 +68,6 @@
66#include <net/inet_common.h> 68#include <net/inet_common.h>
67 69
68#include <asm/uaccess.h> 70#include <asm/uaccess.h>
69#include <asm/system.h>
70 71
71/* 72/*
72 * The ICMP socket(s). This is the most convenient way to flow control 73 * The ICMP socket(s). This is the most convenient way to flow control
@@ -130,7 +131,7 @@ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
130 * --ANK (980726) 131 * --ANK (980726)
131 */ 132 */
132 133
133static int is_ineligible(struct sk_buff *skb) 134static bool is_ineligible(const struct sk_buff *skb)
134{ 135{
135 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; 136 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
136 int len = skb->len - ptr; 137 int len = skb->len - ptr;
@@ -138,11 +139,11 @@ static int is_ineligible(struct sk_buff *skb)
138 __be16 frag_off; 139 __be16 frag_off;
139 140
140 if (len < 0) 141 if (len < 0)
141 return 1; 142 return true;
142 143
143 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); 144 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
144 if (ptr < 0) 145 if (ptr < 0)
145 return 0; 146 return false;
146 if (nexthdr == IPPROTO_ICMPV6) { 147 if (nexthdr == IPPROTO_ICMPV6) {
147 u8 _type, *tp; 148 u8 _type, *tp;
148 tp = skb_header_pointer(skb, 149 tp = skb_header_pointer(skb,
@@ -150,9 +151,9 @@ static int is_ineligible(struct sk_buff *skb)
150 sizeof(_type), &_type); 151 sizeof(_type), &_type);
151 if (tp == NULL || 152 if (tp == NULL ||
152 !(*tp & ICMPV6_INFOMSG_MASK)) 153 !(*tp & ICMPV6_INFOMSG_MASK))
153 return 1; 154 return true;
154 } 155 }
155 return 0; 156 return false;
156} 157}
157 158
158/* 159/*
@@ -207,14 +208,14 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
207 * highest-order two bits set to 10 208 * highest-order two bits set to 10
208 */ 209 */
209 210
210static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) 211static bool opt_unrec(struct sk_buff *skb, __u32 offset)
211{ 212{
212 u8 _optval, *op; 213 u8 _optval, *op;
213 214
214 offset += skb_network_offset(skb); 215 offset += skb_network_offset(skb);
215 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); 216 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
216 if (op == NULL) 217 if (op == NULL)
217 return 1; 218 return true;
218 return (*op & 0xC0) == 0x80; 219 return (*op & 0xC0) == 0x80;
219} 220}
220 221
@@ -468,6 +469,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
468 469
469 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 470 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
470 fl6.flowi6_oif = np->mcast_oif; 471 fl6.flowi6_oif = np->mcast_oif;
472 else if (!fl6.flowi6_oif)
473 fl6.flowi6_oif = np->ucast_oif;
471 474
472 dst = icmpv6_route_lookup(net, skb, sk, &fl6); 475 dst = icmpv6_route_lookup(net, skb, sk, &fl6);
473 if (IS_ERR(dst)) 476 if (IS_ERR(dst))
@@ -497,7 +500,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
497 err = ip6_append_data(sk, icmpv6_getfrag, &msg, 500 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
498 len + sizeof(struct icmp6hdr), 501 len + sizeof(struct icmp6hdr),
499 sizeof(struct icmp6hdr), hlimit, 502 sizeof(struct icmp6hdr), hlimit,
500 np->tclass, NULL, &fl6, (struct rt6_info*)dst, 503 np->tclass, NULL, &fl6, (struct rt6_info *)dst,
501 MSG_DONTWAIT, np->dontfrag); 504 MSG_DONTWAIT, np->dontfrag);
502 if (err) { 505 if (err) {
503 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); 506 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -553,6 +556,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
553 556
554 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 557 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
555 fl6.flowi6_oif = np->mcast_oif; 558 fl6.flowi6_oif = np->mcast_oif;
559 else if (!fl6.flowi6_oif)
560 fl6.flowi6_oif = np->ucast_oif;
556 561
557 err = ip6_dst_lookup(sk, &dst, &fl6); 562 err = ip6_dst_lookup(sk, &dst, &fl6);
558 if (err) 563 if (err)
@@ -576,7 +581,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
576 581
577 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 582 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
578 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, 583 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
579 (struct rt6_info*)dst, MSG_DONTWAIT, 584 (struct rt6_info *)dst, MSG_DONTWAIT,
580 np->dontfrag); 585 np->dontfrag);
581 586
582 if (err) { 587 if (err) {
@@ -817,9 +822,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
817 err = inet_ctl_sock_create(&sk, PF_INET6, 822 err = inet_ctl_sock_create(&sk, PF_INET6,
818 SOCK_RAW, IPPROTO_ICMPV6, net); 823 SOCK_RAW, IPPROTO_ICMPV6, net);
819 if (err < 0) { 824 if (err < 0) {
820 printk(KERN_ERR 825 pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
821 "Failed to initialize the ICMP6 control socket "
822 "(err %d).\n",
823 err); 826 err);
824 goto fail; 827 goto fail;
825 } 828 }
@@ -878,7 +881,7 @@ int __init icmpv6_init(void)
878 return 0; 881 return 0;
879 882
880fail: 883fail:
881 printk(KERN_ERR "Failed to register ICMP6 protocol\n"); 884 pr_err("Failed to register ICMP6 protocol\n");
882 unregister_pernet_subsys(&icmpv6_sk_ops); 885 unregister_pernet_subsys(&icmpv6_sk_ops);
883 return err; 886 return err;
884} 887}
@@ -947,7 +950,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
947 950
948 return fatal; 951 return fatal;
949} 952}
950
951EXPORT_SYMBOL(icmpv6_err_convert); 953EXPORT_SYMBOL(icmpv6_err_convert);
952 954
953#ifdef CONFIG_SYSCTL 955#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 02dd203d9eac..e6cee5292a0b 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,7 +28,7 @@
28#include <net/inet6_connection_sock.h> 28#include <net/inet6_connection_sock.h>
29 29
30int inet6_csk_bind_conflict(const struct sock *sk, 30int inet6_csk_bind_conflict(const struct sock *sk,
31 const struct inet_bind_bucket *tb) 31 const struct inet_bind_bucket *tb, bool relax)
32{ 32{
33 const struct sock *sk2; 33 const struct sock *sk2;
34 const struct hlist_node *node; 34 const struct hlist_node *node;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b82bcde53f7a..0c220a416626 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -18,6 +18,9 @@
18 * routing table. 18 * routing table.
19 * Ville Nuorvala: Fixed routing subtrees. 19 * Ville Nuorvala: Fixed routing subtrees.
20 */ 20 */
21
22#define pr_fmt(fmt) "IPv6: " fmt
23
21#include <linux/errno.h> 24#include <linux/errno.h>
22#include <linux/types.h> 25#include <linux/types.h>
23#include <linux/net.h> 26#include <linux/net.h>
@@ -38,7 +41,7 @@
38#define RT6_DEBUG 2 41#define RT6_DEBUG 2
39 42
40#if RT6_DEBUG >= 3 43#if RT6_DEBUG >= 3
41#define RT6_TRACE(x...) printk(KERN_DEBUG x) 44#define RT6_TRACE(x...) pr_debug(x)
42#else 45#else
43#define RT6_TRACE(x...) do { ; } while (0) 46#define RT6_TRACE(x...) do { ; } while (0)
44#endif 47#endif
@@ -451,12 +454,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
451 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { 454 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
452 if (!allow_create) { 455 if (!allow_create) {
453 if (replace_required) { 456 if (replace_required) {
454 pr_warn("IPv6: Can't replace route, " 457 pr_warn("Can't replace route, no match found\n");
455 "no match found\n");
456 return ERR_PTR(-ENOENT); 458 return ERR_PTR(-ENOENT);
457 } 459 }
458 pr_warn("IPv6: NLM_F_CREATE should be set " 460 pr_warn("NLM_F_CREATE should be set when creating new route\n");
459 "when creating new route\n");
460 } 461 }
461 goto insert_above; 462 goto insert_above;
462 } 463 }
@@ -499,11 +500,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
499 * That would keep IPv6 consistent with IPv4 500 * That would keep IPv6 consistent with IPv4
500 */ 501 */
501 if (replace_required) { 502 if (replace_required) {
502 pr_warn("IPv6: Can't replace route, no match found\n"); 503 pr_warn("Can't replace route, no match found\n");
503 return ERR_PTR(-ENOENT); 504 return ERR_PTR(-ENOENT);
504 } 505 }
505 pr_warn("IPv6: NLM_F_CREATE should be set " 506 pr_warn("NLM_F_CREATE should be set when creating new route\n");
506 "when creating new route\n");
507 } 507 }
508 /* 508 /*
509 * We walked to the bottom of tree. 509 * We walked to the bottom of tree.
@@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
673 &rt->rt6i_gateway)) { 673 &rt->rt6i_gateway)) {
674 if (!(iter->rt6i_flags & RTF_EXPIRES)) 674 if (!(iter->rt6i_flags & RTF_EXPIRES))
675 return -EEXIST; 675 return -EEXIST;
676 iter->dst.expires = rt->dst.expires; 676 if (!(rt->rt6i_flags & RTF_EXPIRES))
677 if (!(rt->rt6i_flags & RTF_EXPIRES)) { 677 rt6_clean_expires(iter);
678 iter->rt6i_flags &= ~RTF_EXPIRES; 678 else
679 iter->dst.expires = 0; 679 rt6_set_expires(iter, rt->dst.expires);
680 }
681 return -EEXIST; 680 return -EEXIST;
682 } 681 }
683 } 682 }
@@ -697,7 +696,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
697 */ 696 */
698 if (!replace) { 697 if (!replace) {
699 if (!add) 698 if (!add)
700 pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n"); 699 pr_warn("NLM_F_CREATE should be set when creating new route\n");
701 700
702add: 701add:
703 rt->dst.rt6_next = iter; 702 rt->dst.rt6_next = iter;
@@ -716,7 +715,7 @@ add:
716 if (!found) { 715 if (!found) {
717 if (add) 716 if (add)
718 goto add; 717 goto add;
719 pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n"); 718 pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
720 return -ENOENT; 719 return -ENOENT;
721 } 720 }
722 *ins = rt; 721 *ins = rt;
@@ -769,7 +768,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
769 replace_required = 1; 768 replace_required = 1;
770 } 769 }
771 if (!allow_create && !replace_required) 770 if (!allow_create && !replace_required)
772 pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); 771 pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
773 772
774 fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), 773 fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
775 rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), 774 rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
@@ -1421,7 +1420,8 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1421 res = fib6_del(rt, &info); 1420 res = fib6_del(rt, &info);
1422 if (res) { 1421 if (res) {
1423#if RT6_DEBUG >= 2 1422#if RT6_DEBUG >= 2
1424 printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res); 1423 pr_debug("%s: del failed: rt=%p@%p err=%d\n",
1424 __func__, rt, rt->rt6i_node, res);
1425#endif 1425#endif
1426 continue; 1426 continue;
1427 } 1427 }
@@ -1552,11 +1552,20 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { 1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1553 RT6_TRACE("aging clone %p\n", rt); 1553 RT6_TRACE("aging clone %p\n", rt);
1554 return -1; 1554 return -1;
1555 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1555 } else if (rt->rt6i_flags & RTF_GATEWAY) {
1556 (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) { 1556 struct neighbour *neigh;
1557 RT6_TRACE("purging route %p via non-router but gateway\n", 1557 __u8 neigh_flags = 0;
1558 rt); 1558
1559 return -1; 1559 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1560 if (neigh) {
1561 neigh_flags = neigh->flags;
1562 neigh_release(neigh);
1563 }
1564 if (neigh_flags & NTF_ROUTER) {
1565 RT6_TRACE("purging route %p via non-router but gateway\n",
1566 rt);
1567 return -1;
1568 }
1560 } 1569 }
1561 gc_args.more++; 1570 gc_args.more++;
1562 } 1571 }
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b7867a1215b1..9772fbd8a3f5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -294,6 +294,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
294 opt_space->opt_flen = fopt->opt_flen; 294 opt_space->opt_flen = fopt->opt_flen;
295 return opt_space; 295 return opt_space;
296} 296}
297EXPORT_SYMBOL_GPL(fl6_merge_options);
297 298
298static unsigned long check_linger(unsigned long ttl) 299static unsigned long check_linger(unsigned long ttl)
299{ 300{
@@ -432,32 +433,32 @@ static int mem_check(struct sock *sk)
432 return 0; 433 return 0;
433} 434}
434 435
435static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2) 436static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
436{ 437{
437 if (h1 == h2) 438 if (h1 == h2)
438 return 0; 439 return false;
439 if (h1 == NULL || h2 == NULL) 440 if (h1 == NULL || h2 == NULL)
440 return 1; 441 return true;
441 if (h1->hdrlen != h2->hdrlen) 442 if (h1->hdrlen != h2->hdrlen)
442 return 1; 443 return true;
443 return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1)); 444 return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
444} 445}
445 446
446static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2) 447static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
447{ 448{
448 if (o1 == o2) 449 if (o1 == o2)
449 return 0; 450 return false;
450 if (o1 == NULL || o2 == NULL) 451 if (o1 == NULL || o2 == NULL)
451 return 1; 452 return true;
452 if (o1->opt_nflen != o2->opt_nflen) 453 if (o1->opt_nflen != o2->opt_nflen)
453 return 1; 454 return true;
454 if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt)) 455 if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
455 return 1; 456 return true;
456 if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt)) 457 if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
457 return 1; 458 return true;
458 if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt)) 459 if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
459 return 1; 460 return true;
460 return 0; 461 return false;
461} 462}
462 463
463static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, 464static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
@@ -705,9 +706,9 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
705 struct ip6_flowlabel *fl = v; 706 struct ip6_flowlabel *fl = v;
706 seq_printf(seq, 707 seq_printf(seq,
707 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", 708 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
708 (unsigned)ntohl(fl->label), 709 (unsigned int)ntohl(fl->label),
709 fl->share, 710 fl->share,
710 (unsigned)fl->owner, 711 (int)fl->owner,
711 atomic_read(&fl->users), 712 atomic_read(&fl->users),
712 fl->linger/HZ, 713 fl->linger/HZ,
713 (long)(fl->expires - jiffies)/HZ, 714 (long)(fl->expires - jiffies)/HZ,
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 1ca5d45a12e8..21a15dfe4a9e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -170,7 +170,8 @@ static int ip6_input_finish(struct sk_buff *skb)
170{ 170{
171 const struct inet6_protocol *ipprot; 171 const struct inet6_protocol *ipprot;
172 unsigned int nhoff; 172 unsigned int nhoff;
173 int nexthdr, raw; 173 int nexthdr;
174 bool raw;
174 u8 hash; 175 u8 hash;
175 struct inet6_dev *idev; 176 struct inet6_dev *idev;
176 struct net *net = dev_net(skb_dst(skb)->dev); 177 struct net *net = dev_net(skb_dst(skb)->dev);
@@ -251,7 +252,7 @@ int ip6_input(struct sk_buff *skb)
251int ip6_mc_input(struct sk_buff *skb) 252int ip6_mc_input(struct sk_buff *skb)
252{ 253{
253 const struct ipv6hdr *hdr; 254 const struct ipv6hdr *hdr;
254 int deliver; 255 bool deliver;
255 256
256 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), 257 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
257 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, 258 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
@@ -287,7 +288,7 @@ int ip6_mc_input(struct sk_buff *skb)
287 * is for MLD (0x0000). 288 * is for MLD (0x0000).
288 */ 289 */
289 if ((ptr[2] | ptr[3]) == 0) { 290 if ((ptr[2] | ptr[3]) == 0) {
290 deliver = 0; 291 deliver = false;
291 292
292 if (!ipv6_ext_hdr(nexthdr)) { 293 if (!ipv6_ext_hdr(nexthdr)) {
293 /* BUG */ 294 /* BUG */
@@ -312,7 +313,7 @@ int ip6_mc_input(struct sk_buff *skb)
312 case ICMPV6_MGM_REPORT: 313 case ICMPV6_MGM_REPORT:
313 case ICMPV6_MGM_REDUCTION: 314 case ICMPV6_MGM_REDUCTION:
314 case ICMPV6_MLD2_REPORT: 315 case ICMPV6_MLD2_REPORT:
315 deliver = 1; 316 deliver = true;
316 break; 317 break;
317 } 318 }
318 goto out; 319 goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d97e07183ce9..17b8c67998bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,7 +210,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
210 kfree_skb(skb); 210 kfree_skb(skb);
211 return -ENOBUFS; 211 return -ENOBUFS;
212 } 212 }
213 kfree_skb(skb); 213 consume_skb(skb);
214 skb = skb2; 214 skb = skb2;
215 skb_set_owner_w(skb, sk); 215 skb_set_owner_w(skb, sk);
216 } 216 }
@@ -252,8 +252,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
252 dst->dev, dst_output); 252 dst->dev, dst_output);
253 } 253 }
254 254
255 if (net_ratelimit()) 255 net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
256 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
257 skb->dev = dst->dev; 256 skb->dev = dst->dev;
258 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 257 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
259 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 258 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
@@ -388,7 +387,6 @@ int ip6_forward(struct sk_buff *skb)
388 struct ipv6hdr *hdr = ipv6_hdr(skb); 387 struct ipv6hdr *hdr = ipv6_hdr(skb);
389 struct inet6_skb_parm *opt = IP6CB(skb); 388 struct inet6_skb_parm *opt = IP6CB(skb);
390 struct net *net = dev_net(dst->dev); 389 struct net *net = dev_net(dst->dev);
391 struct neighbour *n;
392 u32 mtu; 390 u32 mtu;
393 391
394 if (net->ipv6.devconf_all->forwarding == 0) 392 if (net->ipv6.devconf_all->forwarding == 0)
@@ -463,8 +461,7 @@ int ip6_forward(struct sk_buff *skb)
463 send redirects to source routed frames. 461 send redirects to source routed frames.
464 We don't send redirects to frames decapsulated from IPsec. 462 We don't send redirects to frames decapsulated from IPsec.
465 */ 463 */
466 n = dst_get_neighbour_noref(dst); 464 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
467 if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
468 struct in6_addr *target = NULL; 465 struct in6_addr *target = NULL;
469 struct rt6_info *rt; 466 struct rt6_info *rt;
470 467
@@ -474,8 +471,8 @@ int ip6_forward(struct sk_buff *skb)
474 */ 471 */
475 472
476 rt = (struct rt6_info *) dst; 473 rt = (struct rt6_info *) dst;
477 if ((rt->rt6i_flags & RTF_GATEWAY)) 474 if (rt->rt6i_flags & RTF_GATEWAY)
478 target = (struct in6_addr*)&n->primary_key; 475 target = &rt->rt6i_gateway;
479 else 476 else
480 target = &hdr->daddr; 477 target = &hdr->daddr;
481 478
@@ -486,7 +483,7 @@ int ip6_forward(struct sk_buff *skb)
486 and by source (inside ndisc_send_redirect) 483 and by source (inside ndisc_send_redirect)
487 */ 484 */
488 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 485 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
489 ndisc_send_redirect(skb, n, target); 486 ndisc_send_redirect(skb, target);
490 } else { 487 } else {
491 int addrtype = ipv6_addr_type(&hdr->saddr); 488 int addrtype = ipv6_addr_type(&hdr->saddr);
492 489
@@ -646,7 +643,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
646 /* We must not fragment if the socket is set to force MTU discovery 643 /* We must not fragment if the socket is set to force MTU discovery
647 * or if the skb it not generated by a local socket. 644 * or if the skb it not generated by a local socket.
648 */ 645 */
649 if (!skb->local_df && skb->len > mtu) { 646 if (unlikely(!skb->local_df && skb->len > mtu)) {
647 if (skb->sk && dst_allfrag(skb_dst(skb)))
648 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
649
650 skb->dev = skb_dst(skb)->dev; 650 skb->dev = skb_dst(skb)->dev;
651 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 651 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
652 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 652 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -791,6 +791,10 @@ slow_path_clean:
791 } 791 }
792 792
793slow_path: 793slow_path:
794 if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
795 skb_checksum_help(skb))
796 goto fail;
797
794 left = skb->len - hlen; /* Space per frame */ 798 left = skb->len - hlen; /* Space per frame */
795 ptr = hlen; /* Where to start from */ 799 ptr = hlen; /* Where to start from */
796 800
@@ -891,7 +895,7 @@ slow_path:
891 } 895 }
892 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 896 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
893 IPSTATS_MIB_FRAGOKS); 897 IPSTATS_MIB_FRAGOKS);
894 kfree_skb(skb); 898 consume_skb(skb);
895 return err; 899 return err;
896 900
897fail: 901fail:
@@ -1183,6 +1187,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1183 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; 1187 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1184} 1188}
1185 1189
1190static void ip6_append_data_mtu(int *mtu,
1191 int *maxfraglen,
1192 unsigned int fragheaderlen,
1193 struct sk_buff *skb,
1194 struct rt6_info *rt)
1195{
1196 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1197 if (skb == NULL) {
1198 /* first fragment, reserve header_len */
1199 *mtu = *mtu - rt->dst.header_len;
1200
1201 } else {
1202 /*
1203 * this fragment is not first, the headers
1204 * space is regarded as data space.
1205 */
1206 *mtu = dst_mtu(rt->dst.path);
1207 }
1208 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1209 + fragheaderlen - sizeof(struct frag_hdr);
1210 }
1211}
1212
1186int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, 1213int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1187 int offset, int len, int odd, struct sk_buff *skb), 1214 int offset, int len, int odd, struct sk_buff *skb),
1188 void *from, int length, int transhdrlen, 1215 void *from, int length, int transhdrlen,
@@ -1192,7 +1219,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1192 struct inet_sock *inet = inet_sk(sk); 1219 struct inet_sock *inet = inet_sk(sk);
1193 struct ipv6_pinfo *np = inet6_sk(sk); 1220 struct ipv6_pinfo *np = inet6_sk(sk);
1194 struct inet_cork *cork; 1221 struct inet_cork *cork;
1195 struct sk_buff *skb; 1222 struct sk_buff *skb, *skb_prev = NULL;
1196 unsigned int maxfraglen, fragheaderlen; 1223 unsigned int maxfraglen, fragheaderlen;
1197 int exthdrlen; 1224 int exthdrlen;
1198 int dst_exthdrlen; 1225 int dst_exthdrlen;
@@ -1201,7 +1228,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1201 int copy; 1228 int copy;
1202 int err; 1229 int err;
1203 int offset = 0; 1230 int offset = 0;
1204 int csummode = CHECKSUM_NONE;
1205 __u8 tx_flags = 0; 1231 __u8 tx_flags = 0;
1206 1232
1207 if (flags&MSG_PROBE) 1233 if (flags&MSG_PROBE)
@@ -1250,8 +1276,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1250 inet->cork.fl.u.ip6 = *fl6; 1276 inet->cork.fl.u.ip6 = *fl6;
1251 np->cork.hop_limit = hlimit; 1277 np->cork.hop_limit = hlimit;
1252 np->cork.tclass = tclass; 1278 np->cork.tclass = tclass;
1253 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? 1279 if (rt->dst.flags & DST_XFRM_TUNNEL)
1254 rt->dst.dev->mtu : dst_mtu(&rt->dst); 1280 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1281 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1282 else
1283 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1284 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1255 if (np->frag_size < mtu) { 1285 if (np->frag_size < mtu) {
1256 if (np->frag_size) 1286 if (np->frag_size)
1257 mtu = np->frag_size; 1287 mtu = np->frag_size;
@@ -1347,25 +1377,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1347 unsigned int fraglen; 1377 unsigned int fraglen;
1348 unsigned int fraggap; 1378 unsigned int fraggap;
1349 unsigned int alloclen; 1379 unsigned int alloclen;
1350 struct sk_buff *skb_prev;
1351alloc_new_skb: 1380alloc_new_skb:
1352 skb_prev = skb;
1353
1354 /* There's no room in the current skb */ 1381 /* There's no room in the current skb */
1355 if (skb_prev) 1382 if (skb)
1356 fraggap = skb_prev->len - maxfraglen; 1383 fraggap = skb->len - maxfraglen;
1357 else 1384 else
1358 fraggap = 0; 1385 fraggap = 0;
1386 /* update mtu and maxfraglen if necessary */
1387 if (skb == NULL || skb_prev == NULL)
1388 ip6_append_data_mtu(&mtu, &maxfraglen,
1389 fragheaderlen, skb, rt);
1390
1391 skb_prev = skb;
1359 1392
1360 /* 1393 /*
1361 * If remaining data exceeds the mtu, 1394 * If remaining data exceeds the mtu,
1362 * we know we need more fragment(s). 1395 * we know we need more fragment(s).
1363 */ 1396 */
1364 datalen = length + fraggap; 1397 datalen = length + fraggap;
1365 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1366 datalen = maxfraglen - fragheaderlen;
1367 1398
1368 fraglen = datalen + fragheaderlen; 1399 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1400 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1369 if ((flags & MSG_MORE) && 1401 if ((flags & MSG_MORE) &&
1370 !(rt->dst.dev->features&NETIF_F_SG)) 1402 !(rt->dst.dev->features&NETIF_F_SG))
1371 alloclen = mtu; 1403 alloclen = mtu;
@@ -1374,13 +1406,16 @@ alloc_new_skb:
1374 1406
1375 alloclen += dst_exthdrlen; 1407 alloclen += dst_exthdrlen;
1376 1408
1377 /* 1409 if (datalen != length + fraggap) {
1378 * The last fragment gets additional space at tail. 1410 /*
1379 * Note: we overallocate on fragments with MSG_MODE 1411 * this is not the last fragment, the trailer
1380 * because we have no idea if we're the last one. 1412 * space is regarded as data space.
1381 */ 1413 */
1382 if (datalen == length + fraggap) 1414 datalen += rt->dst.trailer_len;
1383 alloclen += rt->dst.trailer_len; 1415 }
1416
1417 alloclen += rt->dst.trailer_len;
1418 fraglen = datalen + fragheaderlen;
1384 1419
1385 /* 1420 /*
1386 * We just reserve space for fragment header. 1421 * We just reserve space for fragment header.
@@ -1414,10 +1449,11 @@ alloc_new_skb:
1414 /* 1449 /*
1415 * Fill in the control structures 1450 * Fill in the control structures
1416 */ 1451 */
1417 skb->ip_summed = csummode; 1452 skb->ip_summed = CHECKSUM_NONE;
1418 skb->csum = 0; 1453 skb->csum = 0;
1419 /* reserve for fragmentation */ 1454 /* reserve for fragmentation and ipsec header */
1420 skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); 1455 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1456 dst_exthdrlen);
1421 1457
1422 if (sk->sk_type == SOCK_DGRAM) 1458 if (sk->sk_type == SOCK_DGRAM)
1423 skb_shinfo(skb)->tx_flags = tx_flags; 1459 skb_shinfo(skb)->tx_flags = tx_flags;
@@ -1425,9 +1461,9 @@ alloc_new_skb:
1425 /* 1461 /*
1426 * Find where to start putting bytes 1462 * Find where to start putting bytes
1427 */ 1463 */
1428 data = skb_put(skb, fraglen + dst_exthdrlen); 1464 data = skb_put(skb, fraglen);
1429 skb_set_network_header(skb, exthdrlen + dst_exthdrlen); 1465 skb_set_network_header(skb, exthdrlen);
1430 data += fragheaderlen + dst_exthdrlen; 1466 data += fragheaderlen;
1431 skb->transport_header = (skb->network_header + 1467 skb->transport_header = (skb->network_header +
1432 fragheaderlen); 1468 fragheaderlen);
1433 if (fraggap) { 1469 if (fraggap) {
@@ -1456,7 +1492,6 @@ alloc_new_skb:
1456 transhdrlen = 0; 1492 transhdrlen = 0;
1457 exthdrlen = 0; 1493 exthdrlen = 0;
1458 dst_exthdrlen = 0; 1494 dst_exthdrlen = 0;
1459 csummode = CHECKSUM_NONE;
1460 1495
1461 /* 1496 /*
1462 * Put the packet on the pending queue 1497 * Put the packet on the pending queue
@@ -1536,6 +1571,7 @@ error:
1536 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1571 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1537 return err; 1572 return err;
1538} 1573}
1574EXPORT_SYMBOL_GPL(ip6_append_data);
1539 1575
1540static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) 1576static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1541{ 1577{
@@ -1639,6 +1675,7 @@ error:
1639 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1675 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1640 goto out; 1676 goto out;
1641} 1677}
1678EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1642 1679
1643void ip6_flush_pending_frames(struct sock *sk) 1680void ip6_flush_pending_frames(struct sock *sk)
1644{ 1681{
@@ -1653,3 +1690,4 @@ void ip6_flush_pending_frames(struct sock *sk)
1653 1690
1654 ip6_cork_release(inet_sk(sk), inet6_sk(sk)); 1691 ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1655} 1692}
1693EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6a09cd..c9015fad8d65 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -18,6 +18,8 @@
18 * 18 *
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/capability.h> 24#include <linux/capability.h>
23#include <linux/errno.h> 25#include <linux/errno.h>
@@ -60,7 +62,7 @@ MODULE_LICENSE("GPL");
60MODULE_ALIAS_NETDEV("ip6tnl0"); 62MODULE_ALIAS_NETDEV("ip6tnl0");
61 63
62#ifdef IP6_TNL_DEBUG 64#ifdef IP6_TNL_DEBUG
63#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 65#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
64#else 66#else
65#define IP6_TNL_TRACE(x...) do {;} while(0) 67#define IP6_TNL_TRACE(x...) do {;} while(0)
66#endif 68#endif
@@ -198,7 +200,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198{ 200{
199 const struct in6_addr *remote = &p->raddr; 201 const struct in6_addr *remote = &p->raddr;
200 const struct in6_addr *local = &p->laddr; 202 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0; 203 unsigned int h = 0;
202 int prio = 0; 204 int prio = 0;
203 205
204 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { 206 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
@@ -460,19 +462,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
460 struct ipv6_tlv_tnl_enc_lim *tel; 462 struct ipv6_tlv_tnl_enc_lim *tel;
461 __u32 mtu; 463 __u32 mtu;
462 case ICMPV6_DEST_UNREACH: 464 case ICMPV6_DEST_UNREACH:
463 if (net_ratelimit()) 465 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
464 printk(KERN_WARNING 466 t->parms.name);
465 "%s: Path to destination invalid "
466 "or inactive!\n", t->parms.name);
467 rel_msg = 1; 467 rel_msg = 1;
468 break; 468 break;
469 case ICMPV6_TIME_EXCEED: 469 case ICMPV6_TIME_EXCEED:
470 if ((*code) == ICMPV6_EXC_HOPLIMIT) { 470 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
471 if (net_ratelimit()) 471 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
472 printk(KERN_WARNING 472 t->parms.name);
473 "%s: Too small hop limit or "
474 "routing loop in tunnel!\n",
475 t->parms.name);
476 rel_msg = 1; 473 rel_msg = 1;
477 } 474 }
478 break; 475 break;
@@ -484,17 +481,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
484 if (teli && teli == *info - 2) { 481 if (teli && teli == *info - 2) {
485 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 482 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
486 if (tel->encap_limit == 0) { 483 if (tel->encap_limit == 0) {
487 if (net_ratelimit()) 484 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
488 printk(KERN_WARNING 485 t->parms.name);
489 "%s: Too small encapsulation "
490 "limit or routing loop in "
491 "tunnel!\n", t->parms.name);
492 rel_msg = 1; 486 rel_msg = 1;
493 } 487 }
494 } else if (net_ratelimit()) { 488 } else {
495 printk(KERN_WARNING 489 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
496 "%s: Recipient unable to parse tunneled " 490 t->parms.name);
497 "packet!\n ", t->parms.name);
498 } 491 }
499 break; 492 break;
500 case ICMPV6_PKT_TOOBIG: 493 case ICMPV6_PKT_TOOBIG:
@@ -825,7 +818,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
825 * 0 else 818 * 0 else
826 **/ 819 **/
827 820
828static inline int 821static inline bool
829ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) 822ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
830{ 823{
831 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 824 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
@@ -845,15 +838,12 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
845 ldev = dev_get_by_index_rcu(net, p->link); 838 ldev = dev_get_by_index_rcu(net, p->link);
846 839
847 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) 840 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
848 printk(KERN_WARNING 841 pr_warn("%s xmit: Local address not yet configured!\n",
849 "%s xmit: Local address not yet configured!\n", 842 p->name);
850 p->name);
851 else if (!ipv6_addr_is_multicast(&p->raddr) && 843 else if (!ipv6_addr_is_multicast(&p->raddr) &&
852 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) 844 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
853 printk(KERN_WARNING 845 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
854 "%s xmit: Routing loop! " 846 p->name);
855 "Remote address found on this node!\n",
856 p->name);
857 else 847 else
858 ret = 1; 848 ret = 1;
859 rcu_read_unlock(); 849 rcu_read_unlock();
@@ -919,10 +909,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
919 909
920 if (tdev == dev) { 910 if (tdev == dev) {
921 stats->collisions++; 911 stats->collisions++;
922 if (net_ratelimit()) 912 net_warn_ratelimited("%s: Local routing loop detected!\n",
923 printk(KERN_WARNING 913 t->parms.name);
924 "%s: Local routing loop detected!\n",
925 t->parms.name);
926 goto tx_err_dst_release; 914 goto tx_err_dst_release;
927 } 915 }
928 mtu = dst_mtu(dst) - sizeof (*ipv6h); 916 mtu = dst_mtu(dst) - sizeof (*ipv6h);
@@ -954,7 +942,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
954 942
955 if (skb->sk) 943 if (skb->sk)
956 skb_set_owner_w(new_skb, skb->sk); 944 skb_set_owner_w(new_skb, skb->sk);
957 kfree_skb(skb); 945 consume_skb(skb);
958 skb = new_skb; 946 skb = new_skb;
959 } 947 }
960 skb_dst_drop(skb); 948 skb_dst_drop(skb);
@@ -1553,13 +1541,13 @@ static int __init ip6_tunnel_init(void)
1553 1541
1554 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET); 1542 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1555 if (err < 0) { 1543 if (err < 0) {
1556 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); 1544 pr_err("%s: can't register ip4ip6\n", __func__);
1557 goto out_ip4ip6; 1545 goto out_ip4ip6;
1558 } 1546 }
1559 1547
1560 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6); 1548 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1561 if (err < 0) { 1549 if (err < 0) {
1562 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); 1550 pr_err("%s: can't register ip6ip6\n", __func__);
1563 goto out_ip6ip6; 1551 goto out_ip6ip6;
1564 } 1552 }
1565 1553
@@ -1580,10 +1568,10 @@ out_pernet:
1580static void __exit ip6_tunnel_cleanup(void) 1568static void __exit ip6_tunnel_cleanup(void)
1581{ 1569{
1582 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) 1570 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
1583 printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n"); 1571 pr_info("%s: can't deregister ip4ip6\n", __func__);
1584 1572
1585 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) 1573 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1586 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); 1574 pr_info("%s: can't deregister ip6ip6\n", __func__);
1587 1575
1588 unregister_pernet_device(&ip6_tnl_net_ops); 1576 unregister_pernet_device(&ip6_tnl_net_ops);
1589} 1577}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 5aa3981a3922..b15dc08643a4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -16,7 +16,6 @@
16 * 16 *
17 */ 17 */
18 18
19#include <asm/system.h>
20#include <asm/uaccess.h> 19#include <asm/uaccess.h>
21#include <linux/types.h> 20#include <linux/types.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
@@ -1148,8 +1147,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1148 */ 1147 */
1149 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); 1148 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1150 if (ret < 0) { 1149 if (ret < 0) {
1151 if (net_ratelimit()) 1150 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1152 printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
1153 kfree_skb(skb); 1151 kfree_skb(skb);
1154 } 1152 }
1155 1153
@@ -1352,7 +1350,7 @@ int __init ip6_mr_init(void)
1352 goto reg_notif_fail; 1350 goto reg_notif_fail;
1353#ifdef CONFIG_IPV6_PIMSM_V2 1351#ifdef CONFIG_IPV6_PIMSM_V2
1354 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) { 1352 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1355 printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n"); 1353 pr_err("%s: can't add PIM protocol\n", __func__);
1356 err = -EAGAIN; 1354 err = -EAGAIN;
1357 goto add_proto_fail; 1355 goto add_proto_fail;
1358 } 1356 }
@@ -2216,14 +2214,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2216 rtm->rtm_src_len = 128; 2214 rtm->rtm_src_len = 128;
2217 rtm->rtm_tos = 0; 2215 rtm->rtm_tos = 0;
2218 rtm->rtm_table = mrt->id; 2216 rtm->rtm_table = mrt->id;
2219 NLA_PUT_U32(skb, RTA_TABLE, mrt->id); 2217 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2218 goto nla_put_failure;
2220 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2219 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2221 rtm->rtm_protocol = RTPROT_UNSPEC; 2220 rtm->rtm_protocol = RTPROT_UNSPEC;
2222 rtm->rtm_flags = 0; 2221 rtm->rtm_flags = 0;
2223 2222
2224 NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); 2223 if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
2225 NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); 2224 nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
2226 2225 goto nla_put_failure;
2227 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) 2226 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2228 goto nla_put_failure; 2227 goto nla_put_failure;
2229 2228
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index bba658d9a03c..5cb75bfe45b1 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -30,6 +30,9 @@
30 * The decompression of IP datagram MUST be done after the reassembly, 30 * The decompression of IP datagram MUST be done after the reassembly,
31 * AH/ESP processing. 31 * AH/ESP processing.
32 */ 32 */
33
34#define pr_fmt(fmt) "IPv6: " fmt
35
33#include <linux/module.h> 36#include <linux/module.h>
34#include <net/ip.h> 37#include <net/ip.h>
35#include <net/xfrm.h> 38#include <net/xfrm.h>
@@ -69,8 +72,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
69 if (!x) 72 if (!x)
70 return; 73 return;
71 74
72 printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n", 75 pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
73 spi, &iph->daddr); 76 spi, &iph->daddr);
74 xfrm_state_put(x); 77 xfrm_state_put(x);
75} 78}
76 79
@@ -190,11 +193,11 @@ static const struct inet6_protocol ipcomp6_protocol =
190static int __init ipcomp6_init(void) 193static int __init ipcomp6_init(void)
191{ 194{
192 if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) { 195 if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) {
193 printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n"); 196 pr_info("%s: can't add xfrm type\n", __func__);
194 return -EAGAIN; 197 return -EAGAIN;
195 } 198 }
196 if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) { 199 if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
197 printk(KERN_INFO "ipcomp6 init: can't add protocol\n"); 200 pr_info("%s: can't add protocol\n", __func__);
198 xfrm_unregister_type(&ipcomp6_type, AF_INET6); 201 xfrm_unregister_type(&ipcomp6_type, AF_INET6);
199 return -EAGAIN; 202 return -EAGAIN;
200 } 203 }
@@ -204,9 +207,9 @@ static int __init ipcomp6_init(void)
204static void __exit ipcomp6_fini(void) 207static void __exit ipcomp6_fini(void)
205{ 208{
206 if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) 209 if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
207 printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n"); 210 pr_info("%s: can't remove protocol\n", __func__);
208 if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0) 211 if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
209 printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n"); 212 pr_info("%s: can't remove xfrm type\n", __func__);
210} 213}
211 214
212module_init(ipcomp6_init); 215module_init(ipcomp6_init);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 18a2719003c3..ba6d13d1f1e1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -516,6 +516,36 @@ done:
516 retv = 0; 516 retv = 0;
517 break; 517 break;
518 518
519 case IPV6_UNICAST_IF:
520 {
521 struct net_device *dev = NULL;
522 int ifindex;
523
524 if (optlen != sizeof(int))
525 goto e_inval;
526
527 ifindex = (__force int)ntohl((__force __be32)val);
528 if (ifindex == 0) {
529 np->ucast_oif = 0;
530 retv = 0;
531 break;
532 }
533
534 dev = dev_get_by_index(net, ifindex);
535 retv = -EADDRNOTAVAIL;
536 if (!dev)
537 break;
538 dev_put(dev);
539
540 retv = -EINVAL;
541 if (sk->sk_bound_dev_if)
542 break;
543
544 np->ucast_oif = ifindex;
545 retv = 0;
546 break;
547 }
548
519 case IPV6_MULTICAST_IF: 549 case IPV6_MULTICAST_IF:
520 if (sk->sk_type == SOCK_STREAM) 550 if (sk->sk_type == SOCK_STREAM)
521 break; 551 break;
@@ -648,7 +678,6 @@ done:
648 } 678 }
649 case MCAST_MSFILTER: 679 case MCAST_MSFILTER:
650 { 680 {
651 extern int sysctl_mld_max_msf;
652 struct group_filter *gsf; 681 struct group_filter *gsf;
653 682
654 if (optlen < GROUP_FILTER_SIZE(0)) 683 if (optlen < GROUP_FILTER_SIZE(0))
@@ -913,7 +942,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
913} 942}
914 943
915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 944static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
916 char __user *optval, int __user *optlen, unsigned flags) 945 char __user *optval, int __user *optlen, unsigned int flags)
917{ 946{
918 struct ipv6_pinfo *np = inet6_sk(sk); 947 struct ipv6_pinfo *np = inet6_sk(sk);
919 int len; 948 int len;
@@ -987,6 +1016,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
987 int hlim = np->mcast_hops; 1016 int hlim = np->mcast_hops;
988 put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim); 1017 put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
989 } 1018 }
1019 if (np->rxopt.bits.rxtclass) {
1020 int tclass = np->rcv_tclass;
1021 put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
1022 }
990 if (np->rxopt.bits.rxoinfo) { 1023 if (np->rxopt.bits.rxoinfo) {
991 struct in6_pktinfo src_info; 1024 struct in6_pktinfo src_info;
992 src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : 1025 src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
@@ -1160,6 +1193,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1160 val = np->mcast_oif; 1193 val = np->mcast_oif;
1161 break; 1194 break;
1162 1195
1196 case IPV6_UNICAST_IF:
1197 val = (__force int)htonl((__u32) np->ucast_oif);
1198 break;
1199
1163 case IPV6_MTU_DISCOVER: 1200 case IPV6_MTU_DISCOVER:
1164 val = np->pmtudisc; 1201 val = np->pmtudisc;
1165 break; 1202 break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 16c33e308121..6d0f5dc8e3a6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -606,13 +606,13 @@ done:
606 return err; 606 return err;
607} 607}
608 608
609int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, 609bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
610 const struct in6_addr *src_addr) 610 const struct in6_addr *src_addr)
611{ 611{
612 struct ipv6_pinfo *np = inet6_sk(sk); 612 struct ipv6_pinfo *np = inet6_sk(sk);
613 struct ipv6_mc_socklist *mc; 613 struct ipv6_mc_socklist *mc;
614 struct ip6_sf_socklist *psl; 614 struct ip6_sf_socklist *psl;
615 int rv = 1; 615 bool rv = true;
616 616
617 rcu_read_lock(); 617 rcu_read_lock();
618 for_each_pmc_rcu(np, mc) { 618 for_each_pmc_rcu(np, mc) {
@@ -621,7 +621,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
621 } 621 }
622 if (!mc) { 622 if (!mc) {
623 rcu_read_unlock(); 623 rcu_read_unlock();
624 return 1; 624 return true;
625 } 625 }
626 read_lock(&mc->sflock); 626 read_lock(&mc->sflock);
627 psl = mc->sflist; 627 psl = mc->sflist;
@@ -635,9 +635,9 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
635 break; 635 break;
636 } 636 }
637 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 637 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
638 rv = 0; 638 rv = false;
639 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 639 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
640 rv = 0; 640 rv = false;
641 } 641 }
642 read_unlock(&mc->sflock); 642 read_unlock(&mc->sflock);
643 rcu_read_unlock(); 643 rcu_read_unlock();
@@ -931,15 +931,15 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
931/* 931/*
932 * identify MLD packets for MLD filter exceptions 932 * identify MLD packets for MLD filter exceptions
933 */ 933 */
934int ipv6_is_mld(struct sk_buff *skb, int nexthdr) 934bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
935{ 935{
936 struct icmp6hdr *pic; 936 struct icmp6hdr *pic;
937 937
938 if (nexthdr != IPPROTO_ICMPV6) 938 if (nexthdr != IPPROTO_ICMPV6)
939 return 0; 939 return false;
940 940
941 if (!pskb_may_pull(skb, sizeof(struct icmp6hdr))) 941 if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
942 return 0; 942 return false;
943 943
944 pic = icmp6_hdr(skb); 944 pic = icmp6_hdr(skb);
945 945
@@ -948,22 +948,22 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
948 case ICMPV6_MGM_REPORT: 948 case ICMPV6_MGM_REPORT:
949 case ICMPV6_MGM_REDUCTION: 949 case ICMPV6_MGM_REDUCTION:
950 case ICMPV6_MLD2_REPORT: 950 case ICMPV6_MLD2_REPORT:
951 return 1; 951 return true;
952 default: 952 default:
953 break; 953 break;
954 } 954 }
955 return 0; 955 return false;
956} 956}
957 957
958/* 958/*
959 * check if the interface/address pair is valid 959 * check if the interface/address pair is valid
960 */ 960 */
961int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, 961bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
962 const struct in6_addr *src_addr) 962 const struct in6_addr *src_addr)
963{ 963{
964 struct inet6_dev *idev; 964 struct inet6_dev *idev;
965 struct ifmcaddr6 *mc; 965 struct ifmcaddr6 *mc;
966 int rv = 0; 966 bool rv = false;
967 967
968 rcu_read_lock(); 968 rcu_read_lock();
969 idev = __in6_dev_get(dev); 969 idev = __in6_dev_get(dev);
@@ -990,7 +990,7 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
990 rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; 990 rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
991 spin_unlock_bh(&mc->mca_lock); 991 spin_unlock_bh(&mc->mca_lock);
992 } else 992 } else
993 rv = 1; /* don't filter unspecified source */ 993 rv = true; /* don't filter unspecified source */
994 } 994 }
995 read_unlock_bh(&idev->lock); 995 read_unlock_bh(&idev->lock);
996 } 996 }
@@ -1046,8 +1046,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1046} 1046}
1047 1047
1048/* mark EXCLUDE-mode sources */ 1048/* mark EXCLUDE-mode sources */
1049static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, 1049static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1050 const struct in6_addr *srcs) 1050 const struct in6_addr *srcs)
1051{ 1051{
1052 struct ip6_sf_list *psf; 1052 struct ip6_sf_list *psf;
1053 int i, scount; 1053 int i, scount;
@@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1061 if (psf->sf_count[MCAST_INCLUDE] || 1061 if (psf->sf_count[MCAST_INCLUDE] ||
1062 pmc->mca_sfcount[MCAST_EXCLUDE] != 1062 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1063 psf->sf_count[MCAST_EXCLUDE]) 1063 psf->sf_count[MCAST_EXCLUDE])
1064 continue; 1064 break;
1065 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { 1065 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1066 scount++; 1066 scount++;
1067 break; 1067 break;
@@ -1070,12 +1070,12 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1070 } 1070 }
1071 pmc->mca_flags &= ~MAF_GSQUERY; 1071 pmc->mca_flags &= ~MAF_GSQUERY;
1072 if (scount == nsrcs) /* all sources excluded */ 1072 if (scount == nsrcs) /* all sources excluded */
1073 return 0; 1073 return false;
1074 return 1; 1074 return true;
1075} 1075}
1076 1076
1077static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, 1077static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1078 const struct in6_addr *srcs) 1078 const struct in6_addr *srcs)
1079{ 1079{
1080 struct ip6_sf_list *psf; 1080 struct ip6_sf_list *psf;
1081 int i, scount; 1081 int i, scount;
@@ -1099,10 +1099,10 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1099 } 1099 }
1100 if (!scount) { 1100 if (!scount) {
1101 pmc->mca_flags &= ~MAF_GSQUERY; 1101 pmc->mca_flags &= ~MAF_GSQUERY;
1102 return 0; 1102 return false;
1103 } 1103 }
1104 pmc->mca_flags |= MAF_GSQUERY; 1104 pmc->mca_flags |= MAF_GSQUERY;
1105 return 1; 1105 return true;
1106} 1106}
1107 1107
1108/* called with rcu_read_lock() */ 1108/* called with rcu_read_lock() */
@@ -1276,17 +1276,17 @@ int igmp6_event_report(struct sk_buff *skb)
1276 return 0; 1276 return 0;
1277} 1277}
1278 1278
1279static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, 1279static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1280 int gdeleted, int sdeleted) 1280 int gdeleted, int sdeleted)
1281{ 1281{
1282 switch (type) { 1282 switch (type) {
1283 case MLD2_MODE_IS_INCLUDE: 1283 case MLD2_MODE_IS_INCLUDE:
1284 case MLD2_MODE_IS_EXCLUDE: 1284 case MLD2_MODE_IS_EXCLUDE:
1285 if (gdeleted || sdeleted) 1285 if (gdeleted || sdeleted)
1286 return 0; 1286 return false;
1287 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) { 1287 if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1288 if (pmc->mca_sfmode == MCAST_INCLUDE) 1288 if (pmc->mca_sfmode == MCAST_INCLUDE)
1289 return 1; 1289 return true;
1290 /* don't include if this source is excluded 1290 /* don't include if this source is excluded
1291 * in all filters 1291 * in all filters
1292 */ 1292 */
@@ -1295,29 +1295,29 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1295 return pmc->mca_sfcount[MCAST_EXCLUDE] == 1295 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1296 psf->sf_count[MCAST_EXCLUDE]; 1296 psf->sf_count[MCAST_EXCLUDE];
1297 } 1297 }
1298 return 0; 1298 return false;
1299 case MLD2_CHANGE_TO_INCLUDE: 1299 case MLD2_CHANGE_TO_INCLUDE:
1300 if (gdeleted || sdeleted) 1300 if (gdeleted || sdeleted)
1301 return 0; 1301 return false;
1302 return psf->sf_count[MCAST_INCLUDE] != 0; 1302 return psf->sf_count[MCAST_INCLUDE] != 0;
1303 case MLD2_CHANGE_TO_EXCLUDE: 1303 case MLD2_CHANGE_TO_EXCLUDE:
1304 if (gdeleted || sdeleted) 1304 if (gdeleted || sdeleted)
1305 return 0; 1305 return false;
1306 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 || 1306 if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1307 psf->sf_count[MCAST_INCLUDE]) 1307 psf->sf_count[MCAST_INCLUDE])
1308 return 0; 1308 return false;
1309 return pmc->mca_sfcount[MCAST_EXCLUDE] == 1309 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1310 psf->sf_count[MCAST_EXCLUDE]; 1310 psf->sf_count[MCAST_EXCLUDE];
1311 case MLD2_ALLOW_NEW_SOURCES: 1311 case MLD2_ALLOW_NEW_SOURCES:
1312 if (gdeleted || !psf->sf_crcount) 1312 if (gdeleted || !psf->sf_crcount)
1313 return 0; 1313 return false;
1314 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted; 1314 return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1315 case MLD2_BLOCK_OLD_SOURCES: 1315 case MLD2_BLOCK_OLD_SOURCES:
1316 if (pmc->mca_sfmode == MCAST_INCLUDE) 1316 if (pmc->mca_sfmode == MCAST_INCLUDE)
1317 return gdeleted || (psf->sf_crcount && sdeleted); 1317 return gdeleted || (psf->sf_crcount && sdeleted);
1318 return psf->sf_crcount && !gdeleted && !sdeleted; 1318 return psf->sf_crcount && !gdeleted && !sdeleted;
1319 } 1319 }
1320 return 0; 1320 return false;
1321} 1321}
1322 1322
1323static int 1323static int
@@ -2044,7 +2044,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2044 if (!delta) 2044 if (!delta)
2045 pmc->mca_sfcount[sfmode]--; 2045 pmc->mca_sfcount[sfmode]--;
2046 for (j=0; j<i; j++) 2046 for (j=0; j<i; j++)
2047 (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); 2047 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2048 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { 2048 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2049 struct ip6_sf_list *psf; 2049 struct ip6_sf_list *psf;
2050 2050
@@ -2627,8 +2627,7 @@ static int __net_init igmp6_net_init(struct net *net)
2627 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6, 2627 err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
2628 SOCK_RAW, IPPROTO_ICMPV6, net); 2628 SOCK_RAW, IPPROTO_ICMPV6, net);
2629 if (err < 0) { 2629 if (err < 0) {
2630 printk(KERN_ERR 2630 pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
2631 "Failed to initialize the IGMP6 control socket (err %d).\n",
2632 err); 2631 err);
2633 goto out; 2632 goto out;
2634 } 2633 }
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 7e1e0fbfef21..5b087c31d87b 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -22,6 +22,8 @@
22 * Masahide NAKAMURA @USAGI 22 * Masahide NAKAMURA @USAGI
23 */ 23 */
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
25#include <linux/module.h> 27#include <linux/module.h>
26#include <linux/skbuff.h> 28#include <linux/skbuff.h>
27#include <linux/time.h> 29#include <linux/time.h>
@@ -44,7 +46,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
44 if (!data) 46 if (!data)
45 return NULL; 47 return NULL;
46 if (padlen == 1) { 48 if (padlen == 1) {
47 data[0] = IPV6_TLV_PAD0; 49 data[0] = IPV6_TLV_PAD1;
48 } else if (padlen > 1) { 50 } else if (padlen > 1) {
49 data[0] = IPV6_TLV_PADN; 51 data[0] = IPV6_TLV_PADN;
50 data[1] = padlen - 2; 52 data[1] = padlen - 2;
@@ -307,13 +309,12 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
307static int mip6_destopt_init_state(struct xfrm_state *x) 309static int mip6_destopt_init_state(struct xfrm_state *x)
308{ 310{
309 if (x->id.spi) { 311 if (x->id.spi) {
310 printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, 312 pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
311 x->id.spi);
312 return -EINVAL; 313 return -EINVAL;
313 } 314 }
314 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { 315 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
315 printk(KERN_INFO "%s: state's mode is not %u: %u\n", 316 pr_info("%s: state's mode is not %u: %u\n",
316 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); 317 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
317 return -EINVAL; 318 return -EINVAL;
318 } 319 }
319 320
@@ -443,13 +444,12 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
443static int mip6_rthdr_init_state(struct xfrm_state *x) 444static int mip6_rthdr_init_state(struct xfrm_state *x)
444{ 445{
445 if (x->id.spi) { 446 if (x->id.spi) {
446 printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, 447 pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
447 x->id.spi);
448 return -EINVAL; 448 return -EINVAL;
449 } 449 }
450 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { 450 if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
451 printk(KERN_INFO "%s: state's mode is not %u: %u\n", 451 pr_info("%s: state's mode is not %u: %u\n",
452 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); 452 __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
453 return -EINVAL; 453 return -EINVAL;
454 } 454 }
455 455
@@ -481,18 +481,18 @@ static const struct xfrm_type mip6_rthdr_type =
481 481
482static int __init mip6_init(void) 482static int __init mip6_init(void)
483{ 483{
484 printk(KERN_INFO "Mobile IPv6\n"); 484 pr_info("Mobile IPv6\n");
485 485
486 if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { 486 if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
487 printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__); 487 pr_info("%s: can't add xfrm type(destopt)\n", __func__);
488 goto mip6_destopt_xfrm_fail; 488 goto mip6_destopt_xfrm_fail;
489 } 489 }
490 if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { 490 if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
491 printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__); 491 pr_info("%s: can't add xfrm type(rthdr)\n", __func__);
492 goto mip6_rthdr_xfrm_fail; 492 goto mip6_rthdr_xfrm_fail;
493 } 493 }
494 if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { 494 if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
495 printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__); 495 pr_info("%s: can't add rawv6 mh filter\n", __func__);
496 goto mip6_rawv6_mh_fail; 496 goto mip6_rawv6_mh_fail;
497 } 497 }
498 498
@@ -510,11 +510,11 @@ static int __init mip6_init(void)
510static void __exit mip6_fini(void) 510static void __exit mip6_fini(void)
511{ 511{
512 if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) 512 if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
513 printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__); 513 pr_info("%s: can't remove rawv6 mh filter\n", __func__);
514 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) 514 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
515 printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__); 515 pr_info("%s: can't remove xfrm type(rthdr)\n", __func__);
516 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) 516 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
517 printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__); 517 pr_info("%s: can't remove xfrm type(destopt)\n", __func__);
518} 518}
519 519
520module_init(mip6_init); 520module_init(mip6_init);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c964958ac470..54f62d3b8dd6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -15,6 +15,7 @@
15/* 15/*
16 * Changes: 16 * Changes:
17 * 17 *
18 * Alexey I. Froloff : RFC6106 (DNSSL) support
18 * Pierre Ynard : export userland ND options 19 * Pierre Ynard : export userland ND options
19 * through netlink (RDNSS support) 20 * through netlink (RDNSS support)
20 * Lars Fenneberg : fixed MTU setting on receipt 21 * Lars Fenneberg : fixed MTU setting on receipt
@@ -26,27 +27,7 @@
26 * YOSHIFUJI Hideaki @USAGI : Verify ND options properly 27 * YOSHIFUJI Hideaki @USAGI : Verify ND options properly
27 */ 28 */
28 29
29/* Set to 3 to get tracing... */ 30#define pr_fmt(fmt) "ICMPv6: " fmt
30#define ND_DEBUG 1
31
32#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0)
33#define ND_NOPRINTK(x...) do { ; } while(0)
34#define ND_PRINTK0 ND_PRINTK
35#define ND_PRINTK1 ND_NOPRINTK
36#define ND_PRINTK2 ND_NOPRINTK
37#define ND_PRINTK3 ND_NOPRINTK
38#if ND_DEBUG >= 1
39#undef ND_PRINTK1
40#define ND_PRINTK1 ND_PRINTK
41#endif
42#if ND_DEBUG >= 2
43#undef ND_PRINTK2
44#define ND_PRINTK2 ND_PRINTK
45#endif
46#if ND_DEBUG >= 3
47#undef ND_PRINTK3
48#define ND_PRINTK3 ND_PRINTK
49#endif
50 31
51#include <linux/module.h> 32#include <linux/module.h>
52#include <linux/errno.h> 33#include <linux/errno.h>
@@ -91,6 +72,15 @@
91#include <linux/netfilter.h> 72#include <linux/netfilter.h>
92#include <linux/netfilter_ipv6.h> 73#include <linux/netfilter_ipv6.h>
93 74
75/* Set to 3 to get tracing... */
76#define ND_DEBUG 1
77
78#define ND_PRINTK(val, level, fmt, ...) \
79do { \
80 if (val <= ND_DEBUG) \
81 net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
82} while (0)
83
94static u32 ndisc_hash(const void *pkey, 84static u32 ndisc_hash(const void *pkey,
95 const struct net_device *dev, 85 const struct net_device *dev,
96 __u32 *hash_rnd); 86 __u32 *hash_rnd);
@@ -228,7 +218,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
228 218
229static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) 219static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
230{ 220{
231 return opt->nd_opt_type == ND_OPT_RDNSS; 221 return opt->nd_opt_type == ND_OPT_RDNSS ||
222 opt->nd_opt_type == ND_OPT_DNSSL;
232} 223}
233 224
234static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, 225static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -263,10 +254,9 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
263 case ND_OPT_MTU: 254 case ND_OPT_MTU:
264 case ND_OPT_REDIRECT_HDR: 255 case ND_OPT_REDIRECT_HDR:
265 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { 256 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
266 ND_PRINTK2(KERN_WARNING 257 ND_PRINTK(2, warn,
267 "%s(): duplicated ND6 option found: type=%d\n", 258 "%s: duplicated ND6 option found: type=%d\n",
268 __func__, 259 __func__, nd_opt->nd_opt_type);
269 nd_opt->nd_opt_type);
270 } else { 260 } else {
271 ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; 261 ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
272 } 262 }
@@ -294,10 +284,11 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
294 * to accommodate future extension to the 284 * to accommodate future extension to the
295 * protocol. 285 * protocol.
296 */ 286 */
297 ND_PRINTK2(KERN_NOTICE 287 ND_PRINTK(2, notice,
298 "%s(): ignored unsupported option; type=%d, len=%d\n", 288 "%s: ignored unsupported option; type=%d, len=%d\n",
299 __func__, 289 __func__,
300 nd_opt->nd_opt_type, nd_opt->nd_opt_len); 290 nd_opt->nd_opt_type,
291 nd_opt->nd_opt_len);
301 } 292 }
302 } 293 }
303 opt_len -= l; 294 opt_len -= l;
@@ -325,9 +316,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
325 case ARPHRD_FDDI: 316 case ARPHRD_FDDI:
326 ipv6_eth_mc_map(addr, buf); 317 ipv6_eth_mc_map(addr, buf);
327 return 0; 318 return 0;
328 case ARPHRD_IEEE802_TR:
329 ipv6_tr_mc_map(addr,buf);
330 return 0;
331 case ARPHRD_ARCNET: 319 case ARPHRD_ARCNET:
332 ipv6_arcnet_mc_map(addr, buf); 320 ipv6_arcnet_mc_map(addr, buf);
333 return 0; 321 return 0;
@@ -360,7 +348,7 @@ static int ndisc_constructor(struct neighbour *neigh)
360 struct net_device *dev = neigh->dev; 348 struct net_device *dev = neigh->dev;
361 struct inet6_dev *in6_dev; 349 struct inet6_dev *in6_dev;
362 struct neigh_parms *parms; 350 struct neigh_parms *parms;
363 int is_multicast = ipv6_addr_is_multicast(addr); 351 bool is_multicast = ipv6_addr_is_multicast(addr);
364 352
365 in6_dev = in6_dev_get(dev); 353 in6_dev = in6_dev_get(dev);
366 if (in6_dev == NULL) { 354 if (in6_dev == NULL) {
@@ -456,9 +444,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
456 len + hlen + tlen), 444 len + hlen + tlen),
457 1, &err); 445 1, &err);
458 if (!skb) { 446 if (!skb) {
459 ND_PRINTK0(KERN_ERR 447 ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n",
460 "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", 448 __func__, err);
461 __func__, err);
462 return NULL; 449 return NULL;
463 } 450 }
464 451
@@ -694,8 +681,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
694 681
695 if ((probes -= neigh->parms->ucast_probes) < 0) { 682 if ((probes -= neigh->parms->ucast_probes) < 0) {
696 if (!(neigh->nud_state & NUD_VALID)) { 683 if (!(neigh->nud_state & NUD_VALID)) {
697 ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n", 684 ND_PRINTK(1, dbg,
698 __func__, target); 685 "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
686 __func__, target);
699 } 687 }
700 ndisc_send_ns(dev, neigh, target, target, saddr); 688 ndisc_send_ns(dev, neigh, target, target, saddr);
701 } else if ((probes -= neigh->parms->app_probes) < 0) { 689 } else if ((probes -= neigh->parms->app_probes) < 0) {
@@ -737,12 +725,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
737 struct inet6_dev *idev = NULL; 725 struct inet6_dev *idev = NULL;
738 struct neighbour *neigh; 726 struct neighbour *neigh;
739 int dad = ipv6_addr_any(saddr); 727 int dad = ipv6_addr_any(saddr);
740 int inc; 728 bool inc;
741 int is_router = -1; 729 int is_router = -1;
742 730
743 if (ipv6_addr_is_multicast(&msg->target)) { 731 if (ipv6_addr_is_multicast(&msg->target)) {
744 ND_PRINTK2(KERN_WARNING 732 ND_PRINTK(2, warn, "NS: multicast target address\n");
745 "ICMPv6 NS: multicast target address");
746 return; 733 return;
747 } 734 }
748 735
@@ -755,22 +742,20 @@ static void ndisc_recv_ns(struct sk_buff *skb)
755 daddr->s6_addr32[1] == htonl(0x00000000) && 742 daddr->s6_addr32[1] == htonl(0x00000000) &&
756 daddr->s6_addr32[2] == htonl(0x00000001) && 743 daddr->s6_addr32[2] == htonl(0x00000001) &&
757 daddr->s6_addr [12] == 0xff )) { 744 daddr->s6_addr [12] == 0xff )) {
758 ND_PRINTK2(KERN_WARNING 745 ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
759 "ICMPv6 NS: bad DAD packet (wrong destination)\n");
760 return; 746 return;
761 } 747 }
762 748
763 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { 749 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
764 ND_PRINTK2(KERN_WARNING 750 ND_PRINTK(2, warn, "NS: invalid ND options\n");
765 "ICMPv6 NS: invalid ND options\n");
766 return; 751 return;
767 } 752 }
768 753
769 if (ndopts.nd_opts_src_lladdr) { 754 if (ndopts.nd_opts_src_lladdr) {
770 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev); 755 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
771 if (!lladdr) { 756 if (!lladdr) {
772 ND_PRINTK2(KERN_WARNING 757 ND_PRINTK(2, warn,
773 "ICMPv6 NS: invalid link-layer address length\n"); 758 "NS: invalid link-layer address length\n");
774 return; 759 return;
775 } 760 }
776 761
@@ -780,8 +765,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
780 * in the message. 765 * in the message.
781 */ 766 */
782 if (dad) { 767 if (dad) {
783 ND_PRINTK2(KERN_WARNING 768 ND_PRINTK(2, warn,
784 "ICMPv6 NS: bad DAD packet (link-layer address option)\n"); 769 "NS: bad DAD packet (link-layer address option)\n");
785 return; 770 return;
786 } 771 }
787 } 772 }
@@ -793,20 +778,6 @@ static void ndisc_recv_ns(struct sk_buff *skb)
793 778
794 if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { 779 if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
795 if (dad) { 780 if (dad) {
796 if (dev->type == ARPHRD_IEEE802_TR) {
797 const unsigned char *sadr;
798 sadr = skb_mac_header(skb);
799 if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
800 sadr[9] == dev->dev_addr[1] &&
801 sadr[10] == dev->dev_addr[2] &&
802 sadr[11] == dev->dev_addr[3] &&
803 sadr[12] == dev->dev_addr[4] &&
804 sadr[13] == dev->dev_addr[5]) {
805 /* looped-back to us */
806 goto out;
807 }
808 }
809
810 /* 781 /*
811 * We are colliding with another node 782 * We are colliding with another node
812 * who is doing DAD 783 * who is doing DAD
@@ -913,34 +884,30 @@ static void ndisc_recv_na(struct sk_buff *skb)
913 struct neighbour *neigh; 884 struct neighbour *neigh;
914 885
915 if (skb->len < sizeof(struct nd_msg)) { 886 if (skb->len < sizeof(struct nd_msg)) {
916 ND_PRINTK2(KERN_WARNING 887 ND_PRINTK(2, warn, "NA: packet too short\n");
917 "ICMPv6 NA: packet too short\n");
918 return; 888 return;
919 } 889 }
920 890
921 if (ipv6_addr_is_multicast(&msg->target)) { 891 if (ipv6_addr_is_multicast(&msg->target)) {
922 ND_PRINTK2(KERN_WARNING 892 ND_PRINTK(2, warn, "NA: target address is multicast\n");
923 "ICMPv6 NA: target address is multicast.\n");
924 return; 893 return;
925 } 894 }
926 895
927 if (ipv6_addr_is_multicast(daddr) && 896 if (ipv6_addr_is_multicast(daddr) &&
928 msg->icmph.icmp6_solicited) { 897 msg->icmph.icmp6_solicited) {
929 ND_PRINTK2(KERN_WARNING 898 ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
930 "ICMPv6 NA: solicited NA is multicasted.\n");
931 return; 899 return;
932 } 900 }
933 901
934 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { 902 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
935 ND_PRINTK2(KERN_WARNING 903 ND_PRINTK(2, warn, "NS: invalid ND option\n");
936 "ICMPv6 NS: invalid ND option\n");
937 return; 904 return;
938 } 905 }
939 if (ndopts.nd_opts_tgt_lladdr) { 906 if (ndopts.nd_opts_tgt_lladdr) {
940 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev); 907 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
941 if (!lladdr) { 908 if (!lladdr) {
942 ND_PRINTK2(KERN_WARNING 909 ND_PRINTK(2, warn,
943 "ICMPv6 NA: invalid link-layer address length\n"); 910 "NA: invalid link-layer address length\n");
944 return; 911 return;
945 } 912 }
946 } 913 }
@@ -961,9 +928,9 @@ static void ndisc_recv_na(struct sk_buff *skb)
961 unsolicited advertisement. 928 unsolicited advertisement.
962 */ 929 */
963 if (skb->pkt_type != PACKET_LOOPBACK) 930 if (skb->pkt_type != PACKET_LOOPBACK)
964 ND_PRINTK1(KERN_WARNING 931 ND_PRINTK(1, warn,
965 "ICMPv6 NA: someone advertises our address %pI6 on %s!\n", 932 "NA: someone advertises our address %pI6 on %s!\n",
966 &ifp->addr, ifp->idev->dev->name); 933 &ifp->addr, ifp->idev->dev->name);
967 in6_ifa_put(ifp); 934 in6_ifa_put(ifp);
968 return; 935 return;
969 } 936 }
@@ -1025,8 +992,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1025 992
1026 idev = __in6_dev_get(skb->dev); 993 idev = __in6_dev_get(skb->dev);
1027 if (!idev) { 994 if (!idev) {
1028 if (net_ratelimit()) 995 ND_PRINTK(1, err, "RS: can't find in6 device\n");
1029 ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
1030 return; 996 return;
1031 } 997 }
1032 998
@@ -1043,8 +1009,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1043 1009
1044 /* Parse ND options */ 1010 /* Parse ND options */
1045 if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) { 1011 if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) {
1046 if (net_ratelimit()) 1012 ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
1047 ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n");
1048 goto out; 1013 goto out;
1049 } 1014 }
1050 1015
@@ -1099,8 +1064,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
1099 1064
1100 memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); 1065 memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
1101 1066
1102 NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), 1067 if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
1103 &ipv6_hdr(ra)->saddr); 1068 &ipv6_hdr(ra)->saddr))
1069 goto nla_put_failure;
1104 nlmsg_end(skb, nlh); 1070 nlmsg_end(skb, nlh);
1105 1071
1106 rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); 1072 rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
@@ -1141,20 +1107,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1141 optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); 1107 optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
1142 1108
1143 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { 1109 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
1144 ND_PRINTK2(KERN_WARNING 1110 ND_PRINTK(2, warn, "RA: source address is not link-local\n");
1145 "ICMPv6 RA: source address is not link-local.\n");
1146 return; 1111 return;
1147 } 1112 }
1148 if (optlen < 0) { 1113 if (optlen < 0) {
1149 ND_PRINTK2(KERN_WARNING 1114 ND_PRINTK(2, warn, "RA: packet too short\n");
1150 "ICMPv6 RA: packet too short\n");
1151 return; 1115 return;
1152 } 1116 }
1153 1117
1154#ifdef CONFIG_IPV6_NDISC_NODETYPE 1118#ifdef CONFIG_IPV6_NDISC_NODETYPE
1155 if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) { 1119 if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
1156 ND_PRINTK2(KERN_WARNING 1120 ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
1157 "ICMPv6 RA: from host or unauthorized router\n");
1158 return; 1121 return;
1159 } 1122 }
1160#endif 1123#endif
@@ -1165,15 +1128,13 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1165 1128
1166 in6_dev = __in6_dev_get(skb->dev); 1129 in6_dev = __in6_dev_get(skb->dev);
1167 if (in6_dev == NULL) { 1130 if (in6_dev == NULL) {
1168 ND_PRINTK0(KERN_ERR 1131 ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
1169 "ICMPv6 RA: can't find inet6 device for %s.\n", 1132 skb->dev->name);
1170 skb->dev->name);
1171 return; 1133 return;
1172 } 1134 }
1173 1135
1174 if (!ndisc_parse_options(opt, optlen, &ndopts)) { 1136 if (!ndisc_parse_options(opt, optlen, &ndopts)) {
1175 ND_PRINTK2(KERN_WARNING 1137 ND_PRINTK(2, warn, "RA: invalid ND options\n");
1176 "ICMP6 RA: invalid ND options\n");
1177 return; 1138 return;
1178 } 1139 }
1179 1140
@@ -1223,32 +1184,37 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1223 1184
1224 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); 1185 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
1225 1186
1226 if (rt) 1187 if (rt) {
1227 neigh = dst_get_neighbour_noref(&rt->dst); 1188 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1228 1189 if (!neigh) {
1190 ND_PRINTK(0, err,
1191 "RA: %s got default router without neighbour\n",
1192 __func__);
1193 dst_release(&rt->dst);
1194 return;
1195 }
1196 }
1229 if (rt && lifetime == 0) { 1197 if (rt && lifetime == 0) {
1230 neigh_clone(neigh);
1231 ip6_del_rt(rt); 1198 ip6_del_rt(rt);
1232 rt = NULL; 1199 rt = NULL;
1233 } 1200 }
1234 1201
1235 if (rt == NULL && lifetime) { 1202 if (rt == NULL && lifetime) {
1236 ND_PRINTK3(KERN_DEBUG 1203 ND_PRINTK(3, dbg, "RA: adding default router\n");
1237 "ICMPv6 RA: adding default router.\n");
1238 1204
1239 rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); 1205 rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
1240 if (rt == NULL) { 1206 if (rt == NULL) {
1241 ND_PRINTK0(KERN_ERR 1207 ND_PRINTK(0, err,
1242 "ICMPv6 RA: %s() failed to add default route.\n", 1208 "RA: %s failed to add default route\n",
1243 __func__); 1209 __func__);
1244 return; 1210 return;
1245 } 1211 }
1246 1212
1247 neigh = dst_get_neighbour_noref(&rt->dst); 1213 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1248 if (neigh == NULL) { 1214 if (neigh == NULL) {
1249 ND_PRINTK0(KERN_ERR 1215 ND_PRINTK(0, err,
1250 "ICMPv6 RA: %s() got default router without neighbour.\n", 1216 "RA: %s got default router without neighbour\n",
1251 __func__); 1217 __func__);
1252 dst_release(&rt->dst); 1218 dst_release(&rt->dst);
1253 return; 1219 return;
1254 } 1220 }
@@ -1258,8 +1224,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1258 } 1224 }
1259 1225
1260 if (rt) 1226 if (rt)
1261 rt->dst.expires = jiffies + (HZ * lifetime); 1227 rt6_set_expires(rt, jiffies + (HZ * lifetime));
1262
1263 if (ra_msg->icmph.icmp6_hop_limit) { 1228 if (ra_msg->icmph.icmp6_hop_limit) {
1264 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1229 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1265 if (rt) 1230 if (rt)
@@ -1317,8 +1282,8 @@ skip_linkparms:
1317 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, 1282 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
1318 skb->dev); 1283 skb->dev);
1319 if (!lladdr) { 1284 if (!lladdr) {
1320 ND_PRINTK2(KERN_WARNING 1285 ND_PRINTK(2, warn,
1321 "ICMPv6 RA: invalid link-layer address length\n"); 1286 "RA: invalid link-layer address length\n");
1322 goto out; 1287 goto out;
1323 } 1288 }
1324 } 1289 }
@@ -1382,9 +1347,7 @@ skip_routeinfo:
1382 mtu = ntohl(n); 1347 mtu = ntohl(n);
1383 1348
1384 if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { 1349 if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
1385 ND_PRINTK2(KERN_WARNING 1350 ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
1386 "ICMPv6 RA: invalid mtu: %d\n",
1387 mtu);
1388 } else if (in6_dev->cnf.mtu6 != mtu) { 1351 } else if (in6_dev->cnf.mtu6 != mtu) {
1389 in6_dev->cnf.mtu6 = mtu; 1352 in6_dev->cnf.mtu6 = mtu;
1390 1353
@@ -1405,13 +1368,12 @@ skip_routeinfo:
1405 } 1368 }
1406 1369
1407 if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) { 1370 if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
1408 ND_PRINTK2(KERN_WARNING 1371 ND_PRINTK(2, warn, "RA: invalid RA options\n");
1409 "ICMPv6 RA: invalid RA options");
1410 } 1372 }
1411out: 1373out:
1412 if (rt) 1374 if (rt)
1413 dst_release(&rt->dst); 1375 dst_release(&rt->dst);
1414 else if (neigh) 1376 if (neigh)
1415 neigh_release(neigh); 1377 neigh_release(neigh);
1416} 1378}
1417 1379
@@ -1431,15 +1393,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1431 switch (skb->ndisc_nodetype) { 1393 switch (skb->ndisc_nodetype) {
1432 case NDISC_NODETYPE_HOST: 1394 case NDISC_NODETYPE_HOST:
1433 case NDISC_NODETYPE_NODEFAULT: 1395 case NDISC_NODETYPE_NODEFAULT:
1434 ND_PRINTK2(KERN_WARNING 1396 ND_PRINTK(2, warn,
1435 "ICMPv6 Redirect: from host or unauthorized router\n"); 1397 "Redirect: from host or unauthorized router\n");
1436 return; 1398 return;
1437 } 1399 }
1438#endif 1400#endif
1439 1401
1440 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { 1402 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
1441 ND_PRINTK2(KERN_WARNING 1403 ND_PRINTK(2, warn,
1442 "ICMPv6 Redirect: source address is not link-local.\n"); 1404 "Redirect: source address is not link-local\n");
1443 return; 1405 return;
1444 } 1406 }
1445 1407
@@ -1447,8 +1409,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1447 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1409 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1448 1410
1449 if (optlen < 0) { 1411 if (optlen < 0) {
1450 ND_PRINTK2(KERN_WARNING 1412 ND_PRINTK(2, warn, "Redirect: packet too short\n");
1451 "ICMPv6 Redirect: packet too short\n");
1452 return; 1413 return;
1453 } 1414 }
1454 1415
@@ -1457,8 +1418,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1457 dest = target + 1; 1418 dest = target + 1;
1458 1419
1459 if (ipv6_addr_is_multicast(dest)) { 1420 if (ipv6_addr_is_multicast(dest)) {
1460 ND_PRINTK2(KERN_WARNING 1421 ND_PRINTK(2, warn,
1461 "ICMPv6 Redirect: destination address is multicast.\n"); 1422 "Redirect: destination address is multicast\n");
1462 return; 1423 return;
1463 } 1424 }
1464 1425
@@ -1466,8 +1427,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1466 on_link = 1; 1427 on_link = 1;
1467 } else if (ipv6_addr_type(target) != 1428 } else if (ipv6_addr_type(target) !=
1468 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 1429 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1469 ND_PRINTK2(KERN_WARNING 1430 ND_PRINTK(2, warn,
1470 "ICMPv6 Redirect: target address is not link-local unicast.\n"); 1431 "Redirect: target address is not link-local unicast\n");
1471 return; 1432 return;
1472 } 1433 }
1473 1434
@@ -1483,16 +1444,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1483 */ 1444 */
1484 1445
1485 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { 1446 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
1486 ND_PRINTK2(KERN_WARNING 1447 ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
1487 "ICMPv6 Redirect: invalid ND options\n");
1488 return; 1448 return;
1489 } 1449 }
1490 if (ndopts.nd_opts_tgt_lladdr) { 1450 if (ndopts.nd_opts_tgt_lladdr) {
1491 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, 1451 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1492 skb->dev); 1452 skb->dev);
1493 if (!lladdr) { 1453 if (!lladdr) {
1494 ND_PRINTK2(KERN_WARNING 1454 ND_PRINTK(2, warn,
1495 "ICMPv6 Redirect: invalid link-layer address length\n"); 1455 "Redirect: invalid link-layer address length\n");
1496 return; 1456 return;
1497 } 1457 }
1498 } 1458 }
@@ -1506,8 +1466,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1506 } 1466 }
1507} 1467}
1508 1468
1509void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, 1469void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1510 const struct in6_addr *target)
1511{ 1470{
1512 struct net_device *dev = skb->dev; 1471 struct net_device *dev = skb->dev;
1513 struct net *net = dev_net(dev); 1472 struct net *net = dev_net(dev);
@@ -1528,16 +1487,15 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1528 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1487 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
1529 1488
1530 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { 1489 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
1531 ND_PRINTK2(KERN_WARNING 1490 ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
1532 "ICMPv6 Redirect: no link-local address on %s\n", 1491 dev->name);
1533 dev->name);
1534 return; 1492 return;
1535 } 1493 }
1536 1494
1537 if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && 1495 if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
1538 ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 1496 ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1539 ND_PRINTK2(KERN_WARNING 1497 ND_PRINTK(2, warn,
1540 "ICMPv6 Redirect: target address is not link-local unicast.\n"); 1498 "Redirect: target address is not link-local unicast\n");
1541 return; 1499 return;
1542 } 1500 }
1543 1501
@@ -1556,8 +1514,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1556 rt = (struct rt6_info *) dst; 1514 rt = (struct rt6_info *) dst;
1557 1515
1558 if (rt->rt6i_flags & RTF_GATEWAY) { 1516 if (rt->rt6i_flags & RTF_GATEWAY) {
1559 ND_PRINTK2(KERN_WARNING 1517 ND_PRINTK(2, warn,
1560 "ICMPv6 Redirect: destination is not a neighbour.\n"); 1518 "Redirect: destination is not a neighbour\n");
1561 goto release; 1519 goto release;
1562 } 1520 }
1563 if (!rt->rt6i_peer) 1521 if (!rt->rt6i_peer)
@@ -1566,6 +1524,13 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1566 goto release; 1524 goto release;
1567 1525
1568 if (dev->addr_len) { 1526 if (dev->addr_len) {
1527 struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
1528 if (!neigh) {
1529 ND_PRINTK(2, warn,
1530 "Redirect: no neigh for target address\n");
1531 goto release;
1532 }
1533
1569 read_lock_bh(&neigh->lock); 1534 read_lock_bh(&neigh->lock);
1570 if (neigh->nud_state & NUD_VALID) { 1535 if (neigh->nud_state & NUD_VALID) {
1571 memcpy(ha_buf, neigh->ha, dev->addr_len); 1536 memcpy(ha_buf, neigh->ha, dev->addr_len);
@@ -1574,6 +1539,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1574 len += ndisc_opt_addr_space(dev); 1539 len += ndisc_opt_addr_space(dev);
1575 } else 1540 } else
1576 read_unlock_bh(&neigh->lock); 1541 read_unlock_bh(&neigh->lock);
1542
1543 neigh_release(neigh);
1577 } 1544 }
1578 1545
1579 rd_len = min_t(unsigned int, 1546 rd_len = min_t(unsigned int,
@@ -1588,9 +1555,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1588 len + hlen + tlen), 1555 len + hlen + tlen),
1589 1, &err); 1556 1, &err);
1590 if (buff == NULL) { 1557 if (buff == NULL) {
1591 ND_PRINTK0(KERN_ERR 1558 ND_PRINTK(0, err,
1592 "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n", 1559 "Redirect: %s failed to allocate an skb, err=%d\n",
1593 __func__, err); 1560 __func__, err);
1594 goto release; 1561 goto release;
1595 } 1562 }
1596 1563
@@ -1675,16 +1642,14 @@ int ndisc_rcv(struct sk_buff *skb)
1675 __skb_push(skb, skb->data - skb_transport_header(skb)); 1642 __skb_push(skb, skb->data - skb_transport_header(skb));
1676 1643
1677 if (ipv6_hdr(skb)->hop_limit != 255) { 1644 if (ipv6_hdr(skb)->hop_limit != 255) {
1678 ND_PRINTK2(KERN_WARNING 1645 ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
1679 "ICMPv6 NDISC: invalid hop-limit: %d\n", 1646 ipv6_hdr(skb)->hop_limit);
1680 ipv6_hdr(skb)->hop_limit);
1681 return 0; 1647 return 0;
1682 } 1648 }
1683 1649
1684 if (msg->icmph.icmp6_code != 0) { 1650 if (msg->icmph.icmp6_code != 0) {
1685 ND_PRINTK2(KERN_WARNING 1651 ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
1686 "ICMPv6 NDISC: invalid ICMPv6 code: %d\n", 1652 msg->icmph.icmp6_code);
1687 msg->icmph.icmp6_code);
1688 return 0; 1653 return 0;
1689 } 1654 }
1690 1655
@@ -1751,11 +1716,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
1751 static int warned; 1716 static int warned;
1752 if (strcmp(warncomm, current->comm) && warned < 5) { 1717 if (strcmp(warncomm, current->comm) && warned < 5) {
1753 strcpy(warncomm, current->comm); 1718 strcpy(warncomm, current->comm);
1754 printk(KERN_WARNING 1719 pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
1755 "process `%s' is using deprecated sysctl (%s) "
1756 "net.ipv6.neigh.%s.%s; "
1757 "Use net.ipv6.neigh.%s.%s_ms "
1758 "instead.\n",
1759 warncomm, func, 1720 warncomm, func,
1760 dev_name, ctl->procname, 1721 dev_name, ctl->procname,
1761 dev_name, ctl->procname); 1722 dev_name, ctl->procname);
@@ -1809,9 +1770,9 @@ static int __net_init ndisc_net_init(struct net *net)
1809 err = inet_ctl_sock_create(&sk, PF_INET6, 1770 err = inet_ctl_sock_create(&sk, PF_INET6,
1810 SOCK_RAW, IPPROTO_ICMPV6, net); 1771 SOCK_RAW, IPPROTO_ICMPV6, net);
1811 if (err < 0) { 1772 if (err < 0) {
1812 ND_PRINTK0(KERN_ERR 1773 ND_PRINTK(0, err,
1813 "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", 1774 "NDISC: Failed to initialize the control socket (err %d)\n",
1814 err); 1775 err);
1815 return err; 1776 return err;
1816 } 1777 }
1817 1778
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 9a68fb5b9e77..10135342799e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,28 +25,6 @@ config NF_CONNTRACK_IPV6
25 25
26 To compile it as a module, choose M here. If unsure, say N. 26 To compile it as a module, choose M here. If unsure, say N.
27 27
28config IP6_NF_QUEUE
29 tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)"
30 depends on INET && IPV6 && NETFILTER
31 depends on NETFILTER_ADVANCED
32 ---help---
33
34 This option adds a queue handler to the kernel for IPv6
35 packets which enables users to receive the filtered packets
36 with QUEUE target using libipq.
37
38 This option enables the old IPv6-only "ip6_queue" implementation
39 which has been obsoleted by the new "nfnetlink_queue" code (see
40 CONFIG_NETFILTER_NETLINK_QUEUE).
41
42 (C) Fernando Anton 2001
43 IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
44 Universidad Carlos III de Madrid
45 Universidad Politecnica de Alcala de Henares
46 email: <fanton@it.uc3m.es>.
47
48 To compile it as a module, choose M here. If unsure, say N.
49
50config IP6_NF_IPTABLES 28config IP6_NF_IPTABLES
51 tristate "IP6 tables support (required for filtering)" 29 tristate "IP6 tables support (required for filtering)"
52 depends on INET && IPV6 30 depends on INET && IPV6
@@ -154,15 +132,6 @@ config IP6_NF_TARGET_HL
154 (e.g. when running oldconfig). It selects 132 (e.g. when running oldconfig). It selects
155 CONFIG_NETFILTER_XT_TARGET_HL. 133 CONFIG_NETFILTER_XT_TARGET_HL.
156 134
157config IP6_NF_TARGET_LOG
158 tristate "LOG target support"
159 default m if NETFILTER_ADVANCED=n
160 help
161 This option adds a `LOG' target, which allows you to create rules in
162 any iptables table which records the packet header to the syslog.
163
164 To compile it as a module, choose M here. If unsure, say N.
165
166config IP6_NF_FILTER 135config IP6_NF_FILTER
167 tristate "Packet filtering" 136 tristate "Packet filtering"
168 default m if NETFILTER_ADVANCED=n 137 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2eaed96db02c..534d3f216f7b 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -6,7 +6,6 @@
6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o 6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
7obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o 7obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 9obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o 10obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
12 11
@@ -31,5 +30,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
31obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 30obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
32 31
33# targets 32# targets
34obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
35obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 33obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
deleted file mode 100644
index a34c9e4c792c..000000000000
--- a/net/ipv6/netfilter/ip6_queue.c
+++ /dev/null
@@ -1,641 +0,0 @@
1/*
2 * This is a module which is used for queueing IPv6 packets and
3 * communicating with userspace via netlink.
4 *
5 * (C) 2001 Fernando Anton, this code is GPL.
6 * IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
7 * Universidad Carlos III de Madrid - Leganes (Madrid) - Spain
8 * Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain
9 * email: fanton@it.uc3m.es
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/init.h>
18#include <linux/ipv6.h>
19#include <linux/notifier.h>
20#include <linux/netdevice.h>
21#include <linux/netfilter.h>
22#include <linux/netlink.h>
23#include <linux/spinlock.h>
24#include <linux/sysctl.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/mutex.h>
28#include <linux/slab.h>
29#include <net/net_namespace.h>
30#include <net/sock.h>
31#include <net/ipv6.h>
32#include <net/ip6_route.h>
33#include <net/netfilter/nf_queue.h>
34#include <linux/netfilter_ipv4/ip_queue.h>
35#include <linux/netfilter_ipv4/ip_tables.h>
36#include <linux/netfilter_ipv6/ip6_tables.h>
37
38#define IPQ_QMAX_DEFAULT 1024
39#define IPQ_PROC_FS_NAME "ip6_queue"
40#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
41
42typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
43
44static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
45static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
46static DEFINE_SPINLOCK(queue_lock);
47static int peer_pid __read_mostly;
48static unsigned int copy_range __read_mostly;
49static unsigned int queue_total;
50static unsigned int queue_dropped = 0;
51static unsigned int queue_user_dropped = 0;
52static struct sock *ipqnl __read_mostly;
53static LIST_HEAD(queue_list);
54static DEFINE_MUTEX(ipqnl_mutex);
55
56static inline void
57__ipq_enqueue_entry(struct nf_queue_entry *entry)
58{
59 list_add_tail(&entry->list, &queue_list);
60 queue_total++;
61}
62
63static inline int
64__ipq_set_mode(unsigned char mode, unsigned int range)
65{
66 int status = 0;
67
68 switch(mode) {
69 case IPQ_COPY_NONE:
70 case IPQ_COPY_META:
71 copy_mode = mode;
72 copy_range = 0;
73 break;
74
75 case IPQ_COPY_PACKET:
76 if (range > 0xFFFF)
77 range = 0xFFFF;
78 copy_range = range;
79 copy_mode = mode;
80 break;
81
82 default:
83 status = -EINVAL;
84
85 }
86 return status;
87}
88
89static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
90
91static inline void
92__ipq_reset(void)
93{
94 peer_pid = 0;
95 net_disable_timestamp();
96 __ipq_set_mode(IPQ_COPY_NONE, 0);
97 __ipq_flush(NULL, 0);
98}
99
100static struct nf_queue_entry *
101ipq_find_dequeue_entry(unsigned long id)
102{
103 struct nf_queue_entry *entry = NULL, *i;
104
105 spin_lock_bh(&queue_lock);
106
107 list_for_each_entry(i, &queue_list, list) {
108 if ((unsigned long)i == id) {
109 entry = i;
110 break;
111 }
112 }
113
114 if (entry) {
115 list_del(&entry->list);
116 queue_total--;
117 }
118
119 spin_unlock_bh(&queue_lock);
120 return entry;
121}
122
123static void
124__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
125{
126 struct nf_queue_entry *entry, *next;
127
128 list_for_each_entry_safe(entry, next, &queue_list, list) {
129 if (!cmpfn || cmpfn(entry, data)) {
130 list_del(&entry->list);
131 queue_total--;
132 nf_reinject(entry, NF_DROP);
133 }
134 }
135}
136
137static void
138ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
139{
140 spin_lock_bh(&queue_lock);
141 __ipq_flush(cmpfn, data);
142 spin_unlock_bh(&queue_lock);
143}
144
145static struct sk_buff *
146ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
147{
148 sk_buff_data_t old_tail;
149 size_t size = 0;
150 size_t data_len = 0;
151 struct sk_buff *skb;
152 struct ipq_packet_msg *pmsg;
153 struct nlmsghdr *nlh;
154 struct timeval tv;
155
156 switch (ACCESS_ONCE(copy_mode)) {
157 case IPQ_COPY_META:
158 case IPQ_COPY_NONE:
159 size = NLMSG_SPACE(sizeof(*pmsg));
160 break;
161
162 case IPQ_COPY_PACKET:
163 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
164 (*errp = skb_checksum_help(entry->skb)))
165 return NULL;
166
167 data_len = ACCESS_ONCE(copy_range);
168 if (data_len == 0 || data_len > entry->skb->len)
169 data_len = entry->skb->len;
170
171 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
172 break;
173
174 default:
175 *errp = -EINVAL;
176 return NULL;
177 }
178
179 skb = alloc_skb(size, GFP_ATOMIC);
180 if (!skb)
181 goto nlmsg_failure;
182
183 old_tail = skb->tail;
184 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
185 pmsg = NLMSG_DATA(nlh);
186 memset(pmsg, 0, sizeof(*pmsg));
187
188 pmsg->packet_id = (unsigned long )entry;
189 pmsg->data_len = data_len;
190 tv = ktime_to_timeval(entry->skb->tstamp);
191 pmsg->timestamp_sec = tv.tv_sec;
192 pmsg->timestamp_usec = tv.tv_usec;
193 pmsg->mark = entry->skb->mark;
194 pmsg->hook = entry->hook;
195 pmsg->hw_protocol = entry->skb->protocol;
196
197 if (entry->indev)
198 strcpy(pmsg->indev_name, entry->indev->name);
199 else
200 pmsg->indev_name[0] = '\0';
201
202 if (entry->outdev)
203 strcpy(pmsg->outdev_name, entry->outdev->name);
204 else
205 pmsg->outdev_name[0] = '\0';
206
207 if (entry->indev && entry->skb->dev &&
208 entry->skb->mac_header != entry->skb->network_header) {
209 pmsg->hw_type = entry->skb->dev->type;
210 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
211 }
212
213 if (data_len)
214 if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
215 BUG();
216
217 nlh->nlmsg_len = skb->tail - old_tail;
218 return skb;
219
220nlmsg_failure:
221 kfree_skb(skb);
222 *errp = -EINVAL;
223 printk(KERN_ERR "ip6_queue: error creating packet message\n");
224 return NULL;
225}
226
227static int
228ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
229{
230 int status = -EINVAL;
231 struct sk_buff *nskb;
232
233 if (copy_mode == IPQ_COPY_NONE)
234 return -EAGAIN;
235
236 nskb = ipq_build_packet_message(entry, &status);
237 if (nskb == NULL)
238 return status;
239
240 spin_lock_bh(&queue_lock);
241
242 if (!peer_pid)
243 goto err_out_free_nskb;
244
245 if (queue_total >= queue_maxlen) {
246 queue_dropped++;
247 status = -ENOSPC;
248 if (net_ratelimit())
249 printk (KERN_WARNING "ip6_queue: fill at %d entries, "
250 "dropping packet(s). Dropped: %d\n", queue_total,
251 queue_dropped);
252 goto err_out_free_nskb;
253 }
254
255 /* netlink_unicast will either free the nskb or attach it to a socket */
256 status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
257 if (status < 0) {
258 queue_user_dropped++;
259 goto err_out_unlock;
260 }
261
262 __ipq_enqueue_entry(entry);
263
264 spin_unlock_bh(&queue_lock);
265 return status;
266
267err_out_free_nskb:
268 kfree_skb(nskb);
269
270err_out_unlock:
271 spin_unlock_bh(&queue_lock);
272 return status;
273}
274
275static int
276ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
277{
278 int diff;
279 struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
280 struct sk_buff *nskb;
281
282 if (v->data_len < sizeof(*user_iph))
283 return 0;
284 diff = v->data_len - e->skb->len;
285 if (diff < 0) {
286 if (pskb_trim(e->skb, v->data_len))
287 return -ENOMEM;
288 } else if (diff > 0) {
289 if (v->data_len > 0xFFFF)
290 return -EINVAL;
291 if (diff > skb_tailroom(e->skb)) {
292 nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
293 diff, GFP_ATOMIC);
294 if (!nskb) {
295 printk(KERN_WARNING "ip6_queue: OOM "
296 "in mangle, dropping packet\n");
297 return -ENOMEM;
298 }
299 kfree_skb(e->skb);
300 e->skb = nskb;
301 }
302 skb_put(e->skb, diff);
303 }
304 if (!skb_make_writable(e->skb, v->data_len))
305 return -ENOMEM;
306 skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
307 e->skb->ip_summed = CHECKSUM_NONE;
308
309 return 0;
310}
311
312static int
313ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
314{
315 struct nf_queue_entry *entry;
316
317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
318 return -EINVAL;
319
320 entry = ipq_find_dequeue_entry(vmsg->id);
321 if (entry == NULL)
322 return -ENOENT;
323 else {
324 int verdict = vmsg->value;
325
326 if (vmsg->data_len && vmsg->data_len == len)
327 if (ipq_mangle_ipv6(vmsg, entry) < 0)
328 verdict = NF_DROP;
329
330 nf_reinject(entry, verdict);
331 return 0;
332 }
333}
334
335static int
336ipq_set_mode(unsigned char mode, unsigned int range)
337{
338 int status;
339
340 spin_lock_bh(&queue_lock);
341 status = __ipq_set_mode(mode, range);
342 spin_unlock_bh(&queue_lock);
343 return status;
344}
345
346static int
347ipq_receive_peer(struct ipq_peer_msg *pmsg,
348 unsigned char type, unsigned int len)
349{
350 int status = 0;
351
352 if (len < sizeof(*pmsg))
353 return -EINVAL;
354
355 switch (type) {
356 case IPQM_MODE:
357 status = ipq_set_mode(pmsg->msg.mode.value,
358 pmsg->msg.mode.range);
359 break;
360
361 case IPQM_VERDICT:
362 status = ipq_set_verdict(&pmsg->msg.verdict,
363 len - sizeof(*pmsg));
364 break;
365 default:
366 status = -EINVAL;
367 }
368 return status;
369}
370
371static int
372dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
373{
374 if (entry->indev)
375 if (entry->indev->ifindex == ifindex)
376 return 1;
377
378 if (entry->outdev)
379 if (entry->outdev->ifindex == ifindex)
380 return 1;
381#ifdef CONFIG_BRIDGE_NETFILTER
382 if (entry->skb->nf_bridge) {
383 if (entry->skb->nf_bridge->physindev &&
384 entry->skb->nf_bridge->physindev->ifindex == ifindex)
385 return 1;
386 if (entry->skb->nf_bridge->physoutdev &&
387 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
388 return 1;
389 }
390#endif
391 return 0;
392}
393
394static void
395ipq_dev_drop(int ifindex)
396{
397 ipq_flush(dev_cmp, ifindex);
398}
399
400#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
401
402static inline void
403__ipq_rcv_skb(struct sk_buff *skb)
404{
405 int status, type, pid, flags;
406 unsigned int nlmsglen, skblen;
407 struct nlmsghdr *nlh;
408 bool enable_timestamp = false;
409
410 skblen = skb->len;
411 if (skblen < sizeof(*nlh))
412 return;
413
414 nlh = nlmsg_hdr(skb);
415 nlmsglen = nlh->nlmsg_len;
416 if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
417 return;
418
419 pid = nlh->nlmsg_pid;
420 flags = nlh->nlmsg_flags;
421
422 if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
423 RCV_SKB_FAIL(-EINVAL);
424
425 if (flags & MSG_TRUNC)
426 RCV_SKB_FAIL(-ECOMM);
427
428 type = nlh->nlmsg_type;
429 if (type < NLMSG_NOOP || type >= IPQM_MAX)
430 RCV_SKB_FAIL(-EINVAL);
431
432 if (type <= IPQM_BASE)
433 return;
434
435 if (!capable(CAP_NET_ADMIN))
436 RCV_SKB_FAIL(-EPERM);
437
438 spin_lock_bh(&queue_lock);
439
440 if (peer_pid) {
441 if (peer_pid != pid) {
442 spin_unlock_bh(&queue_lock);
443 RCV_SKB_FAIL(-EBUSY);
444 }
445 } else {
446 enable_timestamp = true;
447 peer_pid = pid;
448 }
449
450 spin_unlock_bh(&queue_lock);
451 if (enable_timestamp)
452 net_enable_timestamp();
453
454 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
455 nlmsglen - NLMSG_LENGTH(0));
456 if (status < 0)
457 RCV_SKB_FAIL(status);
458
459 if (flags & NLM_F_ACK)
460 netlink_ack(skb, nlh, 0);
461}
462
463static void
464ipq_rcv_skb(struct sk_buff *skb)
465{
466 mutex_lock(&ipqnl_mutex);
467 __ipq_rcv_skb(skb);
468 mutex_unlock(&ipqnl_mutex);
469}
470
471static int
472ipq_rcv_dev_event(struct notifier_block *this,
473 unsigned long event, void *ptr)
474{
475 struct net_device *dev = ptr;
476
477 if (!net_eq(dev_net(dev), &init_net))
478 return NOTIFY_DONE;
479
480 /* Drop any packets associated with the downed device */
481 if (event == NETDEV_DOWN)
482 ipq_dev_drop(dev->ifindex);
483 return NOTIFY_DONE;
484}
485
486static struct notifier_block ipq_dev_notifier = {
487 .notifier_call = ipq_rcv_dev_event,
488};
489
490static int
491ipq_rcv_nl_event(struct notifier_block *this,
492 unsigned long event, void *ptr)
493{
494 struct netlink_notify *n = ptr;
495
496 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
497 spin_lock_bh(&queue_lock);
498 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
499 __ipq_reset();
500 spin_unlock_bh(&queue_lock);
501 }
502 return NOTIFY_DONE;
503}
504
505static struct notifier_block ipq_nl_notifier = {
506 .notifier_call = ipq_rcv_nl_event,
507};
508
509#ifdef CONFIG_SYSCTL
510static struct ctl_table_header *ipq_sysctl_header;
511
512static ctl_table ipq_table[] = {
513 {
514 .procname = NET_IPQ_QMAX_NAME,
515 .data = &queue_maxlen,
516 .maxlen = sizeof(queue_maxlen),
517 .mode = 0644,
518 .proc_handler = proc_dointvec
519 },
520 { }
521};
522#endif
523
524#ifdef CONFIG_PROC_FS
525static int ip6_queue_show(struct seq_file *m, void *v)
526{
527 spin_lock_bh(&queue_lock);
528
529 seq_printf(m,
530 "Peer PID : %d\n"
531 "Copy mode : %hu\n"
532 "Copy range : %u\n"
533 "Queue length : %u\n"
534 "Queue max. length : %u\n"
535 "Queue dropped : %u\n"
536 "Netfilter dropped : %u\n",
537 peer_pid,
538 copy_mode,
539 copy_range,
540 queue_total,
541 queue_maxlen,
542 queue_dropped,
543 queue_user_dropped);
544
545 spin_unlock_bh(&queue_lock);
546 return 0;
547}
548
549static int ip6_queue_open(struct inode *inode, struct file *file)
550{
551 return single_open(file, ip6_queue_show, NULL);
552}
553
554static const struct file_operations ip6_queue_proc_fops = {
555 .open = ip6_queue_open,
556 .read = seq_read,
557 .llseek = seq_lseek,
558 .release = single_release,
559 .owner = THIS_MODULE,
560};
561#endif
562
563static const struct nf_queue_handler nfqh = {
564 .name = "ip6_queue",
565 .outfn = &ipq_enqueue_packet,
566};
567
568static int __init ip6_queue_init(void)
569{
570 int status = -ENOMEM;
571 struct proc_dir_entry *proc __maybe_unused;
572
573 netlink_register_notifier(&ipq_nl_notifier);
574 ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0,
575 ipq_rcv_skb, NULL, THIS_MODULE);
576 if (ipqnl == NULL) {
577 printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
578 goto cleanup_netlink_notifier;
579 }
580
581#ifdef CONFIG_PROC_FS
582 proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
583 &ip6_queue_proc_fops);
584 if (!proc) {
585 printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
586 goto cleanup_ipqnl;
587 }
588#endif
589 register_netdevice_notifier(&ipq_dev_notifier);
590#ifdef CONFIG_SYSCTL
591 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
592#endif
593 status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
594 if (status < 0) {
595 printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
596 goto cleanup_sysctl;
597 }
598 return status;
599
600cleanup_sysctl:
601#ifdef CONFIG_SYSCTL
602 unregister_sysctl_table(ipq_sysctl_header);
603#endif
604 unregister_netdevice_notifier(&ipq_dev_notifier);
605 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
606
607cleanup_ipqnl: __maybe_unused
608 netlink_kernel_release(ipqnl);
609 mutex_lock(&ipqnl_mutex);
610 mutex_unlock(&ipqnl_mutex);
611
612cleanup_netlink_notifier:
613 netlink_unregister_notifier(&ipq_nl_notifier);
614 return status;
615}
616
617static void __exit ip6_queue_fini(void)
618{
619 nf_unregister_queue_handlers(&nfqh);
620
621 ipq_flush(NULL, 0);
622
623#ifdef CONFIG_SYSCTL
624 unregister_sysctl_table(ipq_sysctl_header);
625#endif
626 unregister_netdevice_notifier(&ipq_dev_notifier);
627 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
628
629 netlink_kernel_release(ipqnl);
630 mutex_lock(&ipqnl_mutex);
631 mutex_unlock(&ipqnl_mutex);
632
633 netlink_unregister_notifier(&ipq_nl_notifier);
634}
635
636MODULE_DESCRIPTION("IPv6 packet queue handler");
637MODULE_LICENSE("GPL");
638MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW);
639
640module_init(ip6_queue_init);
641module_exit(ip6_queue_fini);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcdc..d7cb04506c3d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 78
79 Hence the start of any table is given by get_table() below. */ 79 Hence the start of any table is given by get_table() below. */
80 80
81/* Check for an extension */
82int
83ip6t_ext_hdr(u8 nexthdr)
84{
85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS);
92}
93
94/* Returns whether matches rule or not. */ 81/* Returns whether matches rule or not. */
95/* Performance critical - called for every packet */ 82/* Performance critical - called for every packet */
96static inline bool 83static inline bool
@@ -146,7 +133,7 @@ ip6_packet_match(const struct sk_buff *skb,
146 int protohdr; 133 int protohdr;
147 unsigned short _frag_off; 134 unsigned short _frag_off;
148 135
149 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); 136 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
150 if (protohdr < 0) { 137 if (protohdr < 0) {
151 if (_frag_off == 0) 138 if (_frag_off == 0)
152 *hotdrop = true; 139 *hotdrop = true;
@@ -194,8 +181,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6)
194static unsigned int 181static unsigned int
195ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) 182ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
196{ 183{
197 if (net_ratelimit()) 184 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
198 pr_info("error: `%s'\n", (const char *)par->targinfo);
199 185
200 return NF_DROP; 186 return NF_DROP;
201} 187}
@@ -375,6 +361,7 @@ ip6t_do_table(struct sk_buff *skb,
375 const struct xt_entry_match *ematch; 361 const struct xt_entry_match *ematch;
376 362
377 IP_NF_ASSERT(e); 363 IP_NF_ASSERT(e);
364 acpar.thoff = 0;
378 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, 365 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
379 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { 366 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
380 no_match: 367 no_match:
@@ -409,7 +396,7 @@ ip6t_do_table(struct sk_buff *skb,
409 if (v < 0) { 396 if (v < 0) {
410 /* Pop from stack? */ 397 /* Pop from stack? */
411 if (v != XT_RETURN) { 398 if (v != XT_RETURN) {
412 verdict = (unsigned)(-v) - 1; 399 verdict = (unsigned int)(-v) - 1;
413 break; 400 break;
414 } 401 }
415 if (*stackptr <= origptr) 402 if (*stackptr <= origptr)
@@ -2291,6 +2278,10 @@ static void __exit ip6_tables_fini(void)
2291 * if target < 0. "last header" is transport protocol header, ESP, or 2278 * if target < 0. "last header" is transport protocol header, ESP, or
2292 * "No next header". 2279 * "No next header".
2293 * 2280 *
2281 * Note that *offset is used as input/output parameter. an if it is not zero,
2282 * then it must be a valid offset to an inner IPv6 header. This can be used
2283 * to explore inner IPv6 header, eg. ICMPv6 error messages.
2284 *
2294 * If target header is found, its offset is set in *offset and return protocol 2285 * If target header is found, its offset is set in *offset and return protocol
2295 * number. Otherwise, return -1. 2286 * number. Otherwise, return -1.
2296 * 2287 *
@@ -2302,17 +2293,33 @@ static void __exit ip6_tables_fini(void)
2302 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff 2293 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2303 * isn't NULL. 2294 * isn't NULL.
2304 * 2295 *
2296 * if flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG
2297 * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set and
2298 * target < 0, then this function will stop at the AH header.
2305 */ 2299 */
2306int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 2300int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2307 int target, unsigned short *fragoff) 2301 int target, unsigned short *fragoff, int *flags)
2308{ 2302{
2309 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); 2303 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2310 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 2304 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2311 unsigned int len = skb->len - start; 2305 unsigned int len;
2312 2306
2313 if (fragoff) 2307 if (fragoff)
2314 *fragoff = 0; 2308 *fragoff = 0;
2315 2309
2310 if (*offset) {
2311 struct ipv6hdr _ip6, *ip6;
2312
2313 ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
2314 if (!ip6 || (ip6->version != 6)) {
2315 printk(KERN_ERR "IPv6 header not found\n");
2316 return -EBADMSG;
2317 }
2318 start = *offset + sizeof(struct ipv6hdr);
2319 nexthdr = ip6->nexthdr;
2320 }
2321 len = skb->len - start;
2322
2316 while (nexthdr != target) { 2323 while (nexthdr != target) {
2317 struct ipv6_opt_hdr _hdr, *hp; 2324 struct ipv6_opt_hdr _hdr, *hp;
2318 unsigned int hdrlen; 2325 unsigned int hdrlen;
@@ -2329,6 +2336,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2329 if (nexthdr == NEXTHDR_FRAGMENT) { 2336 if (nexthdr == NEXTHDR_FRAGMENT) {
2330 unsigned short _frag_off; 2337 unsigned short _frag_off;
2331 __be16 *fp; 2338 __be16 *fp;
2339
2340 if (flags) /* Indicate that this is a fragment */
2341 *flags |= IP6T_FH_F_FRAG;
2332 fp = skb_header_pointer(skb, 2342 fp = skb_header_pointer(skb,
2333 start+offsetof(struct frag_hdr, 2343 start+offsetof(struct frag_hdr,
2334 frag_off), 2344 frag_off),
@@ -2349,9 +2359,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2349 return -ENOENT; 2359 return -ENOENT;
2350 } 2360 }
2351 hdrlen = 8; 2361 hdrlen = 8;
2352 } else if (nexthdr == NEXTHDR_AUTH) 2362 } else if (nexthdr == NEXTHDR_AUTH) {
2363 if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
2364 break;
2353 hdrlen = (hp->hdrlen + 2) << 2; 2365 hdrlen = (hp->hdrlen + 2) << 2;
2354 else 2366 } else
2355 hdrlen = ipv6_optlen(hp); 2367 hdrlen = ipv6_optlen(hp);
2356 2368
2357 nexthdr = hp->nexthdr; 2369 nexthdr = hp->nexthdr;
@@ -2366,7 +2378,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2366EXPORT_SYMBOL(ip6t_register_table); 2378EXPORT_SYMBOL(ip6t_register_table);
2367EXPORT_SYMBOL(ip6t_unregister_table); 2379EXPORT_SYMBOL(ip6t_unregister_table);
2368EXPORT_SYMBOL(ip6t_do_table); 2380EXPORT_SYMBOL(ip6t_do_table);
2369EXPORT_SYMBOL(ip6t_ext_hdr);
2370EXPORT_SYMBOL(ipv6_find_hdr); 2381EXPORT_SYMBOL(ipv6_find_hdr);
2371 2382
2372module_init(ip6_tables_init); 2383module_init(ip6_tables_init);
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
deleted file mode 100644
index e6af8d72f26b..000000000000
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ /dev/null
@@ -1,527 +0,0 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/if_arp.h>
16#include <linux/ip.h>
17#include <linux/spinlock.h>
18#include <linux/icmpv6.h>
19#include <net/udp.h>
20#include <net/tcp.h>
21#include <net/ipv6.h>
22#include <linux/netfilter.h>
23#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv6/ip6_tables.h>
25#include <net/netfilter/nf_log.h>
26#include <net/netfilter/xt_log.h>
27
28MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
29MODULE_DESCRIPTION("Xtables: IPv6 packet logging to syslog");
30MODULE_LICENSE("GPL");
31
32struct in_device;
33#include <net/route.h>
34#include <linux/netfilter_ipv6/ip6t_LOG.h>
35
36/* One level of recursion won't kill us */
37static void dump_packet(struct sbuff *m,
38 const struct nf_loginfo *info,
39 const struct sk_buff *skb, unsigned int ip6hoff,
40 int recurse)
41{
42 u_int8_t currenthdr;
43 int fragment;
44 struct ipv6hdr _ip6h;
45 const struct ipv6hdr *ih;
46 unsigned int ptr;
47 unsigned int hdrlen = 0;
48 unsigned int logflags;
49
50 if (info->type == NF_LOG_TYPE_LOG)
51 logflags = info->u.log.logflags;
52 else
53 logflags = NF_LOG_MASK;
54
55 ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
56 if (ih == NULL) {
57 sb_add(m, "TRUNCATED");
58 return;
59 }
60
61 /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
62 sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
63
64 /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
65 sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
66 ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
67 (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
68 ih->hop_limit,
69 (ntohl(*(__be32 *)ih) & 0x000fffff));
70
71 fragment = 0;
72 ptr = ip6hoff + sizeof(struct ipv6hdr);
73 currenthdr = ih->nexthdr;
74 while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
75 struct ipv6_opt_hdr _hdr;
76 const struct ipv6_opt_hdr *hp;
77
78 hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
79 if (hp == NULL) {
80 sb_add(m, "TRUNCATED");
81 return;
82 }
83
84 /* Max length: 48 "OPT (...) " */
85 if (logflags & IP6T_LOG_IPOPT)
86 sb_add(m, "OPT ( ");
87
88 switch (currenthdr) {
89 case IPPROTO_FRAGMENT: {
90 struct frag_hdr _fhdr;
91 const struct frag_hdr *fh;
92
93 sb_add(m, "FRAG:");
94 fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
95 &_fhdr);
96 if (fh == NULL) {
97 sb_add(m, "TRUNCATED ");
98 return;
99 }
100
101 /* Max length: 6 "65535 " */
102 sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
103
104 /* Max length: 11 "INCOMPLETE " */
105 if (fh->frag_off & htons(0x0001))
106 sb_add(m, "INCOMPLETE ");
107
108 sb_add(m, "ID:%08x ", ntohl(fh->identification));
109
110 if (ntohs(fh->frag_off) & 0xFFF8)
111 fragment = 1;
112
113 hdrlen = 8;
114
115 break;
116 }
117 case IPPROTO_DSTOPTS:
118 case IPPROTO_ROUTING:
119 case IPPROTO_HOPOPTS:
120 if (fragment) {
121 if (logflags & IP6T_LOG_IPOPT)
122 sb_add(m, ")");
123 return;
124 }
125 hdrlen = ipv6_optlen(hp);
126 break;
127 /* Max Length */
128 case IPPROTO_AH:
129 if (logflags & IP6T_LOG_IPOPT) {
130 struct ip_auth_hdr _ahdr;
131 const struct ip_auth_hdr *ah;
132
133 /* Max length: 3 "AH " */
134 sb_add(m, "AH ");
135
136 if (fragment) {
137 sb_add(m, ")");
138 return;
139 }
140
141 ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
142 &_ahdr);
143 if (ah == NULL) {
144 /*
145 * Max length: 26 "INCOMPLETE [65535
146 * bytes] )"
147 */
148 sb_add(m, "INCOMPLETE [%u bytes] )",
149 skb->len - ptr);
150 return;
151 }
152
153 /* Length: 15 "SPI=0xF1234567 */
154 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
155
156 }
157
158 hdrlen = (hp->hdrlen+2)<<2;
159 break;
160 case IPPROTO_ESP:
161 if (logflags & IP6T_LOG_IPOPT) {
162 struct ip_esp_hdr _esph;
163 const struct ip_esp_hdr *eh;
164
165 /* Max length: 4 "ESP " */
166 sb_add(m, "ESP ");
167
168 if (fragment) {
169 sb_add(m, ")");
170 return;
171 }
172
173 /*
174 * Max length: 26 "INCOMPLETE [65535 bytes] )"
175 */
176 eh = skb_header_pointer(skb, ptr, sizeof(_esph),
177 &_esph);
178 if (eh == NULL) {
179 sb_add(m, "INCOMPLETE [%u bytes] )",
180 skb->len - ptr);
181 return;
182 }
183
184 /* Length: 16 "SPI=0xF1234567 )" */
185 sb_add(m, "SPI=0x%x )", ntohl(eh->spi) );
186
187 }
188 return;
189 default:
190 /* Max length: 20 "Unknown Ext Hdr 255" */
191 sb_add(m, "Unknown Ext Hdr %u", currenthdr);
192 return;
193 }
194 if (logflags & IP6T_LOG_IPOPT)
195 sb_add(m, ") ");
196
197 currenthdr = hp->nexthdr;
198 ptr += hdrlen;
199 }
200
201 switch (currenthdr) {
202 case IPPROTO_TCP: {
203 struct tcphdr _tcph;
204 const struct tcphdr *th;
205
206 /* Max length: 10 "PROTO=TCP " */
207 sb_add(m, "PROTO=TCP ");
208
209 if (fragment)
210 break;
211
212 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
213 th = skb_header_pointer(skb, ptr, sizeof(_tcph), &_tcph);
214 if (th == NULL) {
215 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
216 return;
217 }
218
219 /* Max length: 20 "SPT=65535 DPT=65535 " */
220 sb_add(m, "SPT=%u DPT=%u ",
221 ntohs(th->source), ntohs(th->dest));
222 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
223 if (logflags & IP6T_LOG_TCPSEQ)
224 sb_add(m, "SEQ=%u ACK=%u ",
225 ntohl(th->seq), ntohl(th->ack_seq));
226 /* Max length: 13 "WINDOW=65535 " */
227 sb_add(m, "WINDOW=%u ", ntohs(th->window));
228 /* Max length: 9 "RES=0x3C " */
229 sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
230 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
231 if (th->cwr)
232 sb_add(m, "CWR ");
233 if (th->ece)
234 sb_add(m, "ECE ");
235 if (th->urg)
236 sb_add(m, "URG ");
237 if (th->ack)
238 sb_add(m, "ACK ");
239 if (th->psh)
240 sb_add(m, "PSH ");
241 if (th->rst)
242 sb_add(m, "RST ");
243 if (th->syn)
244 sb_add(m, "SYN ");
245 if (th->fin)
246 sb_add(m, "FIN ");
247 /* Max length: 11 "URGP=65535 " */
248 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
249
250 if ((logflags & IP6T_LOG_TCPOPT) &&
251 th->doff * 4 > sizeof(struct tcphdr)) {
252 u_int8_t _opt[60 - sizeof(struct tcphdr)];
253 const u_int8_t *op;
254 unsigned int i;
255 unsigned int optsize = th->doff * 4
256 - sizeof(struct tcphdr);
257
258 op = skb_header_pointer(skb,
259 ptr + sizeof(struct tcphdr),
260 optsize, _opt);
261 if (op == NULL) {
262 sb_add(m, "OPT (TRUNCATED)");
263 return;
264 }
265
266 /* Max length: 127 "OPT (" 15*4*2chars ") " */
267 sb_add(m, "OPT (");
268 for (i =0; i < optsize; i++)
269 sb_add(m, "%02X", op[i]);
270 sb_add(m, ") ");
271 }
272 break;
273 }
274 case IPPROTO_UDP:
275 case IPPROTO_UDPLITE: {
276 struct udphdr _udph;
277 const struct udphdr *uh;
278
279 if (currenthdr == IPPROTO_UDP)
280 /* Max length: 10 "PROTO=UDP " */
281 sb_add(m, "PROTO=UDP " );
282 else /* Max length: 14 "PROTO=UDPLITE " */
283 sb_add(m, "PROTO=UDPLITE ");
284
285 if (fragment)
286 break;
287
288 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
289 uh = skb_header_pointer(skb, ptr, sizeof(_udph), &_udph);
290 if (uh == NULL) {
291 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
292 return;
293 }
294
295 /* Max length: 20 "SPT=65535 DPT=65535 " */
296 sb_add(m, "SPT=%u DPT=%u LEN=%u ",
297 ntohs(uh->source), ntohs(uh->dest),
298 ntohs(uh->len));
299 break;
300 }
301 case IPPROTO_ICMPV6: {
302 struct icmp6hdr _icmp6h;
303 const struct icmp6hdr *ic;
304
305 /* Max length: 13 "PROTO=ICMPv6 " */
306 sb_add(m, "PROTO=ICMPv6 ");
307
308 if (fragment)
309 break;
310
311 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
312 ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
313 if (ic == NULL) {
314 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
315 return;
316 }
317
318 /* Max length: 18 "TYPE=255 CODE=255 " */
319 sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
320
321 switch (ic->icmp6_type) {
322 case ICMPV6_ECHO_REQUEST:
323 case ICMPV6_ECHO_REPLY:
324 /* Max length: 19 "ID=65535 SEQ=65535 " */
325 sb_add(m, "ID=%u SEQ=%u ",
326 ntohs(ic->icmp6_identifier),
327 ntohs(ic->icmp6_sequence));
328 break;
329 case ICMPV6_MGM_QUERY:
330 case ICMPV6_MGM_REPORT:
331 case ICMPV6_MGM_REDUCTION:
332 break;
333
334 case ICMPV6_PARAMPROB:
335 /* Max length: 17 "POINTER=ffffffff " */
336 sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
337 /* Fall through */
338 case ICMPV6_DEST_UNREACH:
339 case ICMPV6_PKT_TOOBIG:
340 case ICMPV6_TIME_EXCEED:
341 /* Max length: 3+maxlen */
342 if (recurse) {
343 sb_add(m, "[");
344 dump_packet(m, info, skb,
345 ptr + sizeof(_icmp6h), 0);
346 sb_add(m, "] ");
347 }
348
349 /* Max length: 10 "MTU=65535 " */
350 if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
351 sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
352 }
353 break;
354 }
355 /* Max length: 10 "PROTO=255 " */
356 default:
357 sb_add(m, "PROTO=%u ", currenthdr);
358 }
359
360 /* Max length: 15 "UID=4294967295 " */
361 if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
362 read_lock_bh(&skb->sk->sk_callback_lock);
363 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
364 sb_add(m, "UID=%u GID=%u ",
365 skb->sk->sk_socket->file->f_cred->fsuid,
366 skb->sk->sk_socket->file->f_cred->fsgid);
367 read_unlock_bh(&skb->sk->sk_callback_lock);
368 }
369
370 /* Max length: 16 "MARK=0xFFFFFFFF " */
371 if (!recurse && skb->mark)
372 sb_add(m, "MARK=0x%x ", skb->mark);
373}
374
375static void dump_mac_header(struct sbuff *m,
376 const struct nf_loginfo *info,
377 const struct sk_buff *skb)
378{
379 struct net_device *dev = skb->dev;
380 unsigned int logflags = 0;
381
382 if (info->type == NF_LOG_TYPE_LOG)
383 logflags = info->u.log.logflags;
384
385 if (!(logflags & IP6T_LOG_MACDECODE))
386 goto fallback;
387
388 switch (dev->type) {
389 case ARPHRD_ETHER:
390 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
391 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
392 ntohs(eth_hdr(skb)->h_proto));
393 return;
394 default:
395 break;
396 }
397
398fallback:
399 sb_add(m, "MAC=");
400 if (dev->hard_header_len &&
401 skb->mac_header != skb->network_header) {
402 const unsigned char *p = skb_mac_header(skb);
403 unsigned int len = dev->hard_header_len;
404 unsigned int i;
405
406 if (dev->type == ARPHRD_SIT &&
407 (p -= ETH_HLEN) < skb->head)
408 p = NULL;
409
410 if (p != NULL) {
411 sb_add(m, "%02x", *p++);
412 for (i = 1; i < len; i++)
413 sb_add(m, ":%02x", *p++);
414 }
415 sb_add(m, " ");
416
417 if (dev->type == ARPHRD_SIT) {
418 const struct iphdr *iph =
419 (struct iphdr *)skb_mac_header(skb);
420 sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
421 }
422 } else
423 sb_add(m, " ");
424}
425
426static struct nf_loginfo default_loginfo = {
427 .type = NF_LOG_TYPE_LOG,
428 .u = {
429 .log = {
430 .level = 5,
431 .logflags = NF_LOG_MASK,
432 },
433 },
434};
435
436static void
437ip6t_log_packet(u_int8_t pf,
438 unsigned int hooknum,
439 const struct sk_buff *skb,
440 const struct net_device *in,
441 const struct net_device *out,
442 const struct nf_loginfo *loginfo,
443 const char *prefix)
444{
445 struct sbuff *m = sb_open();
446
447 if (!loginfo)
448 loginfo = &default_loginfo;
449
450 sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
451 prefix,
452 in ? in->name : "",
453 out ? out->name : "");
454
455 if (in != NULL)
456 dump_mac_header(m, loginfo, skb);
457
458 dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
459
460 sb_close(m);
461}
462
463static unsigned int
464log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
465{
466 const struct ip6t_log_info *loginfo = par->targinfo;
467 struct nf_loginfo li;
468
469 li.type = NF_LOG_TYPE_LOG;
470 li.u.log.level = loginfo->level;
471 li.u.log.logflags = loginfo->logflags;
472
473 ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out,
474 &li, loginfo->prefix);
475 return XT_CONTINUE;
476}
477
478
479static int log_tg6_check(const struct xt_tgchk_param *par)
480{
481 const struct ip6t_log_info *loginfo = par->targinfo;
482
483 if (loginfo->level >= 8) {
484 pr_debug("level %u >= 8\n", loginfo->level);
485 return -EINVAL;
486 }
487 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
488 pr_debug("prefix not null-terminated\n");
489 return -EINVAL;
490 }
491 return 0;
492}
493
494static struct xt_target log_tg6_reg __read_mostly = {
495 .name = "LOG",
496 .family = NFPROTO_IPV6,
497 .target = log_tg6,
498 .targetsize = sizeof(struct ip6t_log_info),
499 .checkentry = log_tg6_check,
500 .me = THIS_MODULE,
501};
502
503static struct nf_logger ip6t_logger __read_mostly = {
504 .name = "ip6t_LOG",
505 .logfn = &ip6t_log_packet,
506 .me = THIS_MODULE,
507};
508
509static int __init log_tg6_init(void)
510{
511 int ret;
512
513 ret = xt_register_target(&log_tg6_reg);
514 if (ret < 0)
515 return ret;
516 nf_log_register(NFPROTO_IPV6, &ip6t_logger);
517 return 0;
518}
519
520static void __exit log_tg6_exit(void)
521{
522 nf_log_unregister(&ip6t_logger);
523 xt_unregister_target(&log_tg6_reg);
524}
525
526module_init(log_tg6_init);
527module_exit(log_tg6_exit);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index aad2fa41cf46..fd4fb34c51c7 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -114,8 +114,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
114 GFP_ATOMIC); 114 GFP_ATOMIC);
115 115
116 if (!nskb) { 116 if (!nskb) {
117 if (net_ratelimit()) 117 net_dbg_ratelimited("cannot alloc skb\n");
118 pr_debug("cannot alloc skb\n");
119 dst_release(dst); 118 dst_release(dst);
120 return; 119 return;
121 } 120 }
@@ -210,8 +209,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
210 send_reset(net, skb); 209 send_reset(net, skb);
211 break; 210 break;
212 default: 211 default:
213 if (net_ratelimit()) 212 net_info_ratelimited("case %u not handled yet\n", reject->with);
214 pr_info("case %u not handled yet\n", reject->with);
215 break; 213 break;
216 } 214 }
217 215
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 89cccc5a9c92..04099ab7d2e3 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -41,11 +41,11 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
41 struct ip_auth_hdr _ah; 41 struct ip_auth_hdr _ah;
42 const struct ip_auth_hdr *ah; 42 const struct ip_auth_hdr *ah;
43 const struct ip6t_ah *ahinfo = par->matchinfo; 43 const struct ip6t_ah *ahinfo = par->matchinfo;
44 unsigned int ptr; 44 unsigned int ptr = 0;
45 unsigned int hdrlen = 0; 45 unsigned int hdrlen = 0;
46 int err; 46 int err;
47 47
48 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); 48 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
49 if (err < 0) { 49 if (err < 0) {
50 if (err != -ENOENT) 50 if (err != -ENOENT)
51 par->hotdrop = true; 51 par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index eda898fda6ca..3b5735e56bfe 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -40,10 +40,10 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
40 struct frag_hdr _frag; 40 struct frag_hdr _frag;
41 const struct frag_hdr *fh; 41 const struct frag_hdr *fh;
42 const struct ip6t_frag *fraginfo = par->matchinfo; 42 const struct ip6t_frag *fraginfo = par->matchinfo;
43 unsigned int ptr; 43 unsigned int ptr = 0;
44 int err; 44 int err;
45 45
46 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); 46 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
47 if (err < 0) { 47 if (err < 0) {
48 if (err != -ENOENT) 48 if (err != -ENOENT)
49 par->hotdrop = true; 49 par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 59df051eaef6..01df142bb027 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -50,7 +50,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
50 const struct ipv6_opt_hdr *oh; 50 const struct ipv6_opt_hdr *oh;
51 const struct ip6t_opts *optinfo = par->matchinfo; 51 const struct ip6t_opts *optinfo = par->matchinfo;
52 unsigned int temp; 52 unsigned int temp;
53 unsigned int ptr; 53 unsigned int ptr = 0;
54 unsigned int hdrlen = 0; 54 unsigned int hdrlen = 0;
55 bool ret = false; 55 bool ret = false;
56 u8 _opttype; 56 u8 _opttype;
@@ -62,7 +62,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
62 62
63 err = ipv6_find_hdr(skb, &ptr, 63 err = ipv6_find_hdr(skb, &ptr,
64 (par->match == &hbh_mt6_reg[0]) ? 64 (par->match == &hbh_mt6_reg[0]) ?
65 NEXTHDR_HOP : NEXTHDR_DEST, NULL); 65 NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL);
66 if (err < 0) { 66 if (err < 0) {
67 if (err != -ENOENT) 67 if (err != -ENOENT)
68 par->hotdrop = true; 68 par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index d8488c50a8e0..2c99b94eeca3 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -42,14 +42,14 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
42 const struct ipv6_rt_hdr *rh; 42 const struct ipv6_rt_hdr *rh;
43 const struct ip6t_rt *rtinfo = par->matchinfo; 43 const struct ip6t_rt *rtinfo = par->matchinfo;
44 unsigned int temp; 44 unsigned int temp;
45 unsigned int ptr; 45 unsigned int ptr = 0;
46 unsigned int hdrlen = 0; 46 unsigned int hdrlen = 0;
47 bool ret = false; 47 bool ret = false;
48 struct in6_addr _addr; 48 struct in6_addr _addr;
49 const struct in6_addr *ap; 49 const struct in6_addr *ap;
50 int err; 50 int err;
51 51
52 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); 52 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
53 if (err < 0) { 53 if (err < 0) {
54 if (err != -ENOENT) 54 if (err != -ENOENT)
55 par->hotdrop = true; 55 par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index a8f6da97e3b2..325e59a0224f 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
44static struct nf_hook_ops *filter_ops __read_mostly; 44static struct nf_hook_ops *filter_ops __read_mostly;
45 45
46/* Default to forward because I got too much mail already. */ 46/* Default to forward because I got too much mail already. */
47static bool forward = NF_ACCEPT; 47static bool forward = true;
48module_param(forward, bool, 0000); 48module_param(forward, bool, 0000);
49 49
50static int __net_init ip6table_filter_net_init(struct net *net) 50static int __net_init ip6table_filter_net_init(struct net *net)
@@ -56,7 +56,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
56 return -ENOMEM; 56 return -ENOMEM;
57 /* Entry 1 is the FORWARD hook */ 57 /* Entry 1 is the FORWARD hook */
58 ((struct ip6t_standard *)repl->entries)[1].target.verdict = 58 ((struct ip6t_standard *)repl->entries)[1].target.verdict =
59 -forward - 1; 59 forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;
60 60
61 net->ipv6.ip6table_filter = 61 net->ipv6.ip6table_filter =
62 ip6t_register_table(net, &packet_filter, repl); 62 ip6t_register_table(net, &packet_filter, repl);
@@ -80,11 +80,6 @@ static int __init ip6table_filter_init(void)
80{ 80{
81 int ret; 81 int ret;
82 82
83 if (forward < 0 || forward > NF_MAX_VERDICT) {
84 pr_err("iptables forward must be 0 or 1\n");
85 return -EINVAL;
86 }
87
88 ret = register_pernet_subsys(&ip6table_filter_net_ops); 83 ret = register_pernet_subsys(&ip6table_filter_net_ops);
89 if (ret < 0) 84 if (ret < 0)
90 return ret; 85 return ret;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 00d19173db7e..4d782405f125 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,8 +42,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
42 /* root is playing with raw sockets. */ 42 /* root is playing with raw sockets. */
43 if (skb->len < sizeof(struct iphdr) || 43 if (skb->len < sizeof(struct iphdr) ||
44 ip_hdrlen(skb) < sizeof(struct iphdr)) { 44 ip_hdrlen(skb) < sizeof(struct iphdr)) {
45 if (net_ratelimit()) 45 net_warn_ratelimited("ip6t_hook: happy cracking\n");
46 pr_warning("ip6t_hook: happy cracking.\n");
47 return NF_ACCEPT; 46 return NF_ACCEPT;
48 } 47 }
49#endif 48#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4111050a9fc5..3224ef90a21a 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -232,8 +232,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
232{ 232{
233 /* root is playing with raw sockets. */ 233 /* root is playing with raw sockets. */
234 if (skb->len < sizeof(struct ipv6hdr)) { 234 if (skb->len < sizeof(struct ipv6hdr)) {
235 if (net_ratelimit()) 235 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
236 pr_notice("ipv6_conntrack_local: packet too short\n");
237 return NF_ACCEPT; 236 return NF_ACCEPT;
238 } 237 }
239 return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); 238 return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
@@ -278,10 +277,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
278static int ipv6_tuple_to_nlattr(struct sk_buff *skb, 277static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
279 const struct nf_conntrack_tuple *tuple) 278 const struct nf_conntrack_tuple *tuple)
280{ 279{
281 NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, 280 if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
282 &tuple->src.u3.ip6); 281 &tuple->src.u3.ip6) ||
283 NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, 282 nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
284 &tuple->dst.u3.ip6); 283 &tuple->dst.u3.ip6))
284 goto nla_put_failure;
285 return 0; 285 return 0;
286 286
287nla_put_failure: 287nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 7c05e7eacbc6..3e81904fbbcd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -88,25 +88,31 @@ static int icmpv6_print_tuple(struct seq_file *s,
88 ntohs(tuple->src.u.icmp.id)); 88 ntohs(tuple->src.u.icmp.id));
89} 89}
90 90
91static unsigned int *icmpv6_get_timeouts(struct net *net)
92{
93 return &nf_ct_icmpv6_timeout;
94}
95
91/* Returns verdict for packet, or -1 for invalid. */ 96/* Returns verdict for packet, or -1 for invalid. */
92static int icmpv6_packet(struct nf_conn *ct, 97static int icmpv6_packet(struct nf_conn *ct,
93 const struct sk_buff *skb, 98 const struct sk_buff *skb,
94 unsigned int dataoff, 99 unsigned int dataoff,
95 enum ip_conntrack_info ctinfo, 100 enum ip_conntrack_info ctinfo,
96 u_int8_t pf, 101 u_int8_t pf,
97 unsigned int hooknum) 102 unsigned int hooknum,
103 unsigned int *timeout)
98{ 104{
99 /* Do not immediately delete the connection after the first 105 /* Do not immediately delete the connection after the first
100 successful reply to avoid excessive conntrackd traffic 106 successful reply to avoid excessive conntrackd traffic
101 and also to handle correctly ICMP echo reply duplicates. */ 107 and also to handle correctly ICMP echo reply duplicates. */
102 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout); 108 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
103 109
104 return NF_ACCEPT; 110 return NF_ACCEPT;
105} 111}
106 112
107/* Called when a new connection for this protocol found. */ 113/* Called when a new connection for this protocol found. */
108static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, 114static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
109 unsigned int dataoff) 115 unsigned int dataoff, unsigned int *timeouts)
110{ 116{
111 static const u_int8_t valid_new[] = { 117 static const u_int8_t valid_new[] = {
112 [ICMPV6_ECHO_REQUEST - 128] = 1, 118 [ICMPV6_ECHO_REQUEST - 128] = 1,
@@ -228,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
228static int icmpv6_tuple_to_nlattr(struct sk_buff *skb, 234static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
229 const struct nf_conntrack_tuple *t) 235 const struct nf_conntrack_tuple *t)
230{ 236{
231 NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id); 237 if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
232 NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type); 238 nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
233 NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code); 239 nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
234 240 goto nla_put_failure;
235 return 0; 241 return 0;
236 242
237nla_put_failure: 243nla_put_failure:
@@ -270,6 +276,44 @@ static int icmpv6_nlattr_tuple_size(void)
270} 276}
271#endif 277#endif
272 278
279#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
280
281#include <linux/netfilter/nfnetlink.h>
282#include <linux/netfilter/nfnetlink_cttimeout.h>
283
284static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
285{
286 unsigned int *timeout = data;
287
288 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
289 *timeout =
290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
291 } else {
292 /* Set default ICMPv6 timeout. */
293 *timeout = nf_ct_icmpv6_timeout;
294 }
295 return 0;
296}
297
298static int
299icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
300{
301 const unsigned int *timeout = data;
302
303 if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
304 goto nla_put_failure;
305 return 0;
306
307nla_put_failure:
308 return -ENOSPC;
309}
310
311static const struct nla_policy
312icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
313 [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
314};
315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
316
273#ifdef CONFIG_SYSCTL 317#ifdef CONFIG_SYSCTL
274static struct ctl_table_header *icmpv6_sysctl_header; 318static struct ctl_table_header *icmpv6_sysctl_header;
275static struct ctl_table icmpv6_sysctl_table[] = { 319static struct ctl_table icmpv6_sysctl_table[] = {
@@ -293,6 +337,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
293 .invert_tuple = icmpv6_invert_tuple, 337 .invert_tuple = icmpv6_invert_tuple,
294 .print_tuple = icmpv6_print_tuple, 338 .print_tuple = icmpv6_print_tuple,
295 .packet = icmpv6_packet, 339 .packet = icmpv6_packet,
340 .get_timeouts = icmpv6_get_timeouts,
296 .new = icmpv6_new, 341 .new = icmpv6_new,
297 .error = icmpv6_error, 342 .error = icmpv6_error,
298#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 343#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
@@ -301,6 +346,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
301 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 346 .nlattr_to_tuple = icmpv6_nlattr_to_tuple,
302 .nla_policy = icmpv6_nla_policy, 347 .nla_policy = icmpv6_nla_policy,
303#endif 348#endif
349#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
350 .ctnl_timeout = {
351 .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
352 .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
353 .nlattr_max = CTA_TIMEOUT_ICMP_MAX,
354 .obj_size = sizeof(unsigned int),
355 .nla_policy = icmpv6_timeout_nla_policy,
356 },
357#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
304#ifdef CONFIG_SYSCTL 358#ifdef CONFIG_SYSCTL
305 .ctl_table_header = &icmpv6_sysctl_header, 359 .ctl_table_header = &icmpv6_sysctl_header,
306 .ctl_table = icmpv6_sysctl_table, 360 .ctl_table = icmpv6_sysctl_table,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0298d3..c9c78c2e666b 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -444,12 +444,11 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
444 return head; 444 return head;
445 445
446out_oversize: 446out_oversize:
447 if (net_ratelimit()) 447 net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
448 printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len); 448 payload_len);
449 goto out_fail; 449 goto out_fail;
450out_oom: 450out_oom:
451 if (net_ratelimit()) 451 net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
452 printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
453out_fail: 452out_fail:
454 return NULL; 453 return NULL;
455} 454}
@@ -626,8 +625,8 @@ int nf_ct_frag6_init(void)
626 inet_frags_init(&nf_frags); 625 inet_frags_init(&nf_frags);
627 626
628#ifdef CONFIG_SYSCTL 627#ifdef CONFIG_SYSCTL
629 nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, 628 nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
630 nf_ct_frag6_sysctl_table); 629 nf_ct_frag6_sysctl_table);
631 if (!nf_ct_frag6_sysctl_header) { 630 if (!nf_ct_frag6_sysctl_header) {
632 inet_frags_fini(&nf_frags); 631 inet_frags_fini(&nf_frags);
633 return -ENOMEM; 632 return -ENOMEM;
@@ -640,7 +639,7 @@ int nf_ct_frag6_init(void)
640void nf_ct_frag6_cleanup(void) 639void nf_ct_frag6_cleanup(void)
641{ 640{
642#ifdef CONFIG_SYSCTL 641#ifdef CONFIG_SYSCTL
643 unregister_sysctl_table(nf_ct_frag6_sysctl_header); 642 unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
644 nf_ct_frag6_sysctl_header = NULL; 643 nf_ct_frag6_sysctl_header = NULL;
645#endif 644#endif
646 inet_frags_fini(&nf_frags); 645 inet_frags_fini(&nf_frags);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d02f7e4dd611..93d69836fded 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
72 const struct in6_addr *rmt_addr, int dif) 72 const struct in6_addr *rmt_addr, int dif)
73{ 73{
74 struct hlist_node *node; 74 struct hlist_node *node;
75 int is_multicast = ipv6_addr_is_multicast(loc_addr); 75 bool is_multicast = ipv6_addr_is_multicast(loc_addr);
76 76
77 sk_for_each_from(sk, node) 77 sk_for_each_from(sk, node)
78 if (inet_sk(sk)->inet_num == num) { 78 if (inet_sk(sk)->inet_num == num) {
@@ -153,12 +153,12 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
153 * 153 *
154 * Caller owns SKB so we must make clones. 154 * Caller owns SKB so we must make clones.
155 */ 155 */
156static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) 156static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
157{ 157{
158 const struct in6_addr *saddr; 158 const struct in6_addr *saddr;
159 const struct in6_addr *daddr; 159 const struct in6_addr *daddr;
160 struct sock *sk; 160 struct sock *sk;
161 int delivered = 0; 161 bool delivered = false;
162 __u8 hash; 162 __u8 hash;
163 struct net *net; 163 struct net *net;
164 164
@@ -179,7 +179,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
179 while (sk) { 179 while (sk) {
180 int filtered; 180 int filtered;
181 181
182 delivered = 1; 182 delivered = true;
183 switch (nexthdr) { 183 switch (nexthdr) {
184 case IPPROTO_ICMPV6: 184 case IPPROTO_ICMPV6:
185 filtered = icmpv6_filter(sk, skb); 185 filtered = icmpv6_filter(sk, skb);
@@ -225,7 +225,7 @@ out:
225 return delivered; 225 return delivered;
226} 226}
227 227
228int raw6_local_deliver(struct sk_buff *skb, int nexthdr) 228bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
229{ 229{
230 struct sock *raw_sk; 230 struct sock *raw_sk;
231 231
@@ -856,6 +856,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
856 856
857 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 857 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
858 fl6.flowi6_oif = np->mcast_oif; 858 fl6.flowi6_oif = np->mcast_oif;
859 else if (!fl6.flowi6_oif)
860 fl6.flowi6_oif = np->ucast_oif;
859 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 861 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
860 862
861 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); 863 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b69fae76a6f1..4ff9af628e72 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -134,15 +134,16 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q)
134 return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd); 134 return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
135} 135}
136 136
137int ip6_frag_match(struct inet_frag_queue *q, void *a) 137bool ip6_frag_match(struct inet_frag_queue *q, void *a)
138{ 138{
139 struct frag_queue *fq; 139 struct frag_queue *fq;
140 struct ip6_create_arg *arg = a; 140 struct ip6_create_arg *arg = a;
141 141
142 fq = container_of(q, struct frag_queue, q); 142 fq = container_of(q, struct frag_queue, q);
143 return (fq->id == arg->id && fq->user == arg->user && 143 return fq->id == arg->id &&
144 ipv6_addr_equal(&fq->saddr, arg->src) && 144 fq->user == arg->user &&
145 ipv6_addr_equal(&fq->daddr, arg->dst)); 145 ipv6_addr_equal(&fq->saddr, arg->src) &&
146 ipv6_addr_equal(&fq->daddr, arg->dst);
146} 147}
147EXPORT_SYMBOL(ip6_frag_match); 148EXPORT_SYMBOL(ip6_frag_match);
148 149
@@ -336,12 +337,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
336 } 337 }
337 338
338found: 339found:
339 /* RFC5722, Section 4: 340 /* RFC5722, Section 4, amended by Errata ID : 3089
340 * When reassembling an IPv6 datagram, if 341 * When reassembling an IPv6 datagram, if
341 * one or more its constituent fragments is determined to be an 342 * one or more its constituent fragments is determined to be an
342 * overlapping fragment, the entire datagram (and any constituent 343 * overlapping fragment, the entire datagram (and any constituent
343 * fragments, including those not yet received) MUST be silently 344 * fragments) MUST be silently discarded.
344 * discarded.
345 */ 345 */
346 346
347 /* Check for overlap with preceding fragment. */ 347 /* Check for overlap with preceding fragment. */
@@ -415,6 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
415 struct sk_buff *fp, *head = fq->q.fragments; 415 struct sk_buff *fp, *head = fq->q.fragments;
416 int payload_len; 416 int payload_len;
417 unsigned int nhoff; 417 unsigned int nhoff;
418 int sum_truesize;
418 419
419 fq_kill(fq); 420 fq_kill(fq);
420 421
@@ -434,7 +435,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
434 skb_morph(head, fq->q.fragments); 435 skb_morph(head, fq->q.fragments);
435 head->next = fq->q.fragments->next; 436 head->next = fq->q.fragments->next;
436 437
437 kfree_skb(fq->q.fragments); 438 consume_skb(fq->q.fragments);
438 fq->q.fragments = head; 439 fq->q.fragments = head;
439 } 440 }
440 441
@@ -484,20 +485,33 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
484 head->mac_header += sizeof(struct frag_hdr); 485 head->mac_header += sizeof(struct frag_hdr);
485 head->network_header += sizeof(struct frag_hdr); 486 head->network_header += sizeof(struct frag_hdr);
486 487
487 skb_shinfo(head)->frag_list = head->next;
488 skb_reset_transport_header(head); 488 skb_reset_transport_header(head);
489 skb_push(head, head->data - skb_network_header(head)); 489 skb_push(head, head->data - skb_network_header(head));
490 490
491 for (fp=head->next; fp; fp = fp->next) { 491 sum_truesize = head->truesize;
492 head->data_len += fp->len; 492 for (fp = head->next; fp;) {
493 head->len += fp->len; 493 bool headstolen;
494 int delta;
495 struct sk_buff *next = fp->next;
496
497 sum_truesize += fp->truesize;
494 if (head->ip_summed != fp->ip_summed) 498 if (head->ip_summed != fp->ip_summed)
495 head->ip_summed = CHECKSUM_NONE; 499 head->ip_summed = CHECKSUM_NONE;
496 else if (head->ip_summed == CHECKSUM_COMPLETE) 500 else if (head->ip_summed == CHECKSUM_COMPLETE)
497 head->csum = csum_add(head->csum, fp->csum); 501 head->csum = csum_add(head->csum, fp->csum);
498 head->truesize += fp->truesize; 502
503 if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
504 kfree_skb_partial(fp, headstolen);
505 } else {
506 if (!skb_shinfo(head)->frag_list)
507 skb_shinfo(head)->frag_list = fp;
508 head->data_len += fp->len;
509 head->len += fp->len;
510 head->truesize += fp->truesize;
511 }
512 fp = next;
499 } 513 }
500 atomic_sub(head->truesize, &fq->q.net->mem); 514 atomic_sub(sum_truesize, &fq->q.net->mem);
501 515
502 head->next = NULL; 516 head->next = NULL;
503 head->dev = dev; 517 head->dev = dev;
@@ -519,12 +533,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
519 return 1; 533 return 1;
520 534
521out_oversize: 535out_oversize:
522 if (net_ratelimit()) 536 net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
523 printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
524 goto out_fail; 537 goto out_fail;
525out_oom: 538out_oom:
526 if (net_ratelimit()) 539 net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
527 printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
528out_fail: 540out_fail:
529 rcu_read_lock(); 541 rcu_read_lock();
530 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 542 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
@@ -647,7 +659,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
647 table[2].data = &net->ipv6.frags.timeout; 659 table[2].data = &net->ipv6.frags.timeout;
648 } 660 }
649 661
650 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); 662 hdr = register_net_sysctl(net, "net/ipv6", table);
651 if (hdr == NULL) 663 if (hdr == NULL)
652 goto err_reg; 664 goto err_reg;
653 665
@@ -675,7 +687,7 @@ static struct ctl_table_header *ip6_ctl_header;
675 687
676static int ip6_frags_sysctl_register(void) 688static int ip6_frags_sysctl_register(void)
677{ 689{
678 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path, 690 ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
679 ip6_frags_ctl_table); 691 ip6_frags_ctl_table);
680 return ip6_ctl_header == NULL ? -ENOMEM : 0; 692 return ip6_ctl_header == NULL ? -ENOMEM : 0;
681} 693}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 22b766407de1..999a982ad3fd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -24,6 +24,8 @@
24 * Fixed routing subtrees. 24 * Fixed routing subtrees.
25 */ 25 */
26 26
27#define pr_fmt(fmt) "IPv6: " fmt
28
27#include <linux/capability.h> 29#include <linux/capability.h>
28#include <linux/errno.h> 30#include <linux/errno.h>
29#include <linux/export.h> 31#include <linux/export.h>
@@ -62,7 +64,7 @@
62#include <linux/sysctl.h> 64#include <linux/sysctl.h>
63#endif 65#endif
64 66
65static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 67static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
66 const struct in6_addr *dest); 68 const struct in6_addr *dest);
67static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 69static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
68static unsigned int ip6_default_advmss(const struct dst_entry *dst); 70static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -82,7 +84,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
82static struct rt6_info *rt6_add_route_info(struct net *net, 84static struct rt6_info *rt6_add_route_info(struct net *net,
83 const struct in6_addr *prefix, int prefixlen, 85 const struct in6_addr *prefix, int prefixlen,
84 const struct in6_addr *gwaddr, int ifindex, 86 const struct in6_addr *gwaddr, int ifindex,
85 unsigned pref); 87 unsigned int pref);
86static struct rt6_info *rt6_get_route_info(struct net *net, 88static struct rt6_info *rt6_get_route_info(struct net *net,
87 const struct in6_addr *prefix, int prefixlen, 89 const struct in6_addr *prefix, int prefixlen,
88 const struct in6_addr *gwaddr, int ifindex); 90 const struct in6_addr *gwaddr, int ifindex);
@@ -121,9 +123,22 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
121 return p; 123 return p;
122} 124}
123 125
126static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
127{
128 struct in6_addr *p = &rt->rt6i_gateway;
129
130 if (!ipv6_addr_any(p))
131 return (const void *) p;
132 return daddr;
133}
134
124static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) 135static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
125{ 136{
126 struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 137 struct rt6_info *rt = (struct rt6_info *) dst;
138 struct neighbour *n;
139
140 daddr = choose_neigh_daddr(rt, daddr);
141 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
127 if (n) 142 if (n)
128 return n; 143 return n;
129 return neigh_create(&nd_tbl, daddr, dst->dev); 144 return neigh_create(&nd_tbl, daddr, dst->dev);
@@ -272,6 +287,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
272 rt->rt6i_idev = NULL; 287 rt->rt6i_idev = NULL;
273 in6_dev_put(idev); 288 in6_dev_put(idev);
274 } 289 }
290
291 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
292 dst_release(dst->from);
293
275 if (peer) { 294 if (peer) {
276 rt->rt6i_peer = NULL; 295 rt->rt6i_peer = NULL;
277 inet_putpeer(peer); 296 inet_putpeer(peer);
@@ -314,13 +333,22 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
314 } 333 }
315} 334}
316 335
317static __inline__ int rt6_check_expired(const struct rt6_info *rt) 336static bool rt6_check_expired(const struct rt6_info *rt)
318{ 337{
319 return (rt->rt6i_flags & RTF_EXPIRES) && 338 struct rt6_info *ort = NULL;
320 time_after(jiffies, rt->dst.expires); 339
340 if (rt->rt6i_flags & RTF_EXPIRES) {
341 if (time_after(jiffies, rt->dst.expires))
342 return true;
343 } else if (rt->dst.from) {
344 ort = (struct rt6_info *) rt->dst.from;
345 return (ort->rt6i_flags & RTF_EXPIRES) &&
346 time_after(jiffies, ort->dst.expires);
347 }
348 return false;
321} 349}
322 350
323static inline int rt6_need_strict(const struct in6_addr *daddr) 351static bool rt6_need_strict(const struct in6_addr *daddr)
324{ 352{
325 return ipv6_addr_type(daddr) & 353 return ipv6_addr_type(daddr) &
326 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 354 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -607,12 +635,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
607 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 635 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
608 636
609 if (rt) { 637 if (rt) {
610 if (!addrconf_finite_timeout(lifetime)) { 638 if (!addrconf_finite_timeout(lifetime))
611 rt->rt6i_flags &= ~RTF_EXPIRES; 639 rt6_clean_expires(rt);
612 } else { 640 else
613 rt->dst.expires = jiffies + HZ * lifetime; 641 rt6_set_expires(rt, jiffies + HZ * lifetime);
614 rt->rt6i_flags |= RTF_EXPIRES; 642
615 }
616 dst_release(&rt->dst); 643 dst_release(&rt->dst);
617 } 644 }
618 return 0; 645 return 0;
@@ -717,7 +744,7 @@ int ip6_ins_rt(struct rt6_info *rt)
717 return __ip6_ins_rt(rt, &info); 744 return __ip6_ins_rt(rt, &info);
718} 745}
719 746
720static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, 747static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
721 const struct in6_addr *daddr, 748 const struct in6_addr *daddr,
722 const struct in6_addr *saddr) 749 const struct in6_addr *saddr)
723{ 750{
@@ -769,9 +796,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
769 goto retry; 796 goto retry;
770 } 797 }
771 798
772 if (net_ratelimit()) 799 net_warn_ratelimited("Neighbour table overflow\n");
773 printk(KERN_WARNING
774 "ipv6: Neighbour table overflow.\n");
775 dst_free(&rt->dst); 800 dst_free(&rt->dst);
776 return NULL; 801 return NULL;
777 } 802 }
@@ -868,6 +893,16 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
868 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags); 893 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
869} 894}
870 895
896static struct dst_entry *ip6_route_input_lookup(struct net *net,
897 struct net_device *dev,
898 struct flowi6 *fl6, int flags)
899{
900 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
901 flags |= RT6_LOOKUP_F_IFACE;
902
903 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
904}
905
871void ip6_route_input(struct sk_buff *skb) 906void ip6_route_input(struct sk_buff *skb)
872{ 907{
873 const struct ipv6hdr *iph = ipv6_hdr(skb); 908 const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -882,10 +917,7 @@ void ip6_route_input(struct sk_buff *skb)
882 .flowi6_proto = iph->nexthdr, 917 .flowi6_proto = iph->nexthdr,
883 }; 918 };
884 919
885 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) 920 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
886 flags |= RT6_LOOKUP_F_IFACE;
887
888 skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
889} 921}
890 922
891static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, 923static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
@@ -934,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
934 rt->rt6i_idev = ort->rt6i_idev; 966 rt->rt6i_idev = ort->rt6i_idev;
935 if (rt->rt6i_idev) 967 if (rt->rt6i_idev)
936 in6_dev_hold(rt->rt6i_idev); 968 in6_dev_hold(rt->rt6i_idev);
937 rt->dst.expires = 0;
938 969
939 rt->rt6i_gateway = ort->rt6i_gateway; 970 rt->rt6i_gateway = ort->rt6i_gateway;
940 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 971 rt->rt6i_flags = ort->rt6i_flags;
972 rt6_clean_expires(rt);
941 rt->rt6i_metric = 0; 973 rt->rt6i_metric = 0;
942 974
943 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 975 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -999,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
999 1031
1000 rt = (struct rt6_info *) skb_dst(skb); 1032 rt = (struct rt6_info *) skb_dst(skb);
1001 if (rt) { 1033 if (rt) {
1002 if (rt->rt6i_flags & RTF_CACHE) { 1034 if (rt->rt6i_flags & RTF_CACHE)
1003 dst_set_expires(&rt->dst, 0); 1035 rt6_update_expires(rt, 0);
1004 rt->rt6i_flags |= RTF_EXPIRES; 1036 else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1005 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1006 rt->rt6i_node->fn_sernum = -1; 1037 rt->rt6i_node->fn_sernum = -1;
1007 } 1038 }
1008} 1039}
@@ -1251,7 +1282,7 @@ int ip6_route_add(struct fib6_config *cfg)
1251 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { 1282 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1252 table = fib6_get_table(net, cfg->fc_table); 1283 table = fib6_get_table(net, cfg->fc_table);
1253 if (!table) { 1284 if (!table) {
1254 printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n"); 1285 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1255 table = fib6_new_table(net, cfg->fc_table); 1286 table = fib6_new_table(net, cfg->fc_table);
1256 } 1287 }
1257 } else { 1288 } else {
@@ -1269,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
1269 } 1300 }
1270 1301
1271 rt->dst.obsolete = -1; 1302 rt->dst.obsolete = -1;
1272 rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? 1303
1273 jiffies + clock_t_to_jiffies(cfg->fc_expires) : 1304 if (cfg->fc_flags & RTF_EXPIRES)
1274 0; 1305 rt6_set_expires(rt, jiffies +
1306 clock_t_to_jiffies(cfg->fc_expires));
1307 else
1308 rt6_clean_expires(rt);
1275 1309
1276 if (cfg->fc_protocol == RTPROT_UNSPEC) 1310 if (cfg->fc_protocol == RTPROT_UNSPEC)
1277 cfg->fc_protocol = RTPROT_BOOT; 1311 cfg->fc_protocol = RTPROT_BOOT;
@@ -1609,9 +1643,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1609 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1643 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1610 1644
1611 if (rt == net->ipv6.ip6_null_entry) { 1645 if (rt == net->ipv6.ip6_null_entry) {
1612 if (net_ratelimit()) 1646 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1613 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1614 "for redirect target\n");
1615 goto out; 1647 goto out;
1616 } 1648 }
1617 1649
@@ -1716,8 +1748,8 @@ again:
1716 features |= RTAX_FEATURE_ALLFRAG; 1748 features |= RTAX_FEATURE_ALLFRAG;
1717 dst_metric_set(&rt->dst, RTAX_FEATURES, features); 1749 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1718 } 1750 }
1719 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1751 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1720 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1752 rt->rt6i_flags |= RTF_MODIFIED;
1721 goto out; 1753 goto out;
1722 } 1754 }
1723 1755
@@ -1745,9 +1777,8 @@ again:
1745 * which is 10 mins. After 10 mins the decreased pmtu is expired 1777 * which is 10 mins. After 10 mins the decreased pmtu is expired
1746 * and detecting PMTU increase will be automatically happened. 1778 * and detecting PMTU increase will be automatically happened.
1747 */ 1779 */
1748 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1780 rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1749 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1781 nrt->rt6i_flags |= RTF_DYNAMIC;
1750
1751 ip6_ins_rt(nrt); 1782 ip6_ins_rt(nrt);
1752 } 1783 }
1753out: 1784out:
@@ -1779,7 +1810,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
1779 * Misc support functions 1810 * Misc support functions
1780 */ 1811 */
1781 1812
1782static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 1813static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1783 const struct in6_addr *dest) 1814 const struct in6_addr *dest)
1784{ 1815{
1785 struct net *net = dev_net(ort->dst.dev); 1816 struct net *net = dev_net(ort->dst.dev);
@@ -1799,10 +1830,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1799 if (rt->rt6i_idev) 1830 if (rt->rt6i_idev)
1800 in6_dev_hold(rt->rt6i_idev); 1831 in6_dev_hold(rt->rt6i_idev);
1801 rt->dst.lastuse = jiffies; 1832 rt->dst.lastuse = jiffies;
1802 rt->dst.expires = 0;
1803 1833
1804 rt->rt6i_gateway = ort->rt6i_gateway; 1834 rt->rt6i_gateway = ort->rt6i_gateway;
1805 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 1835 rt->rt6i_flags = ort->rt6i_flags;
1836 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1837 (RTF_DEFAULT | RTF_ADDRCONF))
1838 rt6_set_from(rt, ort);
1839 else
1840 rt6_clean_expires(rt);
1806 rt->rt6i_metric = 0; 1841 rt->rt6i_metric = 0;
1807 1842
1808#ifdef CONFIG_IPV6_SUBTREES 1843#ifdef CONFIG_IPV6_SUBTREES
@@ -1850,7 +1885,7 @@ out:
1850static struct rt6_info *rt6_add_route_info(struct net *net, 1885static struct rt6_info *rt6_add_route_info(struct net *net,
1851 const struct in6_addr *prefix, int prefixlen, 1886 const struct in6_addr *prefix, int prefixlen,
1852 const struct in6_addr *gwaddr, int ifindex, 1887 const struct in6_addr *gwaddr, int ifindex,
1853 unsigned pref) 1888 unsigned int pref)
1854{ 1889{
1855 struct fib6_config cfg = { 1890 struct fib6_config cfg = {
1856 .fc_table = RT6_TABLE_INFO, 1891 .fc_table = RT6_TABLE_INFO,
@@ -2069,9 +2104,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2069 int err; 2104 int err;
2070 2105
2071 if (!rt) { 2106 if (!rt) {
2072 if (net_ratelimit()) 2107 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2073 pr_warning("IPv6: Maximum number of routes reached,"
2074 " consider increasing route/max_size.\n");
2075 return ERR_PTR(-ENOMEM); 2108 return ERR_PTR(-ENOMEM);
2076 } 2109 }
2077 2110
@@ -2180,10 +2213,9 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
2180 icmp6_clean_all(fib6_ifdown, &adn); 2213 icmp6_clean_all(fib6_ifdown, &adn);
2181} 2214}
2182 2215
2183struct rt6_mtu_change_arg 2216struct rt6_mtu_change_arg {
2184{
2185 struct net_device *dev; 2217 struct net_device *dev;
2186 unsigned mtu; 2218 unsigned int mtu;
2187}; 2219};
2188 2220
2189static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) 2221static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
@@ -2225,7 +2257,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2225 return 0; 2257 return 0;
2226} 2258}
2227 2259
2228void rt6_mtu_change(struct net_device *dev, unsigned mtu) 2260void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2229{ 2261{
2230 struct rt6_mtu_change_arg arg = { 2262 struct rt6_mtu_change_arg arg = {
2231 .dev = dev, 2263 .dev = dev,
@@ -2393,7 +2425,8 @@ static int rt6_fill_node(struct net *net,
2393 else 2425 else
2394 table = RT6_TABLE_UNSPEC; 2426 table = RT6_TABLE_UNSPEC;
2395 rtm->rtm_table = table; 2427 rtm->rtm_table = table;
2396 NLA_PUT_U32(skb, RTA_TABLE, table); 2428 if (nla_put_u32(skb, RTA_TABLE, table))
2429 goto nla_put_failure;
2397 if (rt->rt6i_flags & RTF_REJECT) 2430 if (rt->rt6i_flags & RTF_REJECT)
2398 rtm->rtm_type = RTN_UNREACHABLE; 2431 rtm->rtm_type = RTN_UNREACHABLE;
2399 else if (rt->rt6i_flags & RTF_LOCAL) 2432 else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2416,16 +2449,20 @@ static int rt6_fill_node(struct net *net,
2416 rtm->rtm_flags |= RTM_F_CLONED; 2449 rtm->rtm_flags |= RTM_F_CLONED;
2417 2450
2418 if (dst) { 2451 if (dst) {
2419 NLA_PUT(skb, RTA_DST, 16, dst); 2452 if (nla_put(skb, RTA_DST, 16, dst))
2453 goto nla_put_failure;
2420 rtm->rtm_dst_len = 128; 2454 rtm->rtm_dst_len = 128;
2421 } else if (rtm->rtm_dst_len) 2455 } else if (rtm->rtm_dst_len)
2422 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr); 2456 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2457 goto nla_put_failure;
2423#ifdef CONFIG_IPV6_SUBTREES 2458#ifdef CONFIG_IPV6_SUBTREES
2424 if (src) { 2459 if (src) {
2425 NLA_PUT(skb, RTA_SRC, 16, src); 2460 if (nla_put(skb, RTA_SRC, 16, src))
2461 goto nla_put_failure;
2426 rtm->rtm_src_len = 128; 2462 rtm->rtm_src_len = 128;
2427 } else if (rtm->rtm_src_len) 2463 } else if (rtm->rtm_src_len &&
2428 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr); 2464 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2465 goto nla_put_failure;
2429#endif 2466#endif
2430 if (iif) { 2467 if (iif) {
2431#ifdef CONFIG_IPV6_MROUTE 2468#ifdef CONFIG_IPV6_MROUTE
@@ -2443,17 +2480,20 @@ static int rt6_fill_node(struct net *net,
2443 } 2480 }
2444 } else 2481 } else
2445#endif 2482#endif
2446 NLA_PUT_U32(skb, RTA_IIF, iif); 2483 if (nla_put_u32(skb, RTA_IIF, iif))
2484 goto nla_put_failure;
2447 } else if (dst) { 2485 } else if (dst) {
2448 struct in6_addr saddr_buf; 2486 struct in6_addr saddr_buf;
2449 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0) 2487 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2450 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2488 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2489 goto nla_put_failure;
2451 } 2490 }
2452 2491
2453 if (rt->rt6i_prefsrc.plen) { 2492 if (rt->rt6i_prefsrc.plen) {
2454 struct in6_addr saddr_buf; 2493 struct in6_addr saddr_buf;
2455 saddr_buf = rt->rt6i_prefsrc.addr; 2494 saddr_buf = rt->rt6i_prefsrc.addr;
2456 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2495 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2496 goto nla_put_failure;
2457 } 2497 }
2458 2498
2459 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2499 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2461,15 +2501,19 @@ static int rt6_fill_node(struct net *net,
2461 2501
2462 rcu_read_lock(); 2502 rcu_read_lock();
2463 n = dst_get_neighbour_noref(&rt->dst); 2503 n = dst_get_neighbour_noref(&rt->dst);
2464 if (n) 2504 if (n) {
2465 NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key); 2505 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
2506 rcu_read_unlock();
2507 goto nla_put_failure;
2508 }
2509 }
2466 rcu_read_unlock(); 2510 rcu_read_unlock();
2467 2511
2468 if (rt->dst.dev) 2512 if (rt->dst.dev &&
2469 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); 2513 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2470 2514 goto nla_put_failure;
2471 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); 2515 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2472 2516 goto nla_put_failure;
2473 if (!(rt->rt6i_flags & RTF_EXPIRES)) 2517 if (!(rt->rt6i_flags & RTF_EXPIRES))
2474 expires = 0; 2518 expires = 0;
2475 else if (rt->dst.expires - jiffies < INT_MAX) 2519 else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2520,7 +2564,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2520 struct sk_buff *skb; 2564 struct sk_buff *skb;
2521 struct rtmsg *rtm; 2565 struct rtmsg *rtm;
2522 struct flowi6 fl6; 2566 struct flowi6 fl6;
2523 int err, iif = 0; 2567 int err, iif = 0, oif = 0;
2524 2568
2525 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); 2569 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2526 if (err < 0) 2570 if (err < 0)
@@ -2547,19 +2591,34 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2547 iif = nla_get_u32(tb[RTA_IIF]); 2591 iif = nla_get_u32(tb[RTA_IIF]);
2548 2592
2549 if (tb[RTA_OIF]) 2593 if (tb[RTA_OIF])
2550 fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]); 2594 oif = nla_get_u32(tb[RTA_OIF]);
2551 2595
2552 if (iif) { 2596 if (iif) {
2553 struct net_device *dev; 2597 struct net_device *dev;
2598 int flags = 0;
2599
2554 dev = __dev_get_by_index(net, iif); 2600 dev = __dev_get_by_index(net, iif);
2555 if (!dev) { 2601 if (!dev) {
2556 err = -ENODEV; 2602 err = -ENODEV;
2557 goto errout; 2603 goto errout;
2558 } 2604 }
2605
2606 fl6.flowi6_iif = iif;
2607
2608 if (!ipv6_addr_any(&fl6.saddr))
2609 flags |= RT6_LOOKUP_F_HAS_SADDR;
2610
2611 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2612 flags);
2613 } else {
2614 fl6.flowi6_oif = oif;
2615
2616 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2559 } 2617 }
2560 2618
2561 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2619 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2562 if (!skb) { 2620 if (!skb) {
2621 dst_release(&rt->dst);
2563 err = -ENOBUFS; 2622 err = -ENOBUFS;
2564 goto errout; 2623 goto errout;
2565 } 2624 }
@@ -2570,7 +2629,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2570 skb_reset_mac_header(skb); 2629 skb_reset_mac_header(skb);
2571 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); 2630 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2572 2631
2573 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
2574 skb_dst_set(skb, &rt->dst); 2632 skb_dst_set(skb, &rt->dst);
2575 2633
2576 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, 2634 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 133768e52912..60415711563f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -17,6 +17,8 @@
17 * Fred Templin <fred.l.templin@boeing.com>: isatap support 17 * Fred Templin <fred.l.templin@boeing.com>: isatap support
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/capability.h> 23#include <linux/capability.h>
22#include <linux/errno.h> 24#include <linux/errno.h>
@@ -87,35 +89,51 @@ struct sit_net {
87 89
88/* often modified stats are per cpu, other are shared (netdev->stats) */ 90/* often modified stats are per cpu, other are shared (netdev->stats) */
89struct pcpu_tstats { 91struct pcpu_tstats {
90 unsigned long rx_packets; 92 u64 rx_packets;
91 unsigned long rx_bytes; 93 u64 rx_bytes;
92 unsigned long tx_packets; 94 u64 tx_packets;
93 unsigned long tx_bytes; 95 u64 tx_bytes;
94} __attribute__((aligned(4*sizeof(unsigned long)))); 96 struct u64_stats_sync syncp;
97};
95 98
96static struct net_device_stats *ipip6_get_stats(struct net_device *dev) 99static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
100 struct rtnl_link_stats64 *tot)
97{ 101{
98 struct pcpu_tstats sum = { 0 };
99 int i; 102 int i;
100 103
101 for_each_possible_cpu(i) { 104 for_each_possible_cpu(i) {
102 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 105 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
103 106 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
104 sum.rx_packets += tstats->rx_packets; 107 unsigned int start;
105 sum.rx_bytes += tstats->rx_bytes; 108
106 sum.tx_packets += tstats->tx_packets; 109 do {
107 sum.tx_bytes += tstats->tx_bytes; 110 start = u64_stats_fetch_begin_bh(&tstats->syncp);
111 rx_packets = tstats->rx_packets;
112 tx_packets = tstats->tx_packets;
113 rx_bytes = tstats->rx_bytes;
114 tx_bytes = tstats->tx_bytes;
115 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
116
117 tot->rx_packets += rx_packets;
118 tot->tx_packets += tx_packets;
119 tot->rx_bytes += rx_bytes;
120 tot->tx_bytes += tx_bytes;
108 } 121 }
109 dev->stats.rx_packets = sum.rx_packets; 122
110 dev->stats.rx_bytes = sum.rx_bytes; 123 tot->rx_errors = dev->stats.rx_errors;
111 dev->stats.tx_packets = sum.tx_packets; 124 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
112 dev->stats.tx_bytes = sum.tx_bytes; 125 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
113 return &dev->stats; 126 tot->tx_dropped = dev->stats.tx_dropped;
127 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
128 tot->tx_errors = dev->stats.tx_errors;
129
130 return tot;
114} 131}
132
115/* 133/*
116 * Must be invoked with rcu_read_lock 134 * Must be invoked with rcu_read_lock
117 */ 135 */
118static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 136static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
119 struct net_device *dev, __be32 remote, __be32 local) 137 struct net_device *dev, __be32 remote, __be32 local)
120{ 138{
121 unsigned int h0 = HASH(remote); 139 unsigned int h0 = HASH(remote);
@@ -680,23 +698,27 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
680 /* ISATAP (RFC4214) - must come before 6to4 */ 698 /* ISATAP (RFC4214) - must come before 6to4 */
681 if (dev->priv_flags & IFF_ISATAP) { 699 if (dev->priv_flags & IFF_ISATAP) {
682 struct neighbour *neigh = NULL; 700 struct neighbour *neigh = NULL;
701 bool do_tx_error = false;
683 702
684 if (skb_dst(skb)) 703 if (skb_dst(skb))
685 neigh = dst_get_neighbour_noref(skb_dst(skb)); 704 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
686 705
687 if (neigh == NULL) { 706 if (neigh == NULL) {
688 if (net_ratelimit()) 707 net_dbg_ratelimited("sit: nexthop == NULL\n");
689 printk(KERN_DEBUG "sit: nexthop == NULL\n");
690 goto tx_error; 708 goto tx_error;
691 } 709 }
692 710
693 addr6 = (const struct in6_addr*)&neigh->primary_key; 711 addr6 = (const struct in6_addr *)&neigh->primary_key;
694 addr_type = ipv6_addr_type(addr6); 712 addr_type = ipv6_addr_type(addr6);
695 713
696 if ((addr_type & IPV6_ADDR_UNICAST) && 714 if ((addr_type & IPV6_ADDR_UNICAST) &&
697 ipv6_addr_is_isatap(addr6)) 715 ipv6_addr_is_isatap(addr6))
698 dst = addr6->s6_addr32[3]; 716 dst = addr6->s6_addr32[3];
699 else 717 else
718 do_tx_error = true;
719
720 neigh_release(neigh);
721 if (do_tx_error)
700 goto tx_error; 722 goto tx_error;
701 } 723 }
702 724
@@ -705,17 +727,17 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
705 727
706 if (!dst) { 728 if (!dst) {
707 struct neighbour *neigh = NULL; 729 struct neighbour *neigh = NULL;
730 bool do_tx_error = false;
708 731
709 if (skb_dst(skb)) 732 if (skb_dst(skb))
710 neigh = dst_get_neighbour_noref(skb_dst(skb)); 733 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
711 734
712 if (neigh == NULL) { 735 if (neigh == NULL) {
713 if (net_ratelimit()) 736 net_dbg_ratelimited("sit: nexthop == NULL\n");
714 printk(KERN_DEBUG "sit: nexthop == NULL\n");
715 goto tx_error; 737 goto tx_error;
716 } 738 }
717 739
718 addr6 = (const struct in6_addr*)&neigh->primary_key; 740 addr6 = (const struct in6_addr *)&neigh->primary_key;
719 addr_type = ipv6_addr_type(addr6); 741 addr_type = ipv6_addr_type(addr6);
720 742
721 if (addr_type == IPV6_ADDR_ANY) { 743 if (addr_type == IPV6_ADDR_ANY) {
@@ -723,10 +745,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
723 addr_type = ipv6_addr_type(addr6); 745 addr_type = ipv6_addr_type(addr6);
724 } 746 }
725 747
726 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 748 if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
727 goto tx_error_icmp; 749 dst = addr6->s6_addr32[3];
750 else
751 do_tx_error = true;
728 752
729 dst = addr6->s6_addr32[3]; 753 neigh_release(neigh);
754 if (do_tx_error)
755 goto tx_error;
730 } 756 }
731 757
732 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 758 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -1116,7 +1142,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
1116 .ndo_start_xmit = ipip6_tunnel_xmit, 1142 .ndo_start_xmit = ipip6_tunnel_xmit,
1117 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1143 .ndo_do_ioctl = ipip6_tunnel_ioctl,
1118 .ndo_change_mtu = ipip6_tunnel_change_mtu, 1144 .ndo_change_mtu = ipip6_tunnel_change_mtu,
1119 .ndo_get_stats = ipip6_get_stats, 1145 .ndo_get_stats64= ipip6_get_stats64,
1120}; 1146};
1121 1147
1122static void ipip6_dev_free(struct net_device *dev) 1148static void ipip6_dev_free(struct net_device *dev)
@@ -1277,7 +1303,7 @@ static int __init sit_init(void)
1277{ 1303{
1278 int err; 1304 int err;
1279 1305
1280 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); 1306 pr_info("IPv6 over IPv4 tunneling driver\n");
1281 1307
1282 err = register_pernet_device(&sit_net_ops); 1308 err = register_pernet_device(&sit_net_ops);
1283 if (err < 0) 1309 if (err < 0)
@@ -1285,7 +1311,7 @@ static int __init sit_init(void)
1285 err = xfrm4_tunnel_register(&sit_handler, AF_INET6); 1311 err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1286 if (err < 0) { 1312 if (err < 0) {
1287 unregister_pernet_device(&sit_net_ops); 1313 unregister_pernet_device(&sit_net_ops);
1288 printk(KERN_INFO "sit init: Can't add protocol\n"); 1314 pr_info("%s: can't add protocol\n", __func__);
1289 } 1315 }
1290 return err; 1316 return err;
1291} 1317}
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 166a57c47d39..e85c48bd404f 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,32 +16,8 @@
16#include <net/addrconf.h> 16#include <net/addrconf.h>
17#include <net/inet_frag.h> 17#include <net/inet_frag.h>
18 18
19static struct ctl_table empty[1];
20
21static ctl_table ipv6_static_skeleton[] = {
22 {
23 .procname = "neigh",
24 .maxlen = 0,
25 .mode = 0555,
26 .child = empty,
27 },
28 { }
29};
30
31static ctl_table ipv6_table_template[] = { 19static ctl_table ipv6_table_template[] = {
32 { 20 {
33 .procname = "route",
34 .maxlen = 0,
35 .mode = 0555,
36 .child = ipv6_route_table_template
37 },
38 {
39 .procname = "icmp",
40 .maxlen = 0,
41 .mode = 0555,
42 .child = ipv6_icmp_table_template
43 },
44 {
45 .procname = "bindv6only", 21 .procname = "bindv6only",
46 .data = &init_net.ipv6.sysctl.bindv6only, 22 .data = &init_net.ipv6.sysctl.bindv6only,
47 .maxlen = sizeof(int), 23 .maxlen = sizeof(int),
@@ -62,13 +38,6 @@ static ctl_table ipv6_rotable[] = {
62 { } 38 { }
63}; 39};
64 40
65struct ctl_path net_ipv6_ctl_path[] = {
66 { .procname = "net", },
67 { .procname = "ipv6", },
68 { },
69};
70EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
71
72static int __net_init ipv6_sysctl_net_init(struct net *net) 41static int __net_init ipv6_sysctl_net_init(struct net *net)
73{ 42{
74 struct ctl_table *ipv6_table; 43 struct ctl_table *ipv6_table;
@@ -81,28 +50,37 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
81 GFP_KERNEL); 50 GFP_KERNEL);
82 if (!ipv6_table) 51 if (!ipv6_table)
83 goto out; 52 goto out;
53 ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
84 54
85 ipv6_route_table = ipv6_route_sysctl_init(net); 55 ipv6_route_table = ipv6_route_sysctl_init(net);
86 if (!ipv6_route_table) 56 if (!ipv6_route_table)
87 goto out_ipv6_table; 57 goto out_ipv6_table;
88 ipv6_table[0].child = ipv6_route_table;
89 58
90 ipv6_icmp_table = ipv6_icmp_sysctl_init(net); 59 ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
91 if (!ipv6_icmp_table) 60 if (!ipv6_icmp_table)
92 goto out_ipv6_route_table; 61 goto out_ipv6_route_table;
93 ipv6_table[1].child = ipv6_icmp_table;
94 62
95 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 63 net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
96 64 if (!net->ipv6.sysctl.hdr)
97 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
98 ipv6_table);
99 if (!net->ipv6.sysctl.table)
100 goto out_ipv6_icmp_table; 65 goto out_ipv6_icmp_table;
101 66
67 net->ipv6.sysctl.route_hdr =
68 register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
69 if (!net->ipv6.sysctl.route_hdr)
70 goto out_unregister_ipv6_table;
71
72 net->ipv6.sysctl.icmp_hdr =
73 register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
74 if (!net->ipv6.sysctl.icmp_hdr)
75 goto out_unregister_route_table;
76
102 err = 0; 77 err = 0;
103out: 78out:
104 return err; 79 return err;
105 80out_unregister_route_table:
81 unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
82out_unregister_ipv6_table:
83 unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
106out_ipv6_icmp_table: 84out_ipv6_icmp_table:
107 kfree(ipv6_icmp_table); 85 kfree(ipv6_icmp_table);
108out_ipv6_route_table: 86out_ipv6_route_table:
@@ -118,11 +96,13 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net)
118 struct ctl_table *ipv6_route_table; 96 struct ctl_table *ipv6_route_table;
119 struct ctl_table *ipv6_icmp_table; 97 struct ctl_table *ipv6_icmp_table;
120 98
121 ipv6_table = net->ipv6.sysctl.table->ctl_table_arg; 99 ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
122 ipv6_route_table = ipv6_table[0].child; 100 ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
123 ipv6_icmp_table = ipv6_table[1].child; 101 ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
124 102
125 unregister_net_sysctl_table(net->ipv6.sysctl.table); 103 unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
104 unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
105 unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
126 106
127 kfree(ipv6_table); 107 kfree(ipv6_table);
128 kfree(ipv6_route_table); 108 kfree(ipv6_route_table);
@@ -140,7 +120,7 @@ int ipv6_sysctl_register(void)
140{ 120{
141 int err = -ENOMEM; 121 int err = -ENOMEM;
142 122
143 ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable); 123 ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
144 if (ip6_header == NULL) 124 if (ip6_header == NULL)
145 goto out; 125 goto out;
146 126
@@ -160,18 +140,3 @@ void ipv6_sysctl_unregister(void)
160 unregister_net_sysctl_table(ip6_header); 140 unregister_net_sysctl_table(ip6_header);
161 unregister_pernet_subsys(&ipv6_sysctl_net_ops); 141 unregister_pernet_subsys(&ipv6_sysctl_net_ops);
162} 142}
163
164static struct ctl_table_header *ip6_base;
165
166int ipv6_static_sysctl_register(void)
167{
168 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
169 if (ip6_base == NULL)
170 return -ENOMEM;
171 return 0;
172}
173
174void ipv6_static_sysctl_unregister(void)
175{
176 unregister_net_sysctl_table(ip6_base);
177}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3edd05ae4388..3a9aec29581a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ out:
476 476
477 477
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp) 479 struct request_values *rvp,
480 u16 queue_mapping)
480{ 481{
481 struct inet6_request_sock *treq = inet6_rsk(req); 482 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk); 483 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
514 515
515 fl6.daddr = treq->rmt_addr; 516 fl6.daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping);
516 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
517 err = net_xmit_eval(err); 519 err = net_xmit_eval(err);
518 } 520 }
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp) 530 struct request_values *rvp)
529{ 531{
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp); 533 return tcp_v6_send_synack(sk, req, rvp, 0);
532} 534}
533 535
534static void tcp_v6_reqsk_destructor(struct request_sock *req) 536static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -540,19 +542,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 542static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr) 543 const struct in6_addr *addr)
542{ 544{
543 struct tcp_sock *tp = tcp_sk(sk); 545 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
544 int i;
545
546 BUG_ON(tp == NULL);
547
548 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
549 return NULL;
550
551 for (i = 0; i < tp->md5sig_info->entries6; i++) {
552 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
553 return &tp->md5sig_info->keys6[i].base;
554 }
555 return NULL;
556} 546}
557 547
558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, 548static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
@@ -567,136 +557,11 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
567 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 557 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
568} 558}
569 559
570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
571 char *newkey, u8 newkeylen)
572{
573 /* Add key to the list */
574 struct tcp_md5sig_key *key;
575 struct tcp_sock *tp = tcp_sk(sk);
576 struct tcp6_md5sig_key *keys;
577
578 key = tcp_v6_md5_do_lookup(sk, peer);
579 if (key) {
580 /* modify existing entry - just update that one */
581 kfree(key->key);
582 key->key = newkey;
583 key->keylen = newkeylen;
584 } else {
585 /* reallocate new list if current one is full. */
586 if (!tp->md5sig_info) {
587 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
588 if (!tp->md5sig_info) {
589 kfree(newkey);
590 return -ENOMEM;
591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 }
594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
598 }
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
602
603 if (!keys) {
604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
607 return -ENOMEM;
608 }
609
610 if (tp->md5sig_info->entries6)
611 memmove(keys, tp->md5sig_info->keys6,
612 (sizeof (tp->md5sig_info->keys6[0]) *
613 tp->md5sig_info->entries6));
614
615 kfree(tp->md5sig_info->keys6);
616 tp->md5sig_info->keys6 = keys;
617 tp->md5sig_info->alloced6++;
618 }
619
620 tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
621 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
623
624 tp->md5sig_info->entries6++;
625 }
626 return 0;
627}
628
629static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
630 u8 *newkey, __u8 newkeylen)
631{
632 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
633 newkey, newkeylen);
634}
635
636static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
637{
638 struct tcp_sock *tp = tcp_sk(sk);
639 int i;
640
641 for (i = 0; i < tp->md5sig_info->entries6; i++) {
642 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
643 /* Free the key */
644 kfree(tp->md5sig_info->keys6[i].base.key);
645 tp->md5sig_info->entries6--;
646
647 if (tp->md5sig_info->entries6 == 0) {
648 kfree(tp->md5sig_info->keys6);
649 tp->md5sig_info->keys6 = NULL;
650 tp->md5sig_info->alloced6 = 0;
651 tcp_free_md5sig_pool();
652 } else {
653 /* shrink the database */
654 if (tp->md5sig_info->entries6 != i)
655 memmove(&tp->md5sig_info->keys6[i],
656 &tp->md5sig_info->keys6[i+1],
657 (tp->md5sig_info->entries6 - i)
658 * sizeof (tp->md5sig_info->keys6[0]));
659 }
660 return 0;
661 }
662 }
663 return -ENOENT;
664}
665
666static void tcp_v6_clear_md5_list (struct sock *sk)
667{
668 struct tcp_sock *tp = tcp_sk(sk);
669 int i;
670
671 if (tp->md5sig_info->entries6) {
672 for (i = 0; i < tp->md5sig_info->entries6; i++)
673 kfree(tp->md5sig_info->keys6[i].base.key);
674 tp->md5sig_info->entries6 = 0;
675 tcp_free_md5sig_pool();
676 }
677
678 kfree(tp->md5sig_info->keys6);
679 tp->md5sig_info->keys6 = NULL;
680 tp->md5sig_info->alloced6 = 0;
681
682 if (tp->md5sig_info->entries4) {
683 for (i = 0; i < tp->md5sig_info->entries4; i++)
684 kfree(tp->md5sig_info->keys4[i].base.key);
685 tp->md5sig_info->entries4 = 0;
686 tcp_free_md5sig_pool();
687 }
688
689 kfree(tp->md5sig_info->keys4);
690 tp->md5sig_info->keys4 = NULL;
691 tp->md5sig_info->alloced4 = 0;
692}
693
694static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, 560static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
695 int optlen) 561 int optlen)
696{ 562{
697 struct tcp_md5sig cmd; 563 struct tcp_md5sig cmd;
698 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; 564 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
699 u8 *newkey;
700 565
701 if (optlen < sizeof(cmd)) 566 if (optlen < sizeof(cmd))
702 return -EINVAL; 567 return -EINVAL;
@@ -708,36 +573,22 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
708 return -EINVAL; 573 return -EINVAL;
709 574
710 if (!cmd.tcpm_keylen) { 575 if (!cmd.tcpm_keylen) {
711 if (!tcp_sk(sk)->md5sig_info)
712 return -ENOENT;
713 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 576 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
714 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); 577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
715 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); 578 AF_INET);
579 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
580 AF_INET6);
716 } 581 }
717 582
718 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 583 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
719 return -EINVAL; 584 return -EINVAL;
720 585
721 if (!tcp_sk(sk)->md5sig_info) { 586 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
722 struct tcp_sock *tp = tcp_sk(sk); 587 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
723 struct tcp_md5sig_info *p; 588 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
724 589
725 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); 590 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
726 if (!p) 591 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
727 return -ENOMEM;
728
729 tp->md5sig_info = p;
730 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
731 }
732
733 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
734 if (!newkey)
735 return -ENOMEM;
736 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
737 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
738 newkey, cmd.tcpm_keylen);
739 }
740 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
741} 592}
742 593
743static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 594static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -874,12 +725,10 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
874 NULL, NULL, skb); 725 NULL, NULL, skb);
875 726
876 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 727 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
877 if (net_ratelimit()) { 728 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
878 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", 729 genhash ? "failed" : "mismatch",
879 genhash ? "failed" : "mismatch", 730 &ip6h->saddr, ntohs(th->source),
880 &ip6h->saddr, ntohs(th->source), 731 &ip6h->daddr, ntohs(th->dest));
881 &ip6h->daddr, ntohs(th->dest));
882 }
883 return 1; 732 return 1;
884 } 733 }
885 return 0; 734 return 0;
@@ -1074,6 +923,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1074 const struct tcphdr *th = tcp_hdr(skb); 923 const struct tcphdr *th = tcp_hdr(skb);
1075 u32 seq = 0, ack_seq = 0; 924 u32 seq = 0, ack_seq = 0;
1076 struct tcp_md5sig_key *key = NULL; 925 struct tcp_md5sig_key *key = NULL;
926#ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
930 int genhash;
931 struct sock *sk1 = NULL;
932#endif
1077 933
1078 if (th->rst) 934 if (th->rst)
1079 return; 935 return;
@@ -1082,8 +938,32 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1082 return; 938 return;
1083 939
1084#ifdef CONFIG_TCP_MD5SIG 940#ifdef CONFIG_TCP_MD5SIG
1085 if (sk) 941 hash_location = tcp_parse_md5sig_option(th);
1086 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr); 942 if (!sk && hash_location) {
943 /*
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
953 if (!sk1)
954 return;
955
956 rcu_read_lock();
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 if (!key)
959 goto release_sk1;
960
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 goto release_sk1;
964 } else {
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 }
1087#endif 967#endif
1088 968
1089 if (th->ack) 969 if (th->ack)
@@ -1093,6 +973,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1093 (th->doff << 2); 973 (th->doff << 2);
1094 974
1095 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976
977#ifdef CONFIG_TCP_MD5SIG
978release_sk1:
979 if (sk1) {
980 rcu_read_unlock();
981 sock_put(sk1);
982 }
983#endif
1096} 984}
1097 985
1098static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
@@ -1169,7 +1057,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 struct tcp_sock *tp = tcp_sk(sk); 1057 struct tcp_sock *tp = tcp_sk(sk);
1170 __u32 isn = TCP_SKB_CB(skb)->when; 1058 __u32 isn = TCP_SKB_CB(skb)->when;
1171 struct dst_entry *dst = NULL; 1059 struct dst_entry *dst = NULL;
1172 int want_cookie = 0; 1060 bool want_cookie = false;
1173 1061
1174 if (skb->protocol == htons(ETH_P_IP)) 1062 if (skb->protocol == htons(ETH_P_IP))
1175 return tcp_v4_conn_request(sk, skb); 1063 return tcp_v4_conn_request(sk, skb);
@@ -1230,7 +1118,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1230 while (l-- > 0) 1118 while (l-- > 0)
1231 *c++ ^= *hash_location++; 1119 *c++ ^= *hash_location++;
1232 1120
1233 want_cookie = 0; /* not our kind of cookie */ 1121 want_cookie = false; /* not our kind of cookie */
1234 tmp_ext.cookie_out_never = 0; /* false */ 1122 tmp_ext.cookie_out_never = 0; /* false */
1235 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1123 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1236 } else if (!tp->rx_opt.cookie_in_always) { 1124 } else if (!tp->rx_opt.cookie_in_always) {
@@ -1252,7 +1140,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1252 treq->rmt_addr = ipv6_hdr(skb)->saddr; 1140 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1253 treq->loc_addr = ipv6_hdr(skb)->daddr; 1141 treq->loc_addr = ipv6_hdr(skb)->daddr;
1254 if (!want_cookie || tmp_opt.tstamp_ok) 1142 if (!want_cookie || tmp_opt.tstamp_ok)
1255 TCP_ECN_create_request(req, tcp_hdr(skb)); 1143 TCP_ECN_create_request(req, skb);
1256 1144
1257 treq->iif = sk->sk_bound_dev_if; 1145 treq->iif = sk->sk_bound_dev_if;
1258 1146
@@ -1327,7 +1215,8 @@ have_isn:
1327 security_inet_conn_request(sk, skb, req); 1215 security_inet_conn_request(sk, skb, req);
1328 1216
1329 if (tcp_v6_send_synack(sk, req, 1217 if (tcp_v6_send_synack(sk, req,
1330 (struct request_values *)&tmp_ext) || 1218 (struct request_values *)&tmp_ext,
1219 skb_get_queue_mapping(skb)) ||
1331 want_cookie) 1220 want_cookie)
1332 goto drop_and_free; 1221 goto drop_and_free;
1333 1222
@@ -1394,6 +1283,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1394 newnp->opt = NULL; 1283 newnp->opt = NULL;
1395 newnp->mcast_oif = inet6_iif(skb); 1284 newnp->mcast_oif = inet6_iif(skb);
1396 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1285 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1286 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1397 1287
1398 /* 1288 /*
1399 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1289 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1464,7 +1354,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1464 newnp->pktoptions = NULL; 1354 newnp->pktoptions = NULL;
1465 if (treq->pktopts != NULL) { 1355 if (treq->pktopts != NULL) {
1466 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); 1356 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1467 kfree_skb(treq->pktopts); 1357 consume_skb(treq->pktopts);
1468 treq->pktopts = NULL; 1358 treq->pktopts = NULL;
1469 if (newnp->pktoptions) 1359 if (newnp->pktoptions)
1470 skb_set_owner_r(newnp->pktoptions, newsk); 1360 skb_set_owner_r(newnp->pktoptions, newsk);
@@ -1472,6 +1362,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1472 newnp->opt = NULL; 1362 newnp->opt = NULL;
1473 newnp->mcast_oif = inet6_iif(skb); 1363 newnp->mcast_oif = inet6_iif(skb);
1474 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1364 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1365 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1475 1366
1476 /* Clone native IPv6 options from listening socket (if any) 1367 /* Clone native IPv6 options from listening socket (if any)
1477 1368
@@ -1493,6 +1384,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1493 tcp_mtup_init(newsk); 1384 tcp_mtup_init(newsk);
1494 tcp_sync_mss(newsk, dst_mtu(dst)); 1385 tcp_sync_mss(newsk, dst_mtu(dst));
1495 newtp->advmss = dst_metric_advmss(dst); 1386 newtp->advmss = dst_metric_advmss(dst);
1387 if (tcp_sk(sk)->rx_opt.user_mss &&
1388 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1389 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1390
1496 tcp_initialize_rcv_mss(newsk); 1391 tcp_initialize_rcv_mss(newsk);
1497 if (tcp_rsk(req)->snt_synack) 1392 if (tcp_rsk(req)->snt_synack)
1498 tcp_valid_rtt_meas(newsk, 1393 tcp_valid_rtt_meas(newsk,
@@ -1510,10 +1405,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1510 * memory, then we end up not copying the key 1405 * memory, then we end up not copying the key
1511 * across. Shucks. 1406 * across. Shucks.
1512 */ 1407 */
1513 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1408 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1514 if (newkey != NULL) 1409 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1515 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1516 newkey, key->keylen);
1517 } 1410 }
1518#endif 1411#endif
1519 1412
@@ -1676,6 +1569,8 @@ ipv6_pktoptions:
1676 np->mcast_oif = inet6_iif(opt_skb); 1569 np->mcast_oif = inet6_iif(opt_skb);
1677 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1570 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1678 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1571 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1572 if (np->rxopt.bits.rxtclass)
1573 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1679 if (ipv6_opt_accepted(sk, opt_skb)) { 1574 if (ipv6_opt_accepted(sk, opt_skb)) {
1680 skb_set_owner_r(opt_skb, sk); 1575 skb_set_owner_r(opt_skb, sk);
1681 opt_skb = xchg(&np->pktoptions, opt_skb); 1576 opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1755,7 +1650,7 @@ process:
1755#ifdef CONFIG_NET_DMA 1650#ifdef CONFIG_NET_DMA
1756 struct tcp_sock *tp = tcp_sk(sk); 1651 struct tcp_sock *tp = tcp_sk(sk);
1757 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1652 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1758 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1653 tp->ucopy.dma_chan = net_dma_find_channel();
1759 if (tp->ucopy.dma_chan) 1654 if (tp->ucopy.dma_chan)
1760 ret = tcp_v6_do_rcv(sk, skb); 1655 ret = tcp_v6_do_rcv(sk, skb);
1761 else 1656 else
@@ -1764,7 +1659,8 @@ process:
1764 if (!tcp_prequeue(sk, skb)) 1659 if (!tcp_prequeue(sk, skb))
1765 ret = tcp_v6_do_rcv(sk, skb); 1660 ret = tcp_v6_do_rcv(sk, skb);
1766 } 1661 }
1767 } else if (unlikely(sk_add_backlog(sk, skb))) { 1662 } else if (unlikely(sk_add_backlog(sk, skb,
1663 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1768 bh_unlock_sock(sk); 1664 bh_unlock_sock(sk);
1769 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); 1665 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1770 goto discard_and_relse; 1666 goto discard_and_relse;
@@ -1883,6 +1779,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1883 .syn_recv_sock = tcp_v6_syn_recv_sock, 1779 .syn_recv_sock = tcp_v6_syn_recv_sock,
1884 .get_peer = tcp_v6_get_peer, 1780 .get_peer = tcp_v6_get_peer,
1885 .net_header_len = sizeof(struct ipv6hdr), 1781 .net_header_len = sizeof(struct ipv6hdr),
1782 .net_frag_header_len = sizeof(struct frag_hdr),
1886 .setsockopt = ipv6_setsockopt, 1783 .setsockopt = ipv6_setsockopt,
1887 .getsockopt = ipv6_getsockopt, 1784 .getsockopt = ipv6_getsockopt,
1888 .addr2sockaddr = inet6_csk_addr2sockaddr, 1785 .addr2sockaddr = inet6_csk_addr2sockaddr,
@@ -1898,7 +1795,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1898static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { 1795static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1899 .md5_lookup = tcp_v6_md5_lookup, 1796 .md5_lookup = tcp_v6_md5_lookup,
1900 .calc_md5_hash = tcp_v6_md5_hash_skb, 1797 .calc_md5_hash = tcp_v6_md5_hash_skb,
1901 .md5_add = tcp_v6_md5_add_func,
1902 .md5_parse = tcp_v6_parse_md5_keys, 1798 .md5_parse = tcp_v6_parse_md5_keys,
1903}; 1799};
1904#endif 1800#endif
@@ -1930,7 +1826,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1930static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { 1826static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1931 .md5_lookup = tcp_v4_md5_lookup, 1827 .md5_lookup = tcp_v4_md5_lookup,
1932 .calc_md5_hash = tcp_v4_md5_hash_skb, 1828 .calc_md5_hash = tcp_v4_md5_hash_skb,
1933 .md5_add = tcp_v6_md5_add_func,
1934 .md5_parse = tcp_v6_parse_md5_keys, 1829 .md5_parse = tcp_v6_parse_md5_keys,
1935}; 1830};
1936#endif 1831#endif
@@ -1941,74 +1836,20 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1941static int tcp_v6_init_sock(struct sock *sk) 1836static int tcp_v6_init_sock(struct sock *sk)
1942{ 1837{
1943 struct inet_connection_sock *icsk = inet_csk(sk); 1838 struct inet_connection_sock *icsk = inet_csk(sk);
1944 struct tcp_sock *tp = tcp_sk(sk);
1945
1946 skb_queue_head_init(&tp->out_of_order_queue);
1947 tcp_init_xmit_timers(sk);
1948 tcp_prequeue_init(tp);
1949 1839
1950 icsk->icsk_rto = TCP_TIMEOUT_INIT; 1840 tcp_init_sock(sk);
1951 tp->mdev = TCP_TIMEOUT_INIT;
1952
1953 /* So many TCP implementations out there (incorrectly) count the
1954 * initial SYN frame in their delayed-ACK and congestion control
1955 * algorithms that we must have the following bandaid to talk
1956 * efficiently to them. -DaveM
1957 */
1958 tp->snd_cwnd = 2;
1959
1960 /* See draft-stevens-tcpca-spec-01 for discussion of the
1961 * initialization of these values.
1962 */
1963 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1964 tp->snd_cwnd_clamp = ~0;
1965 tp->mss_cache = TCP_MSS_DEFAULT;
1966
1967 tp->reordering = sysctl_tcp_reordering;
1968
1969 sk->sk_state = TCP_CLOSE;
1970 1841
1971 icsk->icsk_af_ops = &ipv6_specific; 1842 icsk->icsk_af_ops = &ipv6_specific;
1972 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1973 icsk->icsk_sync_mss = tcp_sync_mss;
1974 sk->sk_write_space = sk_stream_write_space;
1975 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1976 1843
1977#ifdef CONFIG_TCP_MD5SIG 1844#ifdef CONFIG_TCP_MD5SIG
1978 tp->af_specific = &tcp_sock_ipv6_specific; 1845 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1979#endif 1846#endif
1980 1847
1981 /* TCP Cookie Transactions */
1982 if (sysctl_tcp_cookie_size > 0) {
1983 /* Default, cookies without s_data_payload. */
1984 tp->cookie_values =
1985 kzalloc(sizeof(*tp->cookie_values),
1986 sk->sk_allocation);
1987 if (tp->cookie_values != NULL)
1988 kref_init(&tp->cookie_values->kref);
1989 }
1990 /* Presumed zeroed, in order of appearance:
1991 * cookie_in_always, cookie_out_never,
1992 * s_data_constant, s_data_in, s_data_out
1993 */
1994 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1995 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1996
1997 local_bh_disable();
1998 sock_update_memcg(sk);
1999 sk_sockets_allocated_inc(sk);
2000 local_bh_enable();
2001
2002 return 0; 1848 return 0;
2003} 1849}
2004 1850
2005static void tcp_v6_destroy_sock(struct sock *sk) 1851static void tcp_v6_destroy_sock(struct sock *sk)
2006{ 1852{
2007#ifdef CONFIG_TCP_MD5SIG
2008 /* Clean up the MD5 key list */
2009 if (tcp_sk(sk)->md5sig_info)
2010 tcp_v6_clear_md5_list(sk);
2011#endif
2012 tcp_v4_destroy_sock(sk); 1853 tcp_v4_destroy_sock(sk);
2013 inet6_destroy_sock(sk); 1854 inet6_destroy_sock(sk);
2014} 1855}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 4f3cec12aa85..4b0f50d9a962 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -19,6 +19,8 @@
19 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 19 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
20 */ 20 */
21 21
22#define pr_fmt(fmt) "IPv6: " fmt
23
22#include <linux/icmpv6.h> 24#include <linux/icmpv6.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <linux/module.h> 26#include <linux/module.h>
@@ -160,11 +162,11 @@ static const struct inet6_protocol tunnel46_protocol = {
160static int __init tunnel6_init(void) 162static int __init tunnel6_init(void)
161{ 163{
162 if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) { 164 if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) {
163 printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); 165 pr_err("%s: can't add protocol\n", __func__);
164 return -EAGAIN; 166 return -EAGAIN;
165 } 167 }
166 if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) { 168 if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) {
167 printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); 169 pr_err("%s: can't add protocol\n", __func__);
168 inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6); 170 inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
169 return -EAGAIN; 171 return -EAGAIN;
170 } 172 }
@@ -174,9 +176,9 @@ static int __init tunnel6_init(void)
174static void __exit tunnel6_fini(void) 176static void __exit tunnel6_fini(void)
175{ 177{
176 if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP)) 178 if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
177 printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); 179 pr_err("%s: can't remove protocol\n", __func__);
178 if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6)) 180 if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
179 printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); 181 pr_err("%s: can't remove protocol\n", __func__);
180} 182}
181 183
182module_init(tunnel6_init); 184module_init(tunnel6_init);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4f96b5c63685..f05099fc5901 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
103{ 103{
104 unsigned int hash2_nulladdr = 104 unsigned int hash2_nulladdr =
105 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); 105 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
106 unsigned int hash2_partial = 106 unsigned int hash2_partial =
107 udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0); 107 udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
108 108
109 /* precompute partial secondary hash */ 109 /* precompute partial secondary hash */
@@ -342,14 +342,14 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
342 struct inet_sock *inet = inet_sk(sk); 342 struct inet_sock *inet = inet_sk(sk);
343 struct sk_buff *skb; 343 struct sk_buff *skb;
344 unsigned int ulen, copied; 344 unsigned int ulen, copied;
345 int peeked; 345 int peeked, off = 0;
346 int err; 346 int err;
347 int is_udplite = IS_UDPLITE(sk); 347 int is_udplite = IS_UDPLITE(sk);
348 int is_udp4; 348 int is_udp4;
349 bool slow; 349 bool slow;
350 350
351 if (addr_len) 351 if (addr_len)
352 *addr_len=sizeof(struct sockaddr_in6); 352 *addr_len = sizeof(struct sockaddr_in6);
353 353
354 if (flags & MSG_ERRQUEUE) 354 if (flags & MSG_ERRQUEUE)
355 return ipv6_recv_error(sk, msg, len); 355 return ipv6_recv_error(sk, msg, len);
@@ -359,7 +359,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
359 359
360try_again: 360try_again:
361 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 361 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
362 &peeked, &err); 362 &peeked, &off, &err);
363 if (!skb) 363 if (!skb)
364 goto out; 364 goto out;
365 365
@@ -496,6 +496,28 @@ out:
496 sock_put(sk); 496 sock_put(sk);
497} 497}
498 498
499static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
500{
501 int rc;
502
503 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
504 sock_rps_save_rxhash(sk, skb);
505
506 rc = sock_queue_rcv_skb(sk, skb);
507 if (rc < 0) {
508 int is_udplite = IS_UDPLITE(sk);
509
510 /* Note that an ENOMEM error is charged twice */
511 if (rc == -ENOMEM)
512 UDP6_INC_STATS_BH(sock_net(sk),
513 UDP_MIB_RCVBUFERRORS, is_udplite);
514 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
515 kfree_skb(skb);
516 return -1;
517 }
518 return 0;
519}
520
499static __inline__ void udpv6_err(struct sk_buff *skb, 521static __inline__ void udpv6_err(struct sk_buff *skb,
500 struct inet6_skb_parm *opt, u8 type, 522 struct inet6_skb_parm *opt, u8 type,
501 u8 code, int offset, __be32 info ) 523 u8 code, int offset, __be32 info )
@@ -503,18 +525,54 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
503 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); 525 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
504} 526}
505 527
506int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) 528static struct static_key udpv6_encap_needed __read_mostly;
529void udpv6_encap_enable(void)
530{
531 if (!static_key_enabled(&udpv6_encap_needed))
532 static_key_slow_inc(&udpv6_encap_needed);
533}
534EXPORT_SYMBOL(udpv6_encap_enable);
535
536int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
507{ 537{
508 struct udp_sock *up = udp_sk(sk); 538 struct udp_sock *up = udp_sk(sk);
509 int rc; 539 int rc;
510 int is_udplite = IS_UDPLITE(sk); 540 int is_udplite = IS_UDPLITE(sk);
511 541
512 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
513 sock_rps_save_rxhash(sk, skb);
514
515 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 542 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
516 goto drop; 543 goto drop;
517 544
545 if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
546 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
547
548 /*
549 * This is an encapsulation socket so pass the skb to
550 * the socket's udp_encap_rcv() hook. Otherwise, just
551 * fall through and pass this up the UDP socket.
552 * up->encap_rcv() returns the following value:
553 * =0 if skb was successfully passed to the encap
554 * handler or was discarded by it.
555 * >0 if skb should be passed on to UDP.
556 * <0 if skb should be resubmitted as proto -N
557 */
558
559 /* if we're overly short, let UDP handle it */
560 encap_rcv = ACCESS_ONCE(up->encap_rcv);
561 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
562 int ret;
563
564 ret = encap_rcv(sk, skb);
565 if (ret <= 0) {
566 UDP_INC_STATS_BH(sock_net(sk),
567 UDP_MIB_INDATAGRAMS,
568 is_udplite);
569 return -ret;
570 }
571 }
572
573 /* FALLTHROUGH -- it's a UDP Packet */
574 }
575
518 /* 576 /*
519 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 577 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
520 */ 578 */
@@ -539,21 +597,25 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
539 goto drop; 597 goto drop;
540 } 598 }
541 599
600 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
601 goto drop;
602
542 skb_dst_drop(skb); 603 skb_dst_drop(skb);
543 rc = sock_queue_rcv_skb(sk, skb); 604
544 if (rc < 0) { 605 bh_lock_sock(sk);
545 /* Note that an ENOMEM error is charged twice */ 606 rc = 0;
546 if (rc == -ENOMEM) 607 if (!sock_owned_by_user(sk))
547 UDP6_INC_STATS_BH(sock_net(sk), 608 rc = __udpv6_queue_rcv_skb(sk, skb);
548 UDP_MIB_RCVBUFERRORS, is_udplite); 609 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
549 goto drop_no_sk_drops_inc; 610 bh_unlock_sock(sk);
611 goto drop;
550 } 612 }
613 bh_unlock_sock(sk);
551 614
552 return 0; 615 return rc;
553drop: 616drop:
554 atomic_inc(&sk->sk_drops);
555drop_no_sk_drops_inc:
556 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 617 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
618 atomic_inc(&sk->sk_drops);
557 kfree_skb(skb); 619 kfree_skb(skb);
558 return -1; 620 return -1;
559} 621}
@@ -602,37 +664,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
602static void flush_stack(struct sock **stack, unsigned int count, 664static void flush_stack(struct sock **stack, unsigned int count,
603 struct sk_buff *skb, unsigned int final) 665 struct sk_buff *skb, unsigned int final)
604{ 666{
605 unsigned int i; 667 struct sk_buff *skb1 = NULL;
606 struct sock *sk; 668 struct sock *sk;
607 struct sk_buff *skb1; 669 unsigned int i;
608 670
609 for (i = 0; i < count; i++) { 671 for (i = 0; i < count; i++) {
610 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
611
612 sk = stack[i]; 672 sk = stack[i];
613 if (skb1) { 673 if (likely(skb1 == NULL))
614 if (sk_rcvqueues_full(sk, skb1)) { 674 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
615 kfree_skb(skb1); 675 if (!skb1) {
616 goto drop; 676 atomic_inc(&sk->sk_drops);
617 } 677 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
618 bh_lock_sock(sk); 678 IS_UDPLITE(sk));
619 if (!sock_owned_by_user(sk)) 679 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
620 udpv6_queue_rcv_skb(sk, skb1); 680 IS_UDPLITE(sk));
621 else if (sk_add_backlog(sk, skb1)) {
622 kfree_skb(skb1);
623 bh_unlock_sock(sk);
624 goto drop;
625 }
626 bh_unlock_sock(sk);
627 continue;
628 } 681 }
629drop: 682
630 atomic_inc(&sk->sk_drops); 683 if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
631 UDP6_INC_STATS_BH(sock_net(sk), 684 skb1 = NULL;
632 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
633 UDP6_INC_STATS_BH(sock_net(sk),
634 UDP_MIB_INERRORS, IS_UDPLITE(sk));
635 } 685 }
686 if (unlikely(skb1))
687 kfree_skb(skb1);
636} 688}
637/* 689/*
638 * Note: called only from the BH handler context, 690 * Note: called only from the BH handler context,
@@ -772,39 +824,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
772 * for sock caches... i'll skip this for now. 824 * for sock caches... i'll skip this for now.
773 */ 825 */
774 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 826 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
827 if (sk != NULL) {
828 int ret = udpv6_queue_rcv_skb(sk, skb);
829 sock_put(sk);
775 830
776 if (sk == NULL) { 831 /* a return value > 0 means to resubmit the input, but
777 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 832 * it wants the return to be -protocol, or 0
778 goto discard; 833 */
779 834 if (ret > 0)
780 if (udp_lib_checksum_complete(skb)) 835 return -ret;
781 goto discard;
782 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
783 proto == IPPROTO_UDPLITE);
784
785 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
786 836
787 kfree_skb(skb);
788 return 0; 837 return 0;
789 } 838 }
790 839
791 /* deliver */ 840 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
792
793 if (sk_rcvqueues_full(sk, skb)) {
794 sock_put(sk);
795 goto discard; 841 goto discard;
796 } 842
797 bh_lock_sock(sk); 843 if (udp_lib_checksum_complete(skb))
798 if (!sock_owned_by_user(sk))
799 udpv6_queue_rcv_skb(sk, skb);
800 else if (sk_add_backlog(sk, skb)) {
801 atomic_inc(&sk->sk_drops);
802 bh_unlock_sock(sk);
803 sock_put(sk);
804 goto discard; 844 goto discard;
805 } 845
806 bh_unlock_sock(sk); 846 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
807 sock_put(sk); 847 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
848
849 kfree_skb(skb);
808 return 0; 850 return 0;
809 851
810short_packet: 852short_packet:
@@ -1130,7 +1172,8 @@ do_udp_sendmsg:
1130 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { 1172 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1131 fl6.flowi6_oif = np->mcast_oif; 1173 fl6.flowi6_oif = np->mcast_oif;
1132 connected = 0; 1174 connected = 0;
1133 } 1175 } else if (!fl6.flowi6_oif)
1176 fl6.flowi6_oif = np->ucast_oif;
1134 1177
1135 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 1178 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1136 1179
@@ -1336,7 +1379,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
1336 * do checksum of UDP packets sent as multiple IP fragments. 1379 * do checksum of UDP packets sent as multiple IP fragments.
1337 */ 1380 */
1338 offset = skb_checksum_start_offset(skb); 1381 offset = skb_checksum_start_offset(skb);
1339 csum = skb_checksum(skb, offset, skb->len- offset, 0); 1382 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1340 offset += skb->csum_offset; 1383 offset += skb->csum_offset;
1341 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 1384 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1342 skb->ip_summed = CHECKSUM_NONE; 1385 skb->ip_summed = CHECKSUM_NONE;
@@ -1470,7 +1513,7 @@ struct proto udpv6_prot = {
1470 .getsockopt = udpv6_getsockopt, 1513 .getsockopt = udpv6_getsockopt,
1471 .sendmsg = udpv6_sendmsg, 1514 .sendmsg = udpv6_sendmsg,
1472 .recvmsg = udpv6_recvmsg, 1515 .recvmsg = udpv6_recvmsg,
1473 .backlog_rcv = udpv6_queue_rcv_skb, 1516 .backlog_rcv = __udpv6_queue_rcv_skb,
1474 .hash = udp_lib_hash, 1517 .hash = udp_lib_hash,
1475 .unhash = udp_lib_unhash, 1518 .unhash = udp_lib_unhash,
1476 .rehash = udp_v6_rehash, 1519 .rehash = udp_v6_rehash,
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 4eeff89c1aaa..8755a3079d0f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -146,7 +146,7 @@ static int __xfrm6_output(struct sk_buff *skb)
146 return -EMSGSIZE; 146 return -EMSGSIZE;
147 } 147 }
148 148
149 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 149 if (x->props.mode == XFRM_MODE_TUNNEL &&
150 ((skb->len > mtu && !skb_is_gso(skb)) || 150 ((skb->len > mtu && !skb_is_gso(skb)) ||
151 dst_allfrag(skb_dst(skb)))) { 151 dst_allfrag(skb_dst(skb)))) {
152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8ea65e032733..8625fba96db9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -334,8 +334,8 @@ int __init xfrm6_init(void)
334 goto out_policy; 334 goto out_policy;
335 335
336#ifdef CONFIG_SYSCTL 336#ifdef CONFIG_SYSCTL
337 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 337 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
338 xfrm6_policy_table); 338 xfrm6_policy_table);
339#endif 339#endif
340out: 340out:
341 return ret; 341 return ret;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4fe1db12d2a3..ee5a7065aacc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,9 +68,9 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
68 68
69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
70 70
71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) 71static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
72{ 72{
73 unsigned h; 73 unsigned int h;
74 74
75 h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); 75 h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
76 h ^= h >> 16; 76 h ^= h >> 16;
@@ -80,7 +80,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
80 return h; 80 return h;
81} 81}
82 82
83static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi) 83static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
84{ 84{
85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
86} 86}
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 9680226640ef..dfd6faaf0ea7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -983,10 +983,6 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
983 goto out; 983 goto out;
984 984
985 switch (idef->ipx_dlink_type) { 985 switch (idef->ipx_dlink_type) {
986 case IPX_FRAME_TR_8022:
987 printk(KERN_WARNING "IPX frame type 802.2TR is "
988 "obsolete Use 802.2 instead.\n");
989 /* fall through */
990 case IPX_FRAME_8022: 986 case IPX_FRAME_8022:
991 dlink_type = htons(ETH_P_802_2); 987 dlink_type = htons(ETH_P_802_2);
992 datalink = p8022_datalink; 988 datalink = p8022_datalink;
@@ -996,10 +992,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
996 dlink_type = htons(ETH_P_IPX); 992 dlink_type = htons(ETH_P_IPX);
997 datalink = pEII_datalink; 993 datalink = pEII_datalink;
998 break; 994 break;
999 } else 995 }
1000 printk(KERN_WARNING "IPX frame type EtherII over "
1001 "token-ring is obsolete. Use SNAP "
1002 "instead.\n");
1003 /* fall through */ 996 /* fall through */
1004 case IPX_FRAME_SNAP: 997 case IPX_FRAME_SNAP:
1005 dlink_type = htons(ETH_P_SNAP); 998 dlink_type = htons(ETH_P_SNAP);
@@ -1275,7 +1268,6 @@ const char *ipx_frame_name(__be16 frame)
1275 case ETH_P_802_2: rc = "802.2"; break; 1268 case ETH_P_802_2: rc = "802.2"; break;
1276 case ETH_P_SNAP: rc = "SNAP"; break; 1269 case ETH_P_SNAP: rc = "SNAP"; break;
1277 case ETH_P_802_3: rc = "802.3"; break; 1270 case ETH_P_802_3: rc = "802.3"; break;
1278 case ETH_P_TR_802_2: rc = "802.2TR"; break;
1279 } 1271 }
1280 1272
1281 return rc; 1273 return rc;
@@ -1909,9 +1901,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1909 (const unsigned short __user *)argp); 1901 (const unsigned short __user *)argp);
1910 break; 1902 break;
1911 case SIOCGSTAMP: 1903 case SIOCGSTAMP:
1912 rc = -EINVAL; 1904 rc = sock_get_timestamp(sk, argp);
1913 if (sk)
1914 rc = sock_get_timestamp(sk, argp);
1915 break; 1905 break;
1916 case SIOCGIFDSTADDR: 1906 case SIOCGIFDSTADDR:
1917 case SIOCSIFDSTADDR: 1907 case SIOCSIFDSTADDR:
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index bd6dca00fb85..ad7c03dedaab 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/sysctl.h> 10#include <linux/sysctl.h>
11#include <net/net_namespace.h>
11 12
12#ifndef CONFIG_SYSCTL 13#ifndef CONFIG_SYSCTL
13#error This file should not be compiled without CONFIG_SYSCTL defined 14#error This file should not be compiled without CONFIG_SYSCTL defined
@@ -27,20 +28,14 @@ static struct ctl_table ipx_table[] = {
27 { }, 28 { },
28}; 29};
29 30
30static struct ctl_path ipx_path[] = {
31 { .procname = "net", },
32 { .procname = "ipx", },
33 { }
34};
35
36static struct ctl_table_header *ipx_table_header; 31static struct ctl_table_header *ipx_table_header;
37 32
38void ipx_register_sysctl(void) 33void ipx_register_sysctl(void)
39{ 34{
40 ipx_table_header = register_sysctl_paths(ipx_path, ipx_table); 35 ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
41} 36}
42 37
43void ipx_unregister_sysctl(void) 38void ipx_unregister_sysctl(void)
44{ 39{
45 unregister_sysctl_table(ipx_table_header); 40 unregister_net_sysctl_table(ipx_table_header);
46} 41}
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 253695d43fd9..6b9d5a0e42f9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -122,7 +122,6 @@ static int __init ircomm_tty_init(void)
122 return -ENOMEM; 122 return -ENOMEM;
123 } 123 }
124 124
125 driver->owner = THIS_MODULE;
126 driver->driver_name = "ircomm"; 125 driver->driver_name = "ircomm";
127 driver->name = "ircomm"; 126 driver->name = "ircomm";
128 driver->major = IRCOMM_TTY_MAJOR; 127 driver->major = IRCOMM_TTY_MAJOR;
@@ -366,16 +365,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
366static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) 365static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
367{ 366{
368 struct ircomm_tty_cb *self; 367 struct ircomm_tty_cb *self;
369 unsigned int line; 368 unsigned int line = tty->index;
370 unsigned long flags; 369 unsigned long flags;
371 int ret; 370 int ret;
372 371
373 IRDA_DEBUG(2, "%s()\n", __func__ ); 372 IRDA_DEBUG(2, "%s()\n", __func__ );
374 373
375 line = tty->index;
376 if (line >= IRCOMM_TTY_PORTS)
377 return -ENODEV;
378
379 /* Check if instance already exists */ 374 /* Check if instance already exists */
380 self = hashbin_lock_find(ircomm_tty, line, NULL); 375 self = hashbin_lock_find(ircomm_tty, line, NULL);
381 if (!self) { 376 if (!self) {
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 77c5e6499f8f..d0667d68351d 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -54,7 +54,7 @@
54 */ 54 */
55static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) 55static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
56{ 56{
57 unsigned cflag, cval; 57 unsigned int cflag, cval;
58 int baud; 58 int baud;
59 59
60 IRDA_DEBUG(2, "%s()\n", __func__ ); 60 IRDA_DEBUG(2, "%s()\n", __func__ );
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index ba1a3fc39b5c..42cf1390ce9c 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -37,7 +37,6 @@
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <net/arp.h> 38#include <net/arp.h>
39 39
40#include <asm/system.h>
41#include <asm/byteorder.h> 40#include <asm/byteorder.h>
42 41
43#include <net/irda/irda.h> 42#include <net/irda/irda.h>
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 579617cca125..7ac4d1becbfc 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -40,7 +40,6 @@
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/bitops.h> 41#include <linux/bitops.h>
42 42
43#include <asm/system.h>
44#include <asm/byteorder.h> 43#include <asm/byteorder.h>
45 44
46#include <net/irda/irda.h> 45#include <net/irda/irda.h>
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 8b61cf0d8a69..32dcaac70b0c 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -36,7 +36,6 @@
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39#include <asm/system.h>
40#include <asm/byteorder.h> 39#include <asm/byteorder.h>
41 40
42#include <net/irda/irda.h> 41#include <net/irda/irda.h>
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 979ecb2435a7..564eb0b8afa3 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -254,7 +254,7 @@
254#include <linux/init.h> 254#include <linux/init.h>
255 255
256#include <linux/ppp_defs.h> 256#include <linux/ppp_defs.h>
257#include <linux/if_ppp.h> 257#include <linux/ppp-ioctl.h>
258#include <linux/ppp_channel.h> 258#include <linux/ppp_channel.h>
259 259
260#include <net/irda/irda.h> 260#include <net/irda/irda.h>
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2615ffc8e785..de73f6496db5 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -235,12 +235,6 @@ static ctl_table irda_table[] = {
235 { } 235 { }
236}; 236};
237 237
238static struct ctl_path irda_path[] = {
239 { .procname = "net", },
240 { .procname = "irda", },
241 { }
242};
243
244static struct ctl_table_header *irda_table_header; 238static struct ctl_table_header *irda_table_header;
245 239
246/* 240/*
@@ -251,7 +245,7 @@ static struct ctl_table_header *irda_table_header;
251 */ 245 */
252int __init irda_sysctl_register(void) 246int __init irda_sysctl_register(void)
253{ 247{
254 irda_table_header = register_sysctl_paths(irda_path, irda_table); 248 irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
255 if (!irda_table_header) 249 if (!irda_table_header)
256 return -ENOMEM; 250 return -ENOMEM;
257 251
@@ -266,7 +260,7 @@ int __init irda_sysctl_register(void)
266 */ 260 */
267void irda_sysctl_unregister(void) 261void irda_sysctl_unregister(void)
268{ 262{
269 unregister_sysctl_table(irda_table_header); 263 unregister_net_sysctl_table(irda_table_header);
270} 264}
271 265
272 266
diff --git a/net/irda/timer.c b/net/irda/timer.c
index f418cb2ad49c..1d552b3946fc 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -24,7 +24,6 @@
24 * 24 *
25 ********************************************************************/ 25 ********************************************************************/
26 26
27#include <asm/system.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
29 28
30#include <net/irda/timer.h> 29#include <net/irda/timer.h>
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d5c5b8fd1d01..07d7d55a1b93 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -90,6 +90,7 @@ do { \
90 90
91static void iucv_sock_kill(struct sock *sk); 91static void iucv_sock_kill(struct sock *sk);
92static void iucv_sock_close(struct sock *sk); 92static void iucv_sock_close(struct sock *sk);
93static void iucv_sever_path(struct sock *, int);
93 94
94static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, 95static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev); 96 struct packet_type *pt, struct net_device *orig_dev);
@@ -130,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
130 memcpy(&dst[8], src, 8); 131 memcpy(&dst[8], src, 8);
131} 132}
132 133
133static void iucv_skb_queue_purge(struct sk_buff_head *list)
134{
135 struct sk_buff *skb;
136
137 while ((skb = skb_dequeue(list)) != NULL) {
138 if (skb->dev)
139 dev_put(skb->dev);
140 kfree_skb(skb);
141 }
142}
143
144static int afiucv_pm_prepare(struct device *dev) 134static int afiucv_pm_prepare(struct device *dev)
145{ 135{
146#ifdef CONFIG_PM_DEBUG 136#ifdef CONFIG_PM_DEBUG
@@ -175,17 +165,11 @@ static int afiucv_pm_freeze(struct device *dev)
175 read_lock(&iucv_sk_list.lock); 165 read_lock(&iucv_sk_list.lock);
176 sk_for_each(sk, node, &iucv_sk_list.head) { 166 sk_for_each(sk, node, &iucv_sk_list.head) {
177 iucv = iucv_sk(sk); 167 iucv = iucv_sk(sk);
178 iucv_skb_queue_purge(&iucv->send_skb_q);
179 skb_queue_purge(&iucv->backlog_skb_q);
180 switch (sk->sk_state) { 168 switch (sk->sk_state) {
181 case IUCV_DISCONN: 169 case IUCV_DISCONN:
182 case IUCV_CLOSING: 170 case IUCV_CLOSING:
183 case IUCV_CONNECTED: 171 case IUCV_CONNECTED:
184 if (iucv->path) { 172 iucv_sever_path(sk, 0);
185 err = pr_iucv->path_sever(iucv->path, NULL);
186 iucv_path_free(iucv->path);
187 iucv->path = NULL;
188 }
189 break; 173 break;
190 case IUCV_OPEN: 174 case IUCV_OPEN:
191 case IUCV_BOUND: 175 case IUCV_BOUND:
@@ -194,6 +178,8 @@ static int afiucv_pm_freeze(struct device *dev)
194 default: 178 default:
195 break; 179 break;
196 } 180 }
181 skb_queue_purge(&iucv->send_skb_q);
182 skb_queue_purge(&iucv->backlog_skb_q);
197 } 183 }
198 read_unlock(&iucv_sk_list.lock); 184 read_unlock(&iucv_sk_list.lock);
199 return err; 185 return err;
@@ -338,7 +324,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
338static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, 324static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
339 struct sk_buff *skb, u8 flags) 325 struct sk_buff *skb, u8 flags)
340{ 326{
341 struct net *net = sock_net(sock);
342 struct iucv_sock *iucv = iucv_sk(sock); 327 struct iucv_sock *iucv = iucv_sk(sock);
343 struct af_iucv_trans_hdr *phs_hdr; 328 struct af_iucv_trans_hdr *phs_hdr;
344 struct sk_buff *nskb; 329 struct sk_buff *nskb;
@@ -375,10 +360,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
375 if (imsg) 360 if (imsg)
376 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); 361 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
377 362
378 skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if); 363 skb->dev = iucv->hs_dev;
379 if (!skb->dev) 364 if (!skb->dev)
380 return -ENODEV; 365 return -ENODEV;
381 if (!(skb->dev->flags & IFF_UP)) 366 if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
382 return -ENETDOWN; 367 return -ENETDOWN;
383 if (skb->len > skb->dev->mtu) { 368 if (skb->len > skb->dev->mtu) {
384 if (sock->sk_type == SOCK_SEQPACKET) 369 if (sock->sk_type == SOCK_SEQPACKET)
@@ -393,15 +378,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
393 return -ENOMEM; 378 return -ENOMEM;
394 skb_queue_tail(&iucv->send_skb_q, nskb); 379 skb_queue_tail(&iucv->send_skb_q, nskb);
395 err = dev_queue_xmit(skb); 380 err = dev_queue_xmit(skb);
396 if (err) { 381 if (net_xmit_eval(err)) {
397 skb_unlink(nskb, &iucv->send_skb_q); 382 skb_unlink(nskb, &iucv->send_skb_q);
398 dev_put(nskb->dev);
399 kfree_skb(nskb); 383 kfree_skb(nskb);
400 } else { 384 } else {
401 atomic_sub(confirm_recv, &iucv->msg_recv); 385 atomic_sub(confirm_recv, &iucv->msg_recv);
402 WARN_ON(atomic_read(&iucv->msg_recv) < 0); 386 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
403 } 387 }
404 return err; 388 return net_xmit_eval(err);
405} 389}
406 390
407static struct sock *__iucv_get_sock_by_name(char *nm) 391static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -419,7 +403,19 @@ static struct sock *__iucv_get_sock_by_name(char *nm)
419static void iucv_sock_destruct(struct sock *sk) 403static void iucv_sock_destruct(struct sock *sk)
420{ 404{
421 skb_queue_purge(&sk->sk_receive_queue); 405 skb_queue_purge(&sk->sk_receive_queue);
422 skb_queue_purge(&sk->sk_write_queue); 406 skb_queue_purge(&sk->sk_error_queue);
407
408 sk_mem_reclaim(sk);
409
410 if (!sock_flag(sk, SOCK_DEAD)) {
411 pr_err("Attempt to release alive iucv socket %p\n", sk);
412 return;
413 }
414
415 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
416 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
417 WARN_ON(sk->sk_wmem_queued);
418 WARN_ON(sk->sk_forward_alloc);
423} 419}
424 420
425/* Cleanup Listen */ 421/* Cleanup Listen */
@@ -447,14 +443,48 @@ static void iucv_sock_kill(struct sock *sk)
447 sock_put(sk); 443 sock_put(sk);
448} 444}
449 445
446/* Terminate an IUCV path */
447static void iucv_sever_path(struct sock *sk, int with_user_data)
448{
449 unsigned char user_data[16];
450 struct iucv_sock *iucv = iucv_sk(sk);
451 struct iucv_path *path = iucv->path;
452
453 if (iucv->path) {
454 iucv->path = NULL;
455 if (with_user_data) {
456 low_nmcpy(user_data, iucv->src_name);
457 high_nmcpy(user_data, iucv->dst_name);
458 ASCEBC(user_data, sizeof(user_data));
459 pr_iucv->path_sever(path, user_data);
460 } else
461 pr_iucv->path_sever(path, NULL);
462 iucv_path_free(path);
463 }
464}
465
466/* Send FIN through an IUCV socket for HIPER transport */
467static int iucv_send_ctrl(struct sock *sk, u8 flags)
468{
469 int err = 0;
470 int blen;
471 struct sk_buff *skb;
472
473 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
474 skb = sock_alloc_send_skb(sk, blen, 1, &err);
475 if (skb) {
476 skb_reserve(skb, blen);
477 err = afiucv_hs_send(NULL, sk, skb, flags);
478 }
479 return err;
480}
481
450/* Close an IUCV socket */ 482/* Close an IUCV socket */
451static void iucv_sock_close(struct sock *sk) 483static void iucv_sock_close(struct sock *sk)
452{ 484{
453 unsigned char user_data[16];
454 struct iucv_sock *iucv = iucv_sk(sk); 485 struct iucv_sock *iucv = iucv_sk(sk);
455 unsigned long timeo; 486 unsigned long timeo;
456 int err, blen; 487 int err = 0;
457 struct sk_buff *skb;
458 488
459 lock_sock(sk); 489 lock_sock(sk);
460 490
@@ -465,14 +495,7 @@ static void iucv_sock_close(struct sock *sk)
465 495
466 case IUCV_CONNECTED: 496 case IUCV_CONNECTED:
467 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 497 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
468 /* send fin */ 498 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
469 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
470 skb = sock_alloc_send_skb(sk, blen, 1, &err);
471 if (skb) {
472 skb_reserve(skb, blen);
473 err = afiucv_hs_send(NULL, sk, skb,
474 AF_IUCV_FLAG_FIN);
475 }
476 sk->sk_state = IUCV_DISCONN; 499 sk->sk_state = IUCV_DISCONN;
477 sk->sk_state_change(sk); 500 sk->sk_state_change(sk);
478 } 501 }
@@ -480,7 +503,7 @@ static void iucv_sock_close(struct sock *sk)
480 sk->sk_state = IUCV_CLOSING; 503 sk->sk_state = IUCV_CLOSING;
481 sk->sk_state_change(sk); 504 sk->sk_state_change(sk);
482 505
483 if (!skb_queue_empty(&iucv->send_skb_q)) { 506 if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
484 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 507 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
485 timeo = sk->sk_lingertime; 508 timeo = sk->sk_lingertime;
486 else 509 else
@@ -494,25 +517,20 @@ static void iucv_sock_close(struct sock *sk)
494 sk->sk_state = IUCV_CLOSED; 517 sk->sk_state = IUCV_CLOSED;
495 sk->sk_state_change(sk); 518 sk->sk_state_change(sk);
496 519
497 if (iucv->path) {
498 low_nmcpy(user_data, iucv->src_name);
499 high_nmcpy(user_data, iucv->dst_name);
500 ASCEBC(user_data, sizeof(user_data));
501 pr_iucv->path_sever(iucv->path, user_data);
502 iucv_path_free(iucv->path);
503 iucv->path = NULL;
504 }
505
506 sk->sk_err = ECONNRESET; 520 sk->sk_err = ECONNRESET;
507 sk->sk_state_change(sk); 521 sk->sk_state_change(sk);
508 522
509 iucv_skb_queue_purge(&iucv->send_skb_q); 523 skb_queue_purge(&iucv->send_skb_q);
510 skb_queue_purge(&iucv->backlog_skb_q); 524 skb_queue_purge(&iucv->backlog_skb_q);
511 break;
512 525
513 default: 526 default: /* fall through */
514 /* nothing to do here */ 527 iucv_sever_path(sk, 1);
515 break; 528 }
529
530 if (iucv->hs_dev) {
531 dev_put(iucv->hs_dev);
532 iucv->hs_dev = NULL;
533 sk->sk_bound_dev_if = 0;
516 } 534 }
517 535
518 /* mark socket for deletion by iucv_sock_kill() */ 536 /* mark socket for deletion by iucv_sock_kill() */
@@ -706,7 +724,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
706 goto done_unlock; 724 goto done_unlock;
707 725
708 /* Bind the socket */ 726 /* Bind the socket */
709
710 if (pr_iucv) 727 if (pr_iucv)
711 if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) 728 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
712 goto vm_bind; /* VM IUCV transport */ 729 goto vm_bind; /* VM IUCV transport */
@@ -720,6 +737,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
720 memcpy(iucv->src_name, sa->siucv_name, 8); 737 memcpy(iucv->src_name, sa->siucv_name, 8);
721 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); 738 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
722 sk->sk_bound_dev_if = dev->ifindex; 739 sk->sk_bound_dev_if = dev->ifindex;
740 iucv->hs_dev = dev;
741 dev_hold(dev);
723 sk->sk_state = IUCV_BOUND; 742 sk->sk_state = IUCV_BOUND;
724 iucv->transport = AF_IUCV_TRANS_HIPER; 743 iucv->transport = AF_IUCV_TRANS_HIPER;
725 if (!iucv->msglimit) 744 if (!iucv->msglimit)
@@ -780,26 +799,6 @@ static int iucv_sock_autobind(struct sock *sk)
780 return err; 799 return err;
781} 800}
782 801
783static int afiucv_hs_connect(struct socket *sock)
784{
785 struct sock *sk = sock->sk;
786 struct sk_buff *skb;
787 int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
788 int err = 0;
789
790 /* send syn */
791 skb = sock_alloc_send_skb(sk, blen, 1, &err);
792 if (!skb) {
793 err = -ENOMEM;
794 goto done;
795 }
796 skb->dev = NULL;
797 skb_reserve(skb, blen);
798 err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
799done:
800 return err;
801}
802
803static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) 802static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
804{ 803{
805 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 804 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
@@ -880,7 +879,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
880 memcpy(iucv->dst_name, sa->siucv_name, 8); 879 memcpy(iucv->dst_name, sa->siucv_name, 8);
881 880
882 if (iucv->transport == AF_IUCV_TRANS_HIPER) 881 if (iucv->transport == AF_IUCV_TRANS_HIPER)
883 err = afiucv_hs_connect(sock); 882 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
884 else 883 else
885 err = afiucv_path_connect(sock, addr); 884 err = afiucv_path_connect(sock, addr);
886 if (err) 885 if (err)
@@ -894,11 +893,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
894 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) 893 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
895 err = -ECONNREFUSED; 894 err = -ECONNREFUSED;
896 895
897 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) { 896 if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
898 pr_iucv->path_sever(iucv->path, NULL); 897 iucv_sever_path(sk, 0);
899 iucv_path_free(iucv->path);
900 iucv->path = NULL;
901 }
902 898
903done: 899done:
904 release_sock(sk); 900 release_sock(sk);
@@ -1124,8 +1120,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1124 noblock, &err); 1120 noblock, &err);
1125 else 1121 else
1126 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1122 skb = sock_alloc_send_skb(sk, len, noblock, &err);
1127 if (!skb) 1123 if (!skb) {
1124 err = -ENOMEM;
1128 goto out; 1125 goto out;
1126 }
1129 if (iucv->transport == AF_IUCV_TRANS_HIPER) 1127 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1130 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); 1128 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1131 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1129 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1148,6 +1146,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1148 /* increment and save iucv message tag for msg_completion cbk */ 1146 /* increment and save iucv message tag for msg_completion cbk */
1149 txmsg.tag = iucv->send_tag++; 1147 txmsg.tag = iucv->send_tag++;
1150 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1148 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1149
1151 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1150 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1152 atomic_inc(&iucv->msg_sent); 1151 atomic_inc(&iucv->msg_sent);
1153 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1152 err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1202,8 +1201,6 @@ release:
1202 return len; 1201 return len;
1203 1202
1204fail: 1203fail:
1205 if (skb->dev)
1206 dev_put(skb->dev);
1207 kfree_skb(skb); 1204 kfree_skb(skb);
1208out: 1205out:
1209 release_sock(sk); 1206 release_sock(sk);
@@ -1332,8 +1329,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1332 struct sock *sk = sock->sk; 1329 struct sock *sk = sock->sk;
1333 struct iucv_sock *iucv = iucv_sk(sk); 1330 struct iucv_sock *iucv = iucv_sk(sk);
1334 unsigned int copied, rlen; 1331 unsigned int copied, rlen;
1335 struct sk_buff *skb, *rskb, *cskb, *sskb; 1332 struct sk_buff *skb, *rskb, *cskb;
1336 int blen;
1337 int err = 0; 1333 int err = 0;
1338 1334
1339 if ((sk->sk_state == IUCV_DISCONN) && 1335 if ((sk->sk_state == IUCV_DISCONN) &&
@@ -1356,6 +1352,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1356 1352
1357 rlen = skb->len; /* real length of skb */ 1353 rlen = skb->len; /* real length of skb */
1358 copied = min_t(unsigned int, rlen, len); 1354 copied = min_t(unsigned int, rlen, len);
1355 if (!rlen)
1356 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1359 1357
1360 cskb = skb; 1358 cskb = skb;
1361 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1359 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
@@ -1396,7 +1394,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1396 } 1394 }
1397 1395
1398 kfree_skb(skb); 1396 kfree_skb(skb);
1399 atomic_inc(&iucv->msg_recv); 1397 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1398 atomic_inc(&iucv->msg_recv);
1399 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1400 WARN_ON(1);
1401 iucv_sock_close(sk);
1402 return -EFAULT;
1403 }
1404 }
1400 1405
1401 /* Queue backlog skbs */ 1406 /* Queue backlog skbs */
1402 spin_lock_bh(&iucv->message_q.lock); 1407 spin_lock_bh(&iucv->message_q.lock);
@@ -1415,15 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1415 iucv_process_message_q(sk); 1420 iucv_process_message_q(sk);
1416 if (atomic_read(&iucv->msg_recv) >= 1421 if (atomic_read(&iucv->msg_recv) >=
1417 iucv->msglimit / 2) { 1422 iucv->msglimit / 2) {
1418 /* send WIN to peer */ 1423 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1419 blen = sizeof(struct af_iucv_trans_hdr) +
1420 ETH_HLEN;
1421 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1422 if (sskb) {
1423 skb_reserve(sskb, blen);
1424 err = afiucv_hs_send(NULL, sk, sskb,
1425 AF_IUCV_FLAG_WIN);
1426 }
1427 if (err) { 1424 if (err) {
1428 sk->sk_state = IUCV_DISCONN; 1425 sk->sk_state = IUCV_DISCONN;
1429 sk->sk_state_change(sk); 1426 sk->sk_state_change(sk);
@@ -1486,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1486 if (sk->sk_state == IUCV_DISCONN) 1483 if (sk->sk_state == IUCV_DISCONN)
1487 mask |= POLLIN; 1484 mask |= POLLIN;
1488 1485
1489 if (sock_writeable(sk)) 1486 if (sock_writeable(sk) && iucv_below_msglim(sk))
1490 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1487 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1491 else 1488 else
1492 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1489 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1508,42 +1505,47 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1508 1505
1509 lock_sock(sk); 1506 lock_sock(sk);
1510 switch (sk->sk_state) { 1507 switch (sk->sk_state) {
1508 case IUCV_LISTEN:
1511 case IUCV_DISCONN: 1509 case IUCV_DISCONN:
1512 case IUCV_CLOSING: 1510 case IUCV_CLOSING:
1513 case IUCV_CLOSED: 1511 case IUCV_CLOSED:
1514 err = -ENOTCONN; 1512 err = -ENOTCONN;
1515 goto fail; 1513 goto fail;
1516
1517 default: 1514 default:
1518 sk->sk_shutdown |= how;
1519 break; 1515 break;
1520 } 1516 }
1521 1517
1522 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1518 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1523 txmsg.class = 0; 1519 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1524 txmsg.tag = 0; 1520 txmsg.class = 0;
1525 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 1521 txmsg.tag = 0;
1526 0, (void *) iprm_shutdown, 8); 1522 err = pr_iucv->message_send(iucv->path, &txmsg,
1527 if (err) { 1523 IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1528 switch (err) { 1524 if (err) {
1529 case 1: 1525 switch (err) {
1530 err = -ENOTCONN; 1526 case 1:
1531 break; 1527 err = -ENOTCONN;
1532 case 2: 1528 break;
1533 err = -ECONNRESET; 1529 case 2:
1534 break; 1530 err = -ECONNRESET;
1535 default: 1531 break;
1536 err = -ENOTCONN; 1532 default:
1537 break; 1533 err = -ENOTCONN;
1534 break;
1535 }
1538 } 1536 }
1539 } 1537 } else
1538 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1540 } 1539 }
1541 1540
1541 sk->sk_shutdown |= how;
1542 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1542 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1543 err = pr_iucv->path_quiesce(iucv->path, NULL); 1543 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1544 if (err) 1544 err = pr_iucv->path_quiesce(iucv->path, NULL);
1545 err = -ENOTCONN; 1545 if (err)
1546 1546 err = -ENOTCONN;
1547/* skb_queue_purge(&sk->sk_receive_queue); */
1548 }
1547 skb_queue_purge(&sk->sk_receive_queue); 1549 skb_queue_purge(&sk->sk_receive_queue);
1548 } 1550 }
1549 1551
@@ -1565,13 +1567,6 @@ static int iucv_sock_release(struct socket *sock)
1565 1567
1566 iucv_sock_close(sk); 1568 iucv_sock_close(sk);
1567 1569
1568 /* Unregister with IUCV base support */
1569 if (iucv_sk(sk)->path) {
1570 pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
1571 iucv_path_free(iucv_sk(sk)->path);
1572 iucv_sk(sk)->path = NULL;
1573 }
1574
1575 sock_orphan(sk); 1570 sock_orphan(sk);
1576 iucv_sock_kill(sk); 1571 iucv_sock_kill(sk);
1577 return err; 1572 return err;
@@ -1633,7 +1628,8 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1633{ 1628{
1634 struct sock *sk = sock->sk; 1629 struct sock *sk = sock->sk;
1635 struct iucv_sock *iucv = iucv_sk(sk); 1630 struct iucv_sock *iucv = iucv_sk(sk);
1636 int val, len; 1631 unsigned int val;
1632 int len;
1637 1633
1638 if (level != SOL_IUCV) 1634 if (level != SOL_IUCV)
1639 return -ENOPROTOOPT; 1635 return -ENOPROTOOPT;
@@ -1656,6 +1652,13 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1656 : iucv->msglimit; /* default */ 1652 : iucv->msglimit; /* default */
1657 release_sock(sk); 1653 release_sock(sk);
1658 break; 1654 break;
1655 case SO_MSGSIZE:
1656 if (sk->sk_state == IUCV_OPEN)
1657 return -EBADFD;
1658 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1659 sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1660 0x7fffffff;
1661 break;
1659 default: 1662 default:
1660 return -ENOPROTOOPT; 1663 return -ENOPROTOOPT;
1661 } 1664 }
@@ -1750,8 +1753,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1750 path->msglim = iucv->msglimit; 1753 path->msglim = iucv->msglimit;
1751 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); 1754 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1752 if (err) { 1755 if (err) {
1753 err = pr_iucv->path_sever(path, user_data); 1756 iucv_sever_path(nsk, 1);
1754 iucv_path_free(path);
1755 iucv_sock_kill(nsk); 1757 iucv_sock_kill(nsk);
1756 goto fail; 1758 goto fail;
1757 } 1759 }
@@ -1828,6 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1828 struct sk_buff *list_skb = list->next; 1830 struct sk_buff *list_skb = list->next;
1829 unsigned long flags; 1831 unsigned long flags;
1830 1832
1833 bh_lock_sock(sk);
1831 if (!skb_queue_empty(list)) { 1834 if (!skb_queue_empty(list)) {
1832 spin_lock_irqsave(&list->lock, flags); 1835 spin_lock_irqsave(&list->lock, flags);
1833 1836
@@ -1849,7 +1852,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
1849 iucv_sock_wake_msglim(sk); 1852 iucv_sock_wake_msglim(sk);
1850 } 1853 }
1851 } 1854 }
1852 BUG_ON(!this);
1853 1855
1854 if (sk->sk_state == IUCV_CLOSING) { 1856 if (sk->sk_state == IUCV_CLOSING) {
1855 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { 1857 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1857,6 +1859,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1857 sk->sk_state_change(sk); 1859 sk->sk_state_change(sk);
1858 } 1860 }
1859 } 1861 }
1862 bh_unlock_sock(sk);
1860 1863
1861} 1864}
1862 1865
@@ -1864,9 +1867,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1864{ 1867{
1865 struct sock *sk = path->private; 1868 struct sock *sk = path->private;
1866 1869
1870 if (sk->sk_state == IUCV_CLOSED)
1871 return;
1872
1873 bh_lock_sock(sk);
1874 iucv_sever_path(sk, 1);
1867 sk->sk_state = IUCV_DISCONN; 1875 sk->sk_state = IUCV_DISCONN;
1868 1876
1869 sk->sk_state_change(sk); 1877 sk->sk_state_change(sk);
1878 bh_unlock_sock(sk);
1870} 1879}
1871 1880
1872/* called if the other communication side shuts down its RECV direction; 1881/* called if the other communication side shuts down its RECV direction;
@@ -1954,6 +1963,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1954 memcpy(niucv->src_name, iucv->src_name, 8); 1963 memcpy(niucv->src_name, iucv->src_name, 8);
1955 memcpy(niucv->src_user_id, iucv->src_user_id, 8); 1964 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1956 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; 1965 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1966 niucv->hs_dev = iucv->hs_dev;
1967 dev_hold(niucv->hs_dev);
1957 afiucv_swap_src_dest(skb); 1968 afiucv_swap_src_dest(skb);
1958 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; 1969 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1959 trans_hdr->window = niucv->msglimit; 1970 trans_hdr->window = niucv->msglimit;
@@ -2022,12 +2033,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2022 struct iucv_sock *iucv = iucv_sk(sk); 2033 struct iucv_sock *iucv = iucv_sk(sk);
2023 2034
2024 /* other end of connection closed */ 2035 /* other end of connection closed */
2025 if (iucv) { 2036 if (!iucv)
2026 bh_lock_sock(sk); 2037 goto out;
2038 bh_lock_sock(sk);
2039 if (sk->sk_state == IUCV_CONNECTED) {
2027 sk->sk_state = IUCV_DISCONN; 2040 sk->sk_state = IUCV_DISCONN;
2028 sk->sk_state_change(sk); 2041 sk->sk_state_change(sk);
2029 bh_unlock_sock(sk);
2030 } 2042 }
2043 bh_unlock_sock(sk);
2044out:
2031 kfree_skb(skb); 2045 kfree_skb(skb);
2032 return NET_RX_SUCCESS; 2046 return NET_RX_SUCCESS;
2033} 2047}
@@ -2069,8 +2083,13 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2069 return NET_RX_SUCCESS; 2083 return NET_RX_SUCCESS;
2070 } 2084 }
2071 2085
2086 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2087 kfree_skb(skb);
2088 return NET_RX_SUCCESS;
2089 }
2090
2072 /* write stuff from iucv_msg to skb cb */ 2091 /* write stuff from iucv_msg to skb cb */
2073 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) { 2092 if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
2074 kfree_skb(skb); 2093 kfree_skb(skb);
2075 return NET_RX_SUCCESS; 2094 return NET_RX_SUCCESS;
2076 } 2095 }
@@ -2172,11 +2191,14 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2172 break; 2191 break;
2173 case (AF_IUCV_FLAG_WIN): 2192 case (AF_IUCV_FLAG_WIN):
2174 err = afiucv_hs_callback_win(sk, skb); 2193 err = afiucv_hs_callback_win(sk, skb);
2175 if (skb->len > sizeof(struct af_iucv_trans_hdr)) 2194 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2176 err = afiucv_hs_callback_rx(sk, skb); 2195 kfree_skb(skb);
2177 else 2196 break;
2178 kfree(skb); 2197 }
2179 break; 2198 /* fall through and receive non-zero length data */
2199 case (AF_IUCV_FLAG_SHT):
2200 /* shutdown request */
2201 /* fall through and receive zero length data */
2180 case 0: 2202 case 0:
2181 /* plain data frame */ 2203 /* plain data frame */
2182 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2204 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2202,65 +2224,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2202 struct iucv_sock *iucv = NULL; 2224 struct iucv_sock *iucv = NULL;
2203 struct sk_buff_head *list; 2225 struct sk_buff_head *list;
2204 struct sk_buff *list_skb; 2226 struct sk_buff *list_skb;
2205 struct sk_buff *this = NULL; 2227 struct sk_buff *nskb;
2206 unsigned long flags; 2228 unsigned long flags;
2207 struct hlist_node *node; 2229 struct hlist_node *node;
2208 2230
2209 read_lock(&iucv_sk_list.lock); 2231 read_lock_irqsave(&iucv_sk_list.lock, flags);
2210 sk_for_each(sk, node, &iucv_sk_list.head) 2232 sk_for_each(sk, node, &iucv_sk_list.head)
2211 if (sk == isk) { 2233 if (sk == isk) {
2212 iucv = iucv_sk(sk); 2234 iucv = iucv_sk(sk);
2213 break; 2235 break;
2214 } 2236 }
2215 read_unlock(&iucv_sk_list.lock); 2237 read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2216 2238
2217 if (!iucv) 2239 if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2218 return; 2240 return;
2219 2241
2220 bh_lock_sock(sk);
2221 list = &iucv->send_skb_q; 2242 list = &iucv->send_skb_q;
2222 list_skb = list->next; 2243 spin_lock_irqsave(&list->lock, flags);
2223 if (skb_queue_empty(list)) 2244 if (skb_queue_empty(list))
2224 goto out_unlock; 2245 goto out_unlock;
2225 2246 list_skb = list->next;
2226 spin_lock_irqsave(&list->lock, flags); 2247 nskb = list_skb->next;
2227 while (list_skb != (struct sk_buff *)list) { 2248 while (list_skb != (struct sk_buff *)list) {
2228 if (skb_shinfo(list_skb) == skb_shinfo(skb)) { 2249 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2229 this = list_skb;
2230 switch (n) { 2250 switch (n) {
2231 case TX_NOTIFY_OK: 2251 case TX_NOTIFY_OK:
2232 __skb_unlink(this, list); 2252 __skb_unlink(list_skb, list);
2253 kfree_skb(list_skb);
2233 iucv_sock_wake_msglim(sk); 2254 iucv_sock_wake_msglim(sk);
2234 dev_put(this->dev);
2235 kfree_skb(this);
2236 break; 2255 break;
2237 case TX_NOTIFY_PENDING: 2256 case TX_NOTIFY_PENDING:
2238 atomic_inc(&iucv->pendings); 2257 atomic_inc(&iucv->pendings);
2239 break; 2258 break;
2240 case TX_NOTIFY_DELAYED_OK: 2259 case TX_NOTIFY_DELAYED_OK:
2241 __skb_unlink(this, list); 2260 __skb_unlink(list_skb, list);
2242 atomic_dec(&iucv->pendings); 2261 atomic_dec(&iucv->pendings);
2243 if (atomic_read(&iucv->pendings) <= 0) 2262 if (atomic_read(&iucv->pendings) <= 0)
2244 iucv_sock_wake_msglim(sk); 2263 iucv_sock_wake_msglim(sk);
2245 dev_put(this->dev); 2264 kfree_skb(list_skb);
2246 kfree_skb(this);
2247 break; 2265 break;
2248 case TX_NOTIFY_UNREACHABLE: 2266 case TX_NOTIFY_UNREACHABLE:
2249 case TX_NOTIFY_DELAYED_UNREACHABLE: 2267 case TX_NOTIFY_DELAYED_UNREACHABLE:
2250 case TX_NOTIFY_TPQFULL: /* not yet used */ 2268 case TX_NOTIFY_TPQFULL: /* not yet used */
2251 case TX_NOTIFY_GENERALERROR: 2269 case TX_NOTIFY_GENERALERROR:
2252 case TX_NOTIFY_DELAYED_GENERALERROR: 2270 case TX_NOTIFY_DELAYED_GENERALERROR:
2253 __skb_unlink(this, list); 2271 __skb_unlink(list_skb, list);
2254 dev_put(this->dev); 2272 kfree_skb(list_skb);
2255 kfree_skb(this); 2273 if (sk->sk_state == IUCV_CONNECTED) {
2256 sk->sk_state = IUCV_DISCONN; 2274 sk->sk_state = IUCV_DISCONN;
2257 sk->sk_state_change(sk); 2275 sk->sk_state_change(sk);
2276 }
2258 break; 2277 break;
2259 } 2278 }
2260 break; 2279 break;
2261 } 2280 }
2262 list_skb = list_skb->next; 2281 list_skb = nskb;
2282 nskb = nskb->next;
2263 } 2283 }
2284out_unlock:
2264 spin_unlock_irqrestore(&list->lock, flags); 2285 spin_unlock_irqrestore(&list->lock, flags);
2265 2286
2266 if (sk->sk_state == IUCV_CLOSING) { 2287 if (sk->sk_state == IUCV_CLOSING) {
@@ -2270,9 +2291,45 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2270 } 2291 }
2271 } 2292 }
2272 2293
2273out_unlock:
2274 bh_unlock_sock(sk);
2275} 2294}
2295
2296/*
2297 * afiucv_netdev_event: handle netdev notifier chain events
2298 */
2299static int afiucv_netdev_event(struct notifier_block *this,
2300 unsigned long event, void *ptr)
2301{
2302 struct net_device *event_dev = (struct net_device *)ptr;
2303 struct hlist_node *node;
2304 struct sock *sk;
2305 struct iucv_sock *iucv;
2306
2307 switch (event) {
2308 case NETDEV_REBOOT:
2309 case NETDEV_GOING_DOWN:
2310 sk_for_each(sk, node, &iucv_sk_list.head) {
2311 iucv = iucv_sk(sk);
2312 if ((iucv->hs_dev == event_dev) &&
2313 (sk->sk_state == IUCV_CONNECTED)) {
2314 if (event == NETDEV_GOING_DOWN)
2315 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2316 sk->sk_state = IUCV_DISCONN;
2317 sk->sk_state_change(sk);
2318 }
2319 }
2320 break;
2321 case NETDEV_DOWN:
2322 case NETDEV_UNREGISTER:
2323 default:
2324 break;
2325 }
2326 return NOTIFY_DONE;
2327}
2328
2329static struct notifier_block afiucv_netdev_notifier = {
2330 .notifier_call = afiucv_netdev_event,
2331};
2332
2276static const struct proto_ops iucv_sock_ops = { 2333static const struct proto_ops iucv_sock_ops = {
2277 .family = PF_IUCV, 2334 .family = PF_IUCV,
2278 .owner = THIS_MODULE, 2335 .owner = THIS_MODULE,
@@ -2372,7 +2429,8 @@ static int __init afiucv_init(void)
2372 err = afiucv_iucv_init(); 2429 err = afiucv_iucv_init();
2373 if (err) 2430 if (err)
2374 goto out_sock; 2431 goto out_sock;
2375 } 2432 } else
2433 register_netdevice_notifier(&afiucv_netdev_notifier);
2376 dev_add_pack(&iucv_packet_type); 2434 dev_add_pack(&iucv_packet_type);
2377 return 0; 2435 return 0;
2378 2436
@@ -2393,7 +2451,8 @@ static void __exit afiucv_exit(void)
2393 driver_unregister(&af_iucv_driver); 2451 driver_unregister(&af_iucv_driver);
2394 pr_iucv->iucv_unregister(&af_iucv_handler, 0); 2452 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2395 symbol_put(iucv_if); 2453 symbol_put(iucv_if);
2396 } 2454 } else
2455 unregister_netdevice_notifier(&afiucv_netdev_notifier);
2397 dev_remove_pack(&iucv_packet_type); 2456 dev_remove_pack(&iucv_packet_type);
2398 sock_unregister(PF_IUCV); 2457 sock_unregister(PF_IUCV);
2399 proto_unregister(&iucv_proto); 2458 proto_unregister(&iucv_proto);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 403be43b793d..3ad1f9db5f8b 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1800,7 +1800,7 @@ static void iucv_work_fn(struct work_struct *work)
1800 * Handles external interrupts coming in from CP. 1800 * Handles external interrupts coming in from CP.
1801 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). 1801 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
1802 */ 1802 */
1803static void iucv_external_interrupt(unsigned int ext_int_code, 1803static void iucv_external_interrupt(struct ext_code ext_code,
1804 unsigned int param32, unsigned long param64) 1804 unsigned int param32, unsigned long param64)
1805{ 1805{
1806 struct iucv_irq_data *p; 1806 struct iucv_irq_data *p;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11dbb2255ccb..34e418508a67 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1714,7 +1714,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1714static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) 1714static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
1715{ 1715{
1716 struct net *net = sock_net(sk); 1716 struct net *net = sock_net(sk);
1717 unsigned proto; 1717 unsigned int proto;
1718 struct km_event c; 1718 struct km_event c;
1719 struct xfrm_audit audit_info; 1719 struct xfrm_audit audit_info;
1720 int err, err2; 1720 int err, err2;
@@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3480 3480
3481 /* Addresses to be used by KM for negotiation, if ext is available */ 3481 /* Addresses to be used by KM for negotiation, if ext is available */
3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) 3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
3483 return -EINVAL; 3483 goto err;
3484 3484
3485 /* selector src */ 3485 /* selector src */
3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); 3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
@@ -3547,7 +3547,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
3547 goto out; 3547 goto out;
3548 3548
3549 err = -EMSGSIZE; 3549 err = -EMSGSIZE;
3550 if ((unsigned)len > sk->sk_sndbuf - 32) 3550 if ((unsigned int)len > sk->sk_sndbuf - 32)
3551 goto out; 3551 goto out;
3552 3552
3553 err = -ENOBUFS; 3553 err = -ENOBUFS;
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
index 110e7bc2de5e..2870f41ea44d 100644
--- a/net/l2tp/Makefile
+++ b/net/l2tp/Makefile
@@ -10,3 +10,6 @@ obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o 10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o 11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o 12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
13ifneq ($(CONFIG_IPV6),)
14obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o
15endif
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c67943e..32b2155e7ab4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -18,6 +18,8 @@
18 * published by the Free Software Foundation. 18 * published by the Free Software Foundation.
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/string.h> 24#include <linux/string.h>
23#include <linux/list.h> 25#include <linux/list.h>
@@ -53,6 +55,10 @@
53#include <net/inet_common.h> 55#include <net/inet_common.h>
54#include <net/xfrm.h> 56#include <net/xfrm.h>
55#include <net/protocol.h> 57#include <net/protocol.h>
58#include <net/inet6_connection_sock.h>
59#include <net/inet_ecn.h>
60#include <net/ip6_route.h>
61#include <net/ip6_checksum.h>
56 62
57#include <asm/byteorder.h> 63#include <asm/byteorder.h>
58#include <linux/atomic.h> 64#include <linux/atomic.h>
@@ -82,12 +88,6 @@
82/* Default trace flags */ 88/* Default trace flags */
83#define L2TP_DEFAULT_DEBUG_FLAGS 0 89#define L2TP_DEFAULT_DEBUG_FLAGS 0
84 90
85#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
86 do { \
87 if ((_mask) & (_type)) \
88 printk(_lvl "L2TP: " _fmt, ##args); \
89 } while (0)
90
91/* Private data stored for received packets in the skb. 91/* Private data stored for received packets in the skb.
92 */ 92 */
93struct l2tp_skb_cb { 93struct l2tp_skb_cb {
@@ -137,14 +137,20 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
137 l2tp_tunnel_free(tunnel); 137 l2tp_tunnel_free(tunnel);
138} 138}
139#ifdef L2TP_REFCNT_DEBUG 139#ifdef L2TP_REFCNT_DEBUG
140#define l2tp_tunnel_inc_refcount(_t) do { \ 140#define l2tp_tunnel_inc_refcount(_t) \
141 printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ 141do { \
142 l2tp_tunnel_inc_refcount_1(_t); \ 142 pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
143 } while (0) 143 __func__, __LINE__, (_t)->name, \
144#define l2tp_tunnel_dec_refcount(_t) do { \ 144 atomic_read(&_t->ref_count)); \
145 printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ 145 l2tp_tunnel_inc_refcount_1(_t); \
146 l2tp_tunnel_dec_refcount_1(_t); \ 146} while (0)
147 } while (0) 147#define l2tp_tunnel_dec_refcount(_t)
148do { \
149 pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
150 __func__, __LINE__, (_t)->name, \
151 atomic_read(&_t->ref_count)); \
152 l2tp_tunnel_dec_refcount_1(_t); \
153} while (0)
148#else 154#else
149#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) 155#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
150#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) 156#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
@@ -326,16 +332,20 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
326 struct sk_buff *skbp; 332 struct sk_buff *skbp;
327 struct sk_buff *tmp; 333 struct sk_buff *tmp;
328 u32 ns = L2TP_SKB_CB(skb)->ns; 334 u32 ns = L2TP_SKB_CB(skb)->ns;
335 struct l2tp_stats *sstats;
329 336
330 spin_lock_bh(&session->reorder_q.lock); 337 spin_lock_bh(&session->reorder_q.lock);
338 sstats = &session->stats;
331 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 339 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
332 if (L2TP_SKB_CB(skbp)->ns > ns) { 340 if (L2TP_SKB_CB(skbp)->ns > ns) {
333 __skb_queue_before(&session->reorder_q, skbp, skb); 341 __skb_queue_before(&session->reorder_q, skbp, skb);
334 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 342 l2tp_dbg(session, L2TP_MSG_SEQ,
335 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 343 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
336 session->name, ns, L2TP_SKB_CB(skbp)->ns, 344 session->name, ns, L2TP_SKB_CB(skbp)->ns,
337 skb_queue_len(&session->reorder_q)); 345 skb_queue_len(&session->reorder_q));
338 session->stats.rx_oos_packets++; 346 u64_stats_update_begin(&sstats->syncp);
347 sstats->rx_oos_packets++;
348 u64_stats_update_end(&sstats->syncp);
339 goto out; 349 goto out;
340 } 350 }
341 } 351 }
@@ -352,16 +362,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
352{ 362{
353 struct l2tp_tunnel *tunnel = session->tunnel; 363 struct l2tp_tunnel *tunnel = session->tunnel;
354 int length = L2TP_SKB_CB(skb)->length; 364 int length = L2TP_SKB_CB(skb)->length;
365 struct l2tp_stats *tstats, *sstats;
355 366
356 /* We're about to requeue the skb, so return resources 367 /* We're about to requeue the skb, so return resources
357 * to its current owner (a socket receive buffer). 368 * to its current owner (a socket receive buffer).
358 */ 369 */
359 skb_orphan(skb); 370 skb_orphan(skb);
360 371
361 tunnel->stats.rx_packets++; 372 tstats = &tunnel->stats;
362 tunnel->stats.rx_bytes += length; 373 u64_stats_update_begin(&tstats->syncp);
363 session->stats.rx_packets++; 374 sstats = &session->stats;
364 session->stats.rx_bytes += length; 375 u64_stats_update_begin(&sstats->syncp);
376 tstats->rx_packets++;
377 tstats->rx_bytes += length;
378 sstats->rx_packets++;
379 sstats->rx_bytes += length;
380 u64_stats_update_end(&tstats->syncp);
381 u64_stats_update_end(&sstats->syncp);
365 382
366 if (L2TP_SKB_CB(skb)->has_seq) { 383 if (L2TP_SKB_CB(skb)->has_seq) {
367 /* Bump our Nr */ 384 /* Bump our Nr */
@@ -371,8 +388,8 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
371 else 388 else
372 session->nr &= 0xffffff; 389 session->nr &= 0xffffff;
373 390
374 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 391 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
375 "%s: updated nr to %hu\n", session->name, session->nr); 392 session->name, session->nr);
376 } 393 }
377 394
378 /* call private receive handler */ 395 /* call private receive handler */
@@ -392,6 +409,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
392{ 409{
393 struct sk_buff *skb; 410 struct sk_buff *skb;
394 struct sk_buff *tmp; 411 struct sk_buff *tmp;
412 struct l2tp_stats *sstats;
395 413
396 /* If the pkt at the head of the queue has the nr that we 414 /* If the pkt at the head of the queue has the nr that we
397 * expect to send up next, dequeue it and any other 415 * expect to send up next, dequeue it and any other
@@ -399,16 +417,19 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
399 */ 417 */
400start: 418start:
401 spin_lock_bh(&session->reorder_q.lock); 419 spin_lock_bh(&session->reorder_q.lock);
420 sstats = &session->stats;
402 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 421 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
403 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 422 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
404 session->stats.rx_seq_discards++; 423 u64_stats_update_begin(&sstats->syncp);
405 session->stats.rx_errors++; 424 sstats->rx_seq_discards++;
406 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 425 sstats->rx_errors++;
407 "%s: oos pkt %u len %d discarded (too old), " 426 u64_stats_update_end(&sstats->syncp);
408 "waiting for %u, reorder_q_len=%d\n", 427 l2tp_dbg(session, L2TP_MSG_SEQ,
409 session->name, L2TP_SKB_CB(skb)->ns, 428 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
410 L2TP_SKB_CB(skb)->length, session->nr, 429 session->name, L2TP_SKB_CB(skb)->ns,
411 skb_queue_len(&session->reorder_q)); 430 L2TP_SKB_CB(skb)->length, session->nr,
431 skb_queue_len(&session->reorder_q));
432 session->reorder_skip = 1;
412 __skb_unlink(skb, &session->reorder_q); 433 __skb_unlink(skb, &session->reorder_q);
413 kfree_skb(skb); 434 kfree_skb(skb);
414 if (session->deref) 435 if (session->deref)
@@ -417,13 +438,20 @@ start:
417 } 438 }
418 439
419 if (L2TP_SKB_CB(skb)->has_seq) { 440 if (L2TP_SKB_CB(skb)->has_seq) {
441 if (session->reorder_skip) {
442 l2tp_dbg(session, L2TP_MSG_SEQ,
443 "%s: advancing nr to next pkt: %u -> %u",
444 session->name, session->nr,
445 L2TP_SKB_CB(skb)->ns);
446 session->reorder_skip = 0;
447 session->nr = L2TP_SKB_CB(skb)->ns;
448 }
420 if (L2TP_SKB_CB(skb)->ns != session->nr) { 449 if (L2TP_SKB_CB(skb)->ns != session->nr) {
421 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 450 l2tp_dbg(session, L2TP_MSG_SEQ,
422 "%s: holding oos pkt %u len %d, " 451 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
423 "waiting for %u, reorder_q_len=%d\n", 452 session->name, L2TP_SKB_CB(skb)->ns,
424 session->name, L2TP_SKB_CB(skb)->ns, 453 L2TP_SKB_CB(skb)->length, session->nr,
425 L2TP_SKB_CB(skb)->length, session->nr, 454 skb_queue_len(&session->reorder_q));
426 skb_queue_len(&session->reorder_q));
427 goto out; 455 goto out;
428 } 456 }
429 } 457 }
@@ -446,21 +474,43 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
446{ 474{
447 struct udphdr *uh = udp_hdr(skb); 475 struct udphdr *uh = udp_hdr(skb);
448 u16 ulen = ntohs(uh->len); 476 u16 ulen = ntohs(uh->len);
449 struct inet_sock *inet;
450 __wsum psum; 477 __wsum psum;
451 478
452 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) 479 if (sk->sk_no_check || skb_csum_unnecessary(skb))
453 return 0; 480 return 0;
454 481
455 inet = inet_sk(sk); 482#if IS_ENABLED(CONFIG_IPV6)
456 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, 483 if (sk->sk_family == PF_INET6) {
457 IPPROTO_UDP, 0); 484 if (!uh->check) {
458 485 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
459 if ((skb->ip_summed == CHECKSUM_COMPLETE) && 486 return 1;
460 !csum_fold(csum_add(psum, skb->csum))) 487 }
461 return 0; 488 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
462 489 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
463 skb->csum = psum; 490 &ipv6_hdr(skb)->daddr, ulen,
491 IPPROTO_UDP, skb->csum)) {
492 skb->ip_summed = CHECKSUM_UNNECESSARY;
493 return 0;
494 }
495 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
496 &ipv6_hdr(skb)->daddr,
497 skb->len, IPPROTO_UDP,
498 0));
499 } else
500#endif
501 {
502 struct inet_sock *inet;
503 if (!uh->check)
504 return 0;
505 inet = inet_sk(sk);
506 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
507 ulen, IPPROTO_UDP, 0);
508
509 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
510 !csum_fold(csum_add(psum, skb->csum)))
511 return 0;
512 skb->csum = psum;
513 }
464 514
465 return __skb_checksum_complete(skb); 515 return __skb_checksum_complete(skb);
466} 516}
@@ -532,6 +582,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
532 struct l2tp_tunnel *tunnel = session->tunnel; 582 struct l2tp_tunnel *tunnel = session->tunnel;
533 int offset; 583 int offset;
534 u32 ns, nr; 584 u32 ns, nr;
585 struct l2tp_stats *sstats = &session->stats;
535 586
536 /* The ref count is increased since we now hold a pointer to 587 /* The ref count is increased since we now hold a pointer to
537 * the session. Take care to decrement the refcnt when exiting 588 * the session. Take care to decrement the refcnt when exiting
@@ -544,10 +595,13 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
544 /* Parse and check optional cookie */ 595 /* Parse and check optional cookie */
545 if (session->peer_cookie_len > 0) { 596 if (session->peer_cookie_len > 0) {
546 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { 597 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
547 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 598 l2tp_info(tunnel, L2TP_MSG_DATA,
548 "%s: cookie mismatch (%u/%u). Discarding.\n", 599 "%s: cookie mismatch (%u/%u). Discarding.\n",
549 tunnel->name, tunnel->tunnel_id, session->session_id); 600 tunnel->name, tunnel->tunnel_id,
550 session->stats.rx_cookie_discards++; 601 session->session_id);
602 u64_stats_update_begin(&sstats->syncp);
603 sstats->rx_cookie_discards++;
604 u64_stats_update_end(&sstats->syncp);
551 goto discard; 605 goto discard;
552 } 606 }
553 ptr += session->peer_cookie_len; 607 ptr += session->peer_cookie_len;
@@ -573,9 +627,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
573 L2TP_SKB_CB(skb)->ns = ns; 627 L2TP_SKB_CB(skb)->ns = ns;
574 L2TP_SKB_CB(skb)->has_seq = 1; 628 L2TP_SKB_CB(skb)->has_seq = 1;
575 629
576 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 630 l2tp_dbg(session, L2TP_MSG_SEQ,
577 "%s: recv data ns=%u, nr=%u, session nr=%u\n", 631 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
578 session->name, ns, nr, session->nr); 632 session->name, ns, nr, session->nr);
579 } 633 }
580 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { 634 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
581 u32 l2h = ntohl(*(__be32 *) ptr); 635 u32 l2h = ntohl(*(__be32 *) ptr);
@@ -587,9 +641,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
587 L2TP_SKB_CB(skb)->ns = ns; 641 L2TP_SKB_CB(skb)->ns = ns;
588 L2TP_SKB_CB(skb)->has_seq = 1; 642 L2TP_SKB_CB(skb)->has_seq = 1;
589 643
590 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 644 l2tp_dbg(session, L2TP_MSG_SEQ,
591 "%s: recv data ns=%u, session nr=%u\n", 645 "%s: recv data ns=%u, session nr=%u\n",
592 session->name, ns, session->nr); 646 session->name, ns, session->nr);
593 } 647 }
594 } 648 }
595 649
@@ -602,9 +656,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
602 * configure it so. 656 * configure it so.
603 */ 657 */
604 if ((!session->lns_mode) && (!session->send_seq)) { 658 if ((!session->lns_mode) && (!session->send_seq)) {
605 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, 659 l2tp_info(session, L2TP_MSG_SEQ,
606 "%s: requested to enable seq numbers by LNS\n", 660 "%s: requested to enable seq numbers by LNS\n",
607 session->name); 661 session->name);
608 session->send_seq = -1; 662 session->send_seq = -1;
609 l2tp_session_set_header_len(session, tunnel->version); 663 l2tp_session_set_header_len(session, tunnel->version);
610 } 664 }
@@ -613,10 +667,12 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
613 * If user has configured mandatory sequence numbers, discard. 667 * If user has configured mandatory sequence numbers, discard.
614 */ 668 */
615 if (session->recv_seq) { 669 if (session->recv_seq) {
616 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 670 l2tp_warn(session, L2TP_MSG_SEQ,
617 "%s: recv data has no seq numbers when required. " 671 "%s: recv data has no seq numbers when required. Discarding.\n",
618 "Discarding\n", session->name); 672 session->name);
619 session->stats.rx_seq_discards++; 673 u64_stats_update_begin(&sstats->syncp);
674 sstats->rx_seq_discards++;
675 u64_stats_update_end(&sstats->syncp);
620 goto discard; 676 goto discard;
621 } 677 }
622 678
@@ -626,16 +682,18 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
626 * LAC is broken. Discard the frame. 682 * LAC is broken. Discard the frame.
627 */ 683 */
628 if ((!session->lns_mode) && (session->send_seq)) { 684 if ((!session->lns_mode) && (session->send_seq)) {
629 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, 685 l2tp_info(session, L2TP_MSG_SEQ,
630 "%s: requested to disable seq numbers by LNS\n", 686 "%s: requested to disable seq numbers by LNS\n",
631 session->name); 687 session->name);
632 session->send_seq = 0; 688 session->send_seq = 0;
633 l2tp_session_set_header_len(session, tunnel->version); 689 l2tp_session_set_header_len(session, tunnel->version);
634 } else if (session->send_seq) { 690 } else if (session->send_seq) {
635 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 691 l2tp_warn(session, L2TP_MSG_SEQ,
636 "%s: recv data has no seq numbers when required. " 692 "%s: recv data has no seq numbers when required. Discarding.\n",
637 "Discarding\n", session->name); 693 session->name);
638 session->stats.rx_seq_discards++; 694 u64_stats_update_begin(&sstats->syncp);
695 sstats->rx_seq_discards++;
696 u64_stats_update_end(&sstats->syncp);
639 goto discard; 697 goto discard;
640 } 698 }
641 } 699 }
@@ -689,13 +747,14 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
689 * packets 747 * packets
690 */ 748 */
691 if (L2TP_SKB_CB(skb)->ns != session->nr) { 749 if (L2TP_SKB_CB(skb)->ns != session->nr) {
692 session->stats.rx_seq_discards++; 750 u64_stats_update_begin(&sstats->syncp);
693 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 751 sstats->rx_seq_discards++;
694 "%s: oos pkt %u len %d discarded, " 752 u64_stats_update_end(&sstats->syncp);
695 "waiting for %u, reorder_q_len=%d\n", 753 l2tp_dbg(session, L2TP_MSG_SEQ,
696 session->name, L2TP_SKB_CB(skb)->ns, 754 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
697 L2TP_SKB_CB(skb)->length, session->nr, 755 session->name, L2TP_SKB_CB(skb)->ns,
698 skb_queue_len(&session->reorder_q)); 756 L2TP_SKB_CB(skb)->length, session->nr,
757 skb_queue_len(&session->reorder_q));
699 goto discard; 758 goto discard;
700 } 759 }
701 skb_queue_tail(&session->reorder_q, skb); 760 skb_queue_tail(&session->reorder_q, skb);
@@ -716,7 +775,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
716 return; 775 return;
717 776
718discard: 777discard:
719 session->stats.rx_errors++; 778 u64_stats_update_begin(&sstats->syncp);
779 sstats->rx_errors++;
780 u64_stats_update_end(&sstats->syncp);
720 kfree_skb(skb); 781 kfree_skb(skb);
721 782
722 if (session->deref) 783 if (session->deref)
@@ -739,9 +800,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
739 unsigned char *ptr, *optr; 800 unsigned char *ptr, *optr;
740 u16 hdrflags; 801 u16 hdrflags;
741 u32 tunnel_id, session_id; 802 u32 tunnel_id, session_id;
742 int offset;
743 u16 version; 803 u16 version;
744 int length; 804 int length;
805 struct l2tp_stats *tstats;
745 806
746 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 807 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
747 goto discard_bad_csum; 808 goto discard_bad_csum;
@@ -751,8 +812,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
751 812
752 /* Short packet? */ 813 /* Short packet? */
753 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { 814 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
754 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 815 l2tp_info(tunnel, L2TP_MSG_DATA,
755 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); 816 "%s: recv short packet (len=%d)\n",
817 tunnel->name, skb->len);
756 goto error; 818 goto error;
757 } 819 }
758 820
@@ -762,14 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
762 if (!pskb_may_pull(skb, length)) 824 if (!pskb_may_pull(skb, length))
763 goto error; 825 goto error;
764 826
765 printk(KERN_DEBUG "%s: recv: ", tunnel->name); 827 pr_debug("%s: recv\n", tunnel->name);
766 828 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
767 offset = 0;
768 do {
769 printk(" %02X", skb->data[offset]);
770 } while (++offset < length);
771
772 printk("\n");
773 } 829 }
774 830
775 /* Point to L2TP header */ 831 /* Point to L2TP header */
@@ -781,9 +837,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
781 /* Check protocol version */ 837 /* Check protocol version */
782 version = hdrflags & L2TP_HDR_VER_MASK; 838 version = hdrflags & L2TP_HDR_VER_MASK;
783 if (version != tunnel->version) { 839 if (version != tunnel->version) {
784 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 840 l2tp_info(tunnel, L2TP_MSG_DATA,
785 "%s: recv protocol version mismatch: got %d expected %d\n", 841 "%s: recv protocol version mismatch: got %d expected %d\n",
786 tunnel->name, version, tunnel->version); 842 tunnel->name, version, tunnel->version);
787 goto error; 843 goto error;
788 } 844 }
789 845
@@ -792,8 +848,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
792 848
793 /* If type is control packet, it is handled by userspace. */ 849 /* If type is control packet, it is handled by userspace. */
794 if (hdrflags & L2TP_HDRFLAG_T) { 850 if (hdrflags & L2TP_HDRFLAG_T) {
795 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, 851 l2tp_dbg(tunnel, L2TP_MSG_DATA,
796 "%s: recv control packet, len=%d\n", tunnel->name, length); 852 "%s: recv control packet, len=%d\n",
853 tunnel->name, length);
797 goto error; 854 goto error;
798 } 855 }
799 856
@@ -821,9 +878,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
821 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); 878 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
822 if (!session || !session->recv_skb) { 879 if (!session || !session->recv_skb) {
823 /* Not found? Pass to userspace to deal with */ 880 /* Not found? Pass to userspace to deal with */
824 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 881 l2tp_info(tunnel, L2TP_MSG_DATA,
825 "%s: no session found (%u/%u). Passing up.\n", 882 "%s: no session found (%u/%u). Passing up.\n",
826 tunnel->name, tunnel_id, session_id); 883 tunnel->name, tunnel_id, session_id);
827 goto error; 884 goto error;
828 } 885 }
829 886
@@ -834,7 +891,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
834discard_bad_csum: 891discard_bad_csum:
835 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 892 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
836 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 893 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
837 tunnel->stats.rx_errors++; 894 tstats = &tunnel->stats;
895 u64_stats_update_begin(&tstats->syncp);
896 tstats->rx_errors++;
897 u64_stats_update_end(&tstats->syncp);
838 kfree_skb(skb); 898 kfree_skb(skb);
839 899
840 return 0; 900 return 0;
@@ -860,8 +920,8 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
860 if (tunnel == NULL) 920 if (tunnel == NULL)
861 goto pass_up; 921 goto pass_up;
862 922
863 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, 923 l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
864 "%s: received %d bytes\n", tunnel->name, skb->len); 924 tunnel->name, skb->len);
865 925
866 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) 926 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
867 goto pass_up_put; 927 goto pass_up_put;
@@ -903,8 +963,8 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
903 *bufp++ = 0; 963 *bufp++ = 0;
904 session->ns++; 964 session->ns++;
905 session->ns &= 0xffff; 965 session->ns &= 0xffff;
906 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 966 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
907 "%s: updated ns to %u\n", session->name, session->ns); 967 session->name, session->ns);
908 } 968 }
909 969
910 return bufp - optr; 970 return bufp - optr;
@@ -940,8 +1000,9 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
940 l2h = 0x40000000 | session->ns; 1000 l2h = 0x40000000 | session->ns;
941 session->ns++; 1001 session->ns++;
942 session->ns &= 0xffffff; 1002 session->ns &= 0xffffff;
943 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 1003 l2tp_dbg(session, L2TP_MSG_SEQ,
944 "%s: updated ns to %u\n", session->name, session->ns); 1004 "%s: updated ns to %u\n",
1005 session->name, session->ns);
945 } 1006 }
946 1007
947 *((__be32 *) bufp) = htonl(l2h); 1008 *((__be32 *) bufp) = htonl(l2h);
@@ -960,46 +1021,50 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
960 struct l2tp_tunnel *tunnel = session->tunnel; 1021 struct l2tp_tunnel *tunnel = session->tunnel;
961 unsigned int len = skb->len; 1022 unsigned int len = skb->len;
962 int error; 1023 int error;
1024 struct l2tp_stats *tstats, *sstats;
963 1025
964 /* Debug */ 1026 /* Debug */
965 if (session->send_seq) 1027 if (session->send_seq)
966 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, 1028 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
967 "%s: send %Zd bytes, ns=%u\n", session->name, 1029 session->name, data_len, session->ns - 1);
968 data_len, session->ns - 1);
969 else 1030 else
970 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, 1031 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
971 "%s: send %Zd bytes\n", session->name, data_len); 1032 session->name, data_len);
972 1033
973 if (session->debug & L2TP_MSG_DATA) { 1034 if (session->debug & L2TP_MSG_DATA) {
974 int i;
975 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1035 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
976 unsigned char *datap = skb->data + uhlen; 1036 unsigned char *datap = skb->data + uhlen;
977 1037
978 printk(KERN_DEBUG "%s: xmit:", session->name); 1038 pr_debug("%s: xmit\n", session->name);
979 for (i = 0; i < (len - uhlen); i++) { 1039 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
980 printk(" %02X", *datap++); 1040 datap, min_t(size_t, 32, len - uhlen));
981 if (i == 31) {
982 printk(" ...");
983 break;
984 }
985 }
986 printk("\n");
987 } 1041 }
988 1042
989 /* Queue the packet to IP for output */ 1043 /* Queue the packet to IP for output */
990 skb->local_df = 1; 1044 skb->local_df = 1;
991 error = ip_queue_xmit(skb, fl); 1045#if IS_ENABLED(CONFIG_IPV6)
1046 if (skb->sk->sk_family == PF_INET6)
1047 error = inet6_csk_xmit(skb, NULL);
1048 else
1049#endif
1050 error = ip_queue_xmit(skb, fl);
992 1051
993 /* Update stats */ 1052 /* Update stats */
1053 tstats = &tunnel->stats;
1054 u64_stats_update_begin(&tstats->syncp);
1055 sstats = &session->stats;
1056 u64_stats_update_begin(&sstats->syncp);
994 if (error >= 0) { 1057 if (error >= 0) {
995 tunnel->stats.tx_packets++; 1058 tstats->tx_packets++;
996 tunnel->stats.tx_bytes += len; 1059 tstats->tx_bytes += len;
997 session->stats.tx_packets++; 1060 sstats->tx_packets++;
998 session->stats.tx_bytes += len; 1061 sstats->tx_bytes += len;
999 } else { 1062 } else {
1000 tunnel->stats.tx_errors++; 1063 tstats->tx_errors++;
1001 session->stats.tx_errors++; 1064 sstats->tx_errors++;
1002 } 1065 }
1066 u64_stats_update_end(&tstats->syncp);
1067 u64_stats_update_end(&sstats->syncp);
1003 1068
1004 return 0; 1069 return 0;
1005} 1070}
@@ -1021,6 +1086,31 @@ static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1021 skb->destructor = l2tp_sock_wfree; 1086 skb->destructor = l2tp_sock_wfree;
1022} 1087}
1023 1088
1089#if IS_ENABLED(CONFIG_IPV6)
1090static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1091 int udp_len)
1092{
1093 struct ipv6_pinfo *np = inet6_sk(sk);
1094 struct udphdr *uh = udp_hdr(skb);
1095
1096 if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1097 !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1098 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1099 skb->ip_summed = CHECKSUM_UNNECESSARY;
1100 uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
1101 IPPROTO_UDP, csum);
1102 if (uh->check == 0)
1103 uh->check = CSUM_MANGLED_0;
1104 } else {
1105 skb->ip_summed = CHECKSUM_PARTIAL;
1106 skb->csum_start = skb_transport_header(skb) - skb->head;
1107 skb->csum_offset = offsetof(struct udphdr, check);
1108 uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
1109 udp_len, IPPROTO_UDP, 0);
1110 }
1111}
1112#endif
1113
1024/* If caller requires the skb to have a ppp header, the header must be 1114/* If caller requires the skb to have a ppp header, the header must be
1025 * inserted in the skb data before calling this function. 1115 * inserted in the skb data before calling this function.
1026 */ 1116 */
@@ -1089,6 +1179,11 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1089 uh->check = 0; 1179 uh->check = 0;
1090 1180
1091 /* Calculate UDP checksum if configured to do so */ 1181 /* Calculate UDP checksum if configured to do so */
1182#if IS_ENABLED(CONFIG_IPV6)
1183 if (sk->sk_family == PF_INET6)
1184 l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1185 else
1186#endif
1092 if (sk->sk_no_check == UDP_CSUM_NOXMIT) 1187 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1093 skb->ip_summed = CHECKSUM_NONE; 1188 skb->ip_summed = CHECKSUM_NONE;
1094 else if ((skb_dst(skb) && skb_dst(skb)->dev) && 1189 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
@@ -1141,8 +1236,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1141 if (tunnel == NULL) 1236 if (tunnel == NULL)
1142 goto end; 1237 goto end;
1143 1238
1144 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, 1239 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1145 "%s: closing...\n", tunnel->name);
1146 1240
1147 /* Close all sessions */ 1241 /* Close all sessions */
1148 l2tp_tunnel_closeall(tunnel); 1242 l2tp_tunnel_closeall(tunnel);
@@ -1184,8 +1278,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1184 1278
1185 BUG_ON(tunnel == NULL); 1279 BUG_ON(tunnel == NULL);
1186 1280
1187 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, 1281 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1188 "%s: closing all sessions...\n", tunnel->name); 1282 tunnel->name);
1189 1283
1190 write_lock_bh(&tunnel->hlist_lock); 1284 write_lock_bh(&tunnel->hlist_lock);
1191 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { 1285 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
@@ -1193,8 +1287,8 @@ again:
1193 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { 1287 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1194 session = hlist_entry(walk, struct l2tp_session, hlist); 1288 session = hlist_entry(walk, struct l2tp_session, hlist);
1195 1289
1196 PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO, 1290 l2tp_info(session, L2TP_MSG_CONTROL,
1197 "%s: closing session\n", session->name); 1291 "%s: closing session\n", session->name);
1198 1292
1199 hlist_del_init(&session->hlist); 1293 hlist_del_init(&session->hlist);
1200 1294
@@ -1247,8 +1341,7 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1247 BUG_ON(atomic_read(&tunnel->ref_count) != 0); 1341 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1248 BUG_ON(tunnel->sock != NULL); 1342 BUG_ON(tunnel->sock != NULL);
1249 1343
1250 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, 1344 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1251 "%s: free...\n", tunnel->name);
1252 1345
1253 /* Remove from tunnel list */ 1346 /* Remove from tunnel list */
1254 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1347 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1268,31 +1361,69 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1268{ 1361{
1269 int err = -EINVAL; 1362 int err = -EINVAL;
1270 struct sockaddr_in udp_addr; 1363 struct sockaddr_in udp_addr;
1364#if IS_ENABLED(CONFIG_IPV6)
1365 struct sockaddr_in6 udp6_addr;
1366 struct sockaddr_l2tpip6 ip6_addr;
1367#endif
1271 struct sockaddr_l2tpip ip_addr; 1368 struct sockaddr_l2tpip ip_addr;
1272 struct socket *sock = NULL; 1369 struct socket *sock = NULL;
1273 1370
1274 switch (cfg->encap) { 1371 switch (cfg->encap) {
1275 case L2TP_ENCAPTYPE_UDP: 1372 case L2TP_ENCAPTYPE_UDP:
1276 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); 1373#if IS_ENABLED(CONFIG_IPV6)
1277 if (err < 0) 1374 if (cfg->local_ip6 && cfg->peer_ip6) {
1278 goto out; 1375 err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
1376 if (err < 0)
1377 goto out;
1279 1378
1280 sock = *sockp; 1379 sock = *sockp;
1281 1380
1282 memset(&udp_addr, 0, sizeof(udp_addr)); 1381 memset(&udp6_addr, 0, sizeof(udp6_addr));
1283 udp_addr.sin_family = AF_INET; 1382 udp6_addr.sin6_family = AF_INET6;
1284 udp_addr.sin_addr = cfg->local_ip; 1383 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1285 udp_addr.sin_port = htons(cfg->local_udp_port); 1384 sizeof(udp6_addr.sin6_addr));
1286 err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); 1385 udp6_addr.sin6_port = htons(cfg->local_udp_port);
1287 if (err < 0) 1386 err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1288 goto out; 1387 sizeof(udp6_addr));
1388 if (err < 0)
1389 goto out;
1289 1390
1290 udp_addr.sin_family = AF_INET; 1391 udp6_addr.sin6_family = AF_INET6;
1291 udp_addr.sin_addr = cfg->peer_ip; 1392 memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1292 udp_addr.sin_port = htons(cfg->peer_udp_port); 1393 sizeof(udp6_addr.sin6_addr));
1293 err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); 1394 udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1294 if (err < 0) 1395 err = kernel_connect(sock,
1295 goto out; 1396 (struct sockaddr *) &udp6_addr,
1397 sizeof(udp6_addr), 0);
1398 if (err < 0)
1399 goto out;
1400 } else
1401#endif
1402 {
1403 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
1404 if (err < 0)
1405 goto out;
1406
1407 sock = *sockp;
1408
1409 memset(&udp_addr, 0, sizeof(udp_addr));
1410 udp_addr.sin_family = AF_INET;
1411 udp_addr.sin_addr = cfg->local_ip;
1412 udp_addr.sin_port = htons(cfg->local_udp_port);
1413 err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1414 sizeof(udp_addr));
1415 if (err < 0)
1416 goto out;
1417
1418 udp_addr.sin_family = AF_INET;
1419 udp_addr.sin_addr = cfg->peer_ip;
1420 udp_addr.sin_port = htons(cfg->peer_udp_port);
1421 err = kernel_connect(sock,
1422 (struct sockaddr *) &udp_addr,
1423 sizeof(udp_addr), 0);
1424 if (err < 0)
1425 goto out;
1426 }
1296 1427
1297 if (!cfg->use_udp_checksums) 1428 if (!cfg->use_udp_checksums)
1298 sock->sk->sk_no_check = UDP_CSUM_NOXMIT; 1429 sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
@@ -1300,27 +1431,61 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1300 break; 1431 break;
1301 1432
1302 case L2TP_ENCAPTYPE_IP: 1433 case L2TP_ENCAPTYPE_IP:
1303 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); 1434#if IS_ENABLED(CONFIG_IPV6)
1304 if (err < 0) 1435 if (cfg->local_ip6 && cfg->peer_ip6) {
1305 goto out; 1436 err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
1437 sockp);
1438 if (err < 0)
1439 goto out;
1306 1440
1307 sock = *sockp; 1441 sock = *sockp;
1308 1442
1309 memset(&ip_addr, 0, sizeof(ip_addr)); 1443 memset(&ip6_addr, 0, sizeof(ip6_addr));
1310 ip_addr.l2tp_family = AF_INET; 1444 ip6_addr.l2tp_family = AF_INET6;
1311 ip_addr.l2tp_addr = cfg->local_ip; 1445 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1312 ip_addr.l2tp_conn_id = tunnel_id; 1446 sizeof(ip6_addr.l2tp_addr));
1313 err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); 1447 ip6_addr.l2tp_conn_id = tunnel_id;
1314 if (err < 0) 1448 err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1315 goto out; 1449 sizeof(ip6_addr));
1450 if (err < 0)
1451 goto out;
1316 1452
1317 ip_addr.l2tp_family = AF_INET; 1453 ip6_addr.l2tp_family = AF_INET6;
1318 ip_addr.l2tp_addr = cfg->peer_ip; 1454 memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1319 ip_addr.l2tp_conn_id = peer_tunnel_id; 1455 sizeof(ip6_addr.l2tp_addr));
1320 err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); 1456 ip6_addr.l2tp_conn_id = peer_tunnel_id;
1321 if (err < 0) 1457 err = kernel_connect(sock,
1322 goto out; 1458 (struct sockaddr *) &ip6_addr,
1459 sizeof(ip6_addr), 0);
1460 if (err < 0)
1461 goto out;
1462 } else
1463#endif
1464 {
1465 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
1466 sockp);
1467 if (err < 0)
1468 goto out;
1469
1470 sock = *sockp;
1471
1472 memset(&ip_addr, 0, sizeof(ip_addr));
1473 ip_addr.l2tp_family = AF_INET;
1474 ip_addr.l2tp_addr = cfg->local_ip;
1475 ip_addr.l2tp_conn_id = tunnel_id;
1476 err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1477 sizeof(ip_addr));
1478 if (err < 0)
1479 goto out;
1323 1480
1481 ip_addr.l2tp_family = AF_INET;
1482 ip_addr.l2tp_addr = cfg->peer_ip;
1483 ip_addr.l2tp_conn_id = peer_tunnel_id;
1484 err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1485 sizeof(ip_addr), 0);
1486 if (err < 0)
1487 goto out;
1488 }
1324 break; 1489 break;
1325 1490
1326 default: 1491 default:
@@ -1357,7 +1522,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1357 err = -EBADF; 1522 err = -EBADF;
1358 sock = sockfd_lookup(fd, &err); 1523 sock = sockfd_lookup(fd, &err);
1359 if (!sock) { 1524 if (!sock) {
1360 printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", 1525 pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
1361 tunnel_id, fd, err); 1526 tunnel_id, fd, err);
1362 goto err; 1527 goto err;
1363 } 1528 }
@@ -1373,7 +1538,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1373 case L2TP_ENCAPTYPE_UDP: 1538 case L2TP_ENCAPTYPE_UDP:
1374 err = -EPROTONOSUPPORT; 1539 err = -EPROTONOSUPPORT;
1375 if (sk->sk_protocol != IPPROTO_UDP) { 1540 if (sk->sk_protocol != IPPROTO_UDP) {
1376 printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1541 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1377 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); 1542 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1378 goto err; 1543 goto err;
1379 } 1544 }
@@ -1381,7 +1546,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1381 case L2TP_ENCAPTYPE_IP: 1546 case L2TP_ENCAPTYPE_IP:
1382 err = -EPROTONOSUPPORT; 1547 err = -EPROTONOSUPPORT;
1383 if (sk->sk_protocol != IPPROTO_L2TP) { 1548 if (sk->sk_protocol != IPPROTO_L2TP) {
1384 printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1549 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1385 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); 1550 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1386 goto err; 1551 goto err;
1387 } 1552 }
@@ -1424,6 +1589,12 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1424 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1589 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1425 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1590 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1426 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1591 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1592#if IS_ENABLED(CONFIG_IPV6)
1593 if (sk->sk_family == PF_INET6)
1594 udpv6_encap_enable();
1595 else
1596#endif
1597 udp_encap_enable();
1427 } 1598 }
1428 1599
1429 sk->sk_user_data = tunnel; 1600 sk->sk_user_data = tunnel;
@@ -1577,7 +1748,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1577 1748
1578 session->session_id = session_id; 1749 session->session_id = session_id;
1579 session->peer_session_id = peer_session_id; 1750 session->peer_session_id = peer_session_id;
1580 session->nr = 1; 1751 session->nr = 0;
1581 1752
1582 sprintf(&session->name[0], "sess %u/%u", 1753 sprintf(&session->name[0], "sess %u/%u",
1583 tunnel->tunnel_id, session->session_id); 1754 tunnel->tunnel_id, session->session_id);
@@ -1683,7 +1854,7 @@ static int __init l2tp_init(void)
1683 if (rc) 1854 if (rc)
1684 goto out; 1855 goto out;
1685 1856
1686 printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); 1857 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1687 1858
1688out: 1859out:
1689 return rc; 1860 return rc;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e79fab..a38ec6cdeee1 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -45,6 +45,7 @@ struct l2tp_stats {
45 u64 rx_oos_packets; 45 u64 rx_oos_packets;
46 u64 rx_errors; 46 u64 rx_errors;
47 u64 rx_cookie_discards; 47 u64 rx_cookie_discards;
48 struct u64_stats_sync syncp;
48}; 49};
49 50
50struct l2tp_tunnel; 51struct l2tp_tunnel;
@@ -54,15 +55,15 @@ struct l2tp_tunnel;
54 */ 55 */
55struct l2tp_session_cfg { 56struct l2tp_session_cfg {
56 enum l2tp_pwtype pw_type; 57 enum l2tp_pwtype pw_type;
57 unsigned data_seq:2; /* data sequencing level 58 unsigned int data_seq:2; /* data sequencing level
58 * 0 => none, 1 => IP only, 59 * 0 => none, 1 => IP only,
59 * 2 => all 60 * 2 => all
60 */ 61 */
61 unsigned recv_seq:1; /* expect receive packets with 62 unsigned int recv_seq:1; /* expect receive packets with
62 * sequence numbers? */ 63 * sequence numbers? */
63 unsigned send_seq:1; /* send packets with sequence 64 unsigned int send_seq:1; /* send packets with sequence
64 * numbers? */ 65 * numbers? */
65 unsigned lns_mode:1; /* behave as LNS? LAC enables 66 unsigned int lns_mode:1; /* behave as LNS? LAC enables
66 * sequence numbers under 67 * sequence numbers under
67 * control of LNS. */ 68 * control of LNS. */
68 int debug; /* bitmask of debug message 69 int debug; /* bitmask of debug message
@@ -107,21 +108,22 @@ struct l2tp_session {
107 108
108 char name[32]; /* for logging */ 109 char name[32]; /* for logging */
109 char ifname[IFNAMSIZ]; 110 char ifname[IFNAMSIZ];
110 unsigned data_seq:2; /* data sequencing level 111 unsigned int data_seq:2; /* data sequencing level
111 * 0 => none, 1 => IP only, 112 * 0 => none, 1 => IP only,
112 * 2 => all 113 * 2 => all
113 */ 114 */
114 unsigned recv_seq:1; /* expect receive packets with 115 unsigned int recv_seq:1; /* expect receive packets with
115 * sequence numbers? */ 116 * sequence numbers? */
116 unsigned send_seq:1; /* send packets with sequence 117 unsigned int send_seq:1; /* send packets with sequence
117 * numbers? */ 118 * numbers? */
118 unsigned lns_mode:1; /* behave as LNS? LAC enables 119 unsigned int lns_mode:1; /* behave as LNS? LAC enables
119 * sequence numbers under 120 * sequence numbers under
120 * control of LNS. */ 121 * control of LNS. */
121 int debug; /* bitmask of debug message 122 int debug; /* bitmask of debug message
122 * categories */ 123 * categories */
123 int reorder_timeout; /* configured reorder timeout 124 int reorder_timeout; /* configured reorder timeout
124 * (in jiffies) */ 125 * (in jiffies) */
126 int reorder_skip; /* set if skip to next nr */
125 int mtu; 127 int mtu;
126 int mru; 128 int mru;
127 enum l2tp_pwtype pwtype; 129 enum l2tp_pwtype pwtype;
@@ -150,6 +152,10 @@ struct l2tp_tunnel_cfg {
150 /* Used only for kernel-created sockets */ 152 /* Used only for kernel-created sockets */
151 struct in_addr local_ip; 153 struct in_addr local_ip;
152 struct in_addr peer_ip; 154 struct in_addr peer_ip;
155#if IS_ENABLED(CONFIG_IPV6)
156 struct in6_addr *local_ip6;
157 struct in6_addr *peer_ip6;
158#endif
153 u16 local_udp_port; 159 u16 local_udp_port;
154 u16 peer_udp_port; 160 u16 peer_udp_port;
155 unsigned int use_udp_checksums:1; 161 unsigned int use_udp_checksums:1;
@@ -255,17 +261,36 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
255} 261}
256 262
257#ifdef L2TP_REFCNT_DEBUG 263#ifdef L2TP_REFCNT_DEBUG
258#define l2tp_session_inc_refcount(_s) do { \ 264#define l2tp_session_inc_refcount(_s) \
259 printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ 265do { \
260 l2tp_session_inc_refcount_1(_s); \ 266 pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \
261 } while (0) 267 __func__, __LINE__, (_s)->name, \
262#define l2tp_session_dec_refcount(_s) do { \ 268 atomic_read(&_s->ref_count)); \
263 printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ 269 l2tp_session_inc_refcount_1(_s); \
264 l2tp_session_dec_refcount_1(_s); \ 270} while (0)
265 } while (0) 271#define l2tp_session_dec_refcount(_s) \
272do { \
273 pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \
274 __func__, __LINE__, (_s)->name, \
275 atomic_read(&_s->ref_count)); \
276 l2tp_session_dec_refcount_1(_s); \
277} while (0)
266#else 278#else
267#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s) 279#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
268#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) 280#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
269#endif 281#endif
270 282
283#define l2tp_printk(ptr, type, func, fmt, ...) \
284do { \
285 if (((ptr)->debug) & (type)) \
286 func(fmt, ##__VA_ARGS__); \
287} while (0)
288
289#define l2tp_warn(ptr, type, fmt, ...) \
290 l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__)
291#define l2tp_info(ptr, type, fmt, ...) \
292 l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
293#define l2tp_dbg(ptr, type, fmt, ...) \
294 l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
295
271#endif /* _L2TP_CORE_H_ */ 296#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 76130134bfa6..c3813bc84552 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/socket.h> 16#include <linux/socket.h>
@@ -122,6 +124,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
122 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); 124 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
123 if (tunnel->sock) { 125 if (tunnel->sock) {
124 struct inet_sock *inet = inet_sk(tunnel->sock); 126 struct inet_sock *inet = inet_sk(tunnel->sock);
127
128#if IS_ENABLED(CONFIG_IPV6)
129 if (tunnel->sock->sk_family == AF_INET6) {
130 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
131 seq_printf(m, " from %pI6c to %pI6c\n",
132 &np->saddr, &np->daddr);
133 } else
134#endif
125 seq_printf(m, " from %pI4 to %pI4\n", 135 seq_printf(m, " from %pI4 to %pI4\n",
126 &inet->inet_saddr, &inet->inet_daddr); 136 &inet->inet_saddr, &inet->inet_daddr);
127 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) 137 if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
@@ -317,11 +327,11 @@ static int __init l2tp_debugfs_init(void)
317 if (tunnels == NULL) 327 if (tunnels == NULL)
318 rc = -EIO; 328 rc = -EIO;
319 329
320 printk(KERN_INFO "L2TP debugfs support\n"); 330 pr_info("L2TP debugfs support\n");
321 331
322out: 332out:
323 if (rc) 333 if (rc)
324 printk(KERN_WARNING "l2tp debugfs: unable to init\n"); 334 pr_warn("unable to init\n");
325 335
326 return rc; 336 return rc;
327} 337}
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index d2726a74597d..443591d629ca 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/socket.h> 16#include <linux/socket.h>
@@ -64,7 +66,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
64 struct l2tp_eth *priv = netdev_priv(dev); 66 struct l2tp_eth *priv = netdev_priv(dev);
65 67
66 priv->dev = dev; 68 priv->dev = dev;
67 random_ether_addr(dev->dev_addr); 69 eth_hw_addr_random(dev);
68 memset(&dev->broadcast[0], 0xff, 6); 70 memset(&dev->broadcast[0], 0xff, 6);
69 71
70 return 0; 72 return 0;
@@ -115,21 +117,14 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
115 117
116 if (session->debug & L2TP_MSG_DATA) { 118 if (session->debug & L2TP_MSG_DATA) {
117 unsigned int length; 119 unsigned int length;
118 int offset;
119 u8 *ptr = skb->data; 120 u8 *ptr = skb->data;
120 121
121 length = min(32u, skb->len); 122 length = min(32u, skb->len);
122 if (!pskb_may_pull(skb, length)) 123 if (!pskb_may_pull(skb, length))
123 goto error; 124 goto error;
124 125
125 printk(KERN_DEBUG "%s: eth recv: ", session->name); 126 pr_debug("%s: eth recv\n", session->name);
126 127 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
127 offset = 0;
128 do {
129 printk(" %02X", ptr[offset]);
130 } while (++offset < length);
131
132 printk("\n");
133 } 128 }
134 129
135 if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) 130 if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -308,7 +303,7 @@ static int __init l2tp_eth_init(void)
308 if (err) 303 if (err)
309 goto out_unreg; 304 goto out_unreg;
310 305
311 printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); 306 pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
312 307
313 return 0; 308 return 0;
314 309
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 55670ec3cd0f..70614e7affab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -9,6 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/icmp.h> 14#include <linux/icmp.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -32,15 +34,8 @@ struct l2tp_ip_sock {
32 /* inet_sock has to be the first member of l2tp_ip_sock */ 34 /* inet_sock has to be the first member of l2tp_ip_sock */
33 struct inet_sock inet; 35 struct inet_sock inet;
34 36
35 __u32 conn_id; 37 u32 conn_id;
36 __u32 peer_conn_id; 38 u32 peer_conn_id;
37
38 __u64 tx_packets;
39 __u64 tx_bytes;
40 __u64 tx_errors;
41 __u64 rx_packets;
42 __u64 rx_bytes;
43 __u64 rx_errors;
44}; 39};
45 40
46static DEFINE_RWLOCK(l2tp_ip_lock); 41static DEFINE_RWLOCK(l2tp_ip_lock);
@@ -127,7 +122,6 @@ static int l2tp_ip_recv(struct sk_buff *skb)
127 struct l2tp_session *session; 122 struct l2tp_session *session;
128 struct l2tp_tunnel *tunnel = NULL; 123 struct l2tp_tunnel *tunnel = NULL;
129 int length; 124 int length;
130 int offset;
131 125
132 /* Point to L2TP header */ 126 /* Point to L2TP header */
133 optr = ptr = skb->data; 127 optr = ptr = skb->data;
@@ -162,14 +156,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
162 if (!pskb_may_pull(skb, length)) 156 if (!pskb_may_pull(skb, length))
163 goto discard; 157 goto discard;
164 158
165 printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); 159 pr_debug("%s: ip recv\n", tunnel->name);
166 160 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
167 offset = 0;
168 do {
169 printk(" %02X", ptr[offset]);
170 } while (++offset < length);
171
172 printk("\n");
173 } 161 }
174 162
175 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); 163 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
@@ -232,7 +220,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
232{ 220{
233 write_lock_bh(&l2tp_ip_lock); 221 write_lock_bh(&l2tp_ip_lock);
234 hlist_del_init(&sk->sk_bind_node); 222 hlist_del_init(&sk->sk_bind_node);
235 hlist_del_init(&sk->sk_node); 223 sk_del_node_init(sk);
236 write_unlock_bh(&l2tp_ip_lock); 224 write_unlock_bh(&l2tp_ip_lock);
237 sk_common_release(sk); 225 sk_common_release(sk);
238} 226}
@@ -251,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
251{ 239{
252 struct inet_sock *inet = inet_sk(sk); 240 struct inet_sock *inet = inet_sk(sk);
253 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; 241 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
254 int ret = -EINVAL; 242 int ret;
255 int chk_addr_ret; 243 int chk_addr_ret;
256 244
245 if (!sock_flag(sk, SOCK_ZAPPED))
246 return -EINVAL;
247 if (addr_len < sizeof(struct sockaddr_l2tpip))
248 return -EINVAL;
249 if (addr->l2tp_family != AF_INET)
250 return -EINVAL;
251
257 ret = -EADDRINUSE; 252 ret = -EADDRINUSE;
258 read_lock_bh(&l2tp_ip_lock); 253 read_lock_bh(&l2tp_ip_lock);
259 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) 254 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -271,7 +266,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 266 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
272 goto out; 267 goto out;
273 268
274 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 269 if (addr->l2tp_addr.s_addr)
270 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
275 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 271 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
276 inet->inet_saddr = 0; /* Use device */ 272 inet->inet_saddr = 0; /* Use device */
277 sk_dst_reset(sk); 273 sk_dst_reset(sk);
@@ -283,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
283 sk_del_node_init(sk); 279 sk_del_node_init(sk);
284 write_unlock_bh(&l2tp_ip_lock); 280 write_unlock_bh(&l2tp_ip_lock);
285 ret = 0; 281 ret = 0;
282 sock_reset_flag(sk, SOCK_ZAPPED);
283
286out: 284out:
287 release_sock(sk); 285 release_sock(sk);
288 286
@@ -297,72 +295,42 @@ out_in_use:
297static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 295static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
298{ 296{
299 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 297 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
300 struct inet_sock *inet = inet_sk(sk); 298 int rc;
301 struct flowi4 *fl4;
302 struct rtable *rt;
303 __be32 saddr;
304 int oif, rc;
305
306 rc = -EINVAL;
307 if (addr_len < sizeof(*lsa))
308 goto out;
309
310 rc = -EAFNOSUPPORT;
311 if (lsa->l2tp_family != AF_INET)
312 goto out;
313
314 lock_sock(sk);
315 299
316 sk_dst_reset(sk); 300 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
301 return -EINVAL;
317 302
318 oif = sk->sk_bound_dev_if; 303 if (addr_len < sizeof(*lsa))
319 saddr = inet->inet_saddr; 304 return -EINVAL;
320 305
321 rc = -EINVAL;
322 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 306 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
323 goto out; 307 return -EINVAL;
324 308
325 fl4 = &inet->cork.fl.u.ip4; 309 rc = ip4_datagram_connect(sk, uaddr, addr_len);
326 rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr, 310 if (rc < 0)
327 RT_CONN_FLAGS(sk), oif, 311 return rc;
328 IPPROTO_L2TP,
329 0, 0, sk, true);
330 if (IS_ERR(rt)) {
331 rc = PTR_ERR(rt);
332 if (rc == -ENETUNREACH)
333 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
334 goto out;
335 }
336 312
337 rc = -ENETUNREACH; 313 lock_sock(sk);
338 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
339 ip_rt_put(rt);
340 goto out;
341 }
342 314
343 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 315 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
344 316
345 if (!inet->inet_saddr)
346 inet->inet_saddr = fl4->saddr;
347 if (!inet->inet_rcv_saddr)
348 inet->inet_rcv_saddr = fl4->saddr;
349 inet->inet_daddr = fl4->daddr;
350 sk->sk_state = TCP_ESTABLISHED;
351 inet->inet_id = jiffies;
352
353 sk_dst_set(sk, &rt->dst);
354
355 write_lock_bh(&l2tp_ip_lock); 317 write_lock_bh(&l2tp_ip_lock);
356 hlist_del_init(&sk->sk_bind_node); 318 hlist_del_init(&sk->sk_bind_node);
357 sk_add_bind_node(sk, &l2tp_ip_bind_table); 319 sk_add_bind_node(sk, &l2tp_ip_bind_table);
358 write_unlock_bh(&l2tp_ip_lock); 320 write_unlock_bh(&l2tp_ip_lock);
359 321
360 rc = 0;
361out:
362 release_sock(sk); 322 release_sock(sk);
363 return rc; 323 return rc;
364} 324}
365 325
326static int l2tp_ip_disconnect(struct sock *sk, int flags)
327{
328 if (sock_flag(sk, SOCK_ZAPPED))
329 return 0;
330
331 return udp_disconnect(sk, flags);
332}
333
366static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 334static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
367 int *uaddr_len, int peer) 335 int *uaddr_len, int peer)
368{ 336{
@@ -413,7 +381,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
413{ 381{
414 struct sk_buff *skb; 382 struct sk_buff *skb;
415 int rc; 383 int rc;
416 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
417 struct inet_sock *inet = inet_sk(sk); 384 struct inet_sock *inet = inet_sk(sk);
418 struct rtable *rt = NULL; 385 struct rtable *rt = NULL;
419 struct flowi4 *fl4; 386 struct flowi4 *fl4;
@@ -441,8 +408,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
441 408
442 daddr = lip->l2tp_addr.s_addr; 409 daddr = lip->l2tp_addr.s_addr;
443 } else { 410 } else {
411 rc = -EDESTADDRREQ;
444 if (sk->sk_state != TCP_ESTABLISHED) 412 if (sk->sk_state != TCP_ESTABLISHED)
445 return -EDESTADDRREQ; 413 goto out;
446 414
447 daddr = inet->inet_daddr; 415 daddr = inet->inet_daddr;
448 connected = 1; 416 connected = 1;
@@ -512,14 +480,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
512 rcu_read_unlock(); 480 rcu_read_unlock();
513 481
514error: 482error:
515 /* Update stats */ 483 if (rc >= 0)
516 if (rc >= 0) {
517 lsa->tx_packets++;
518 lsa->tx_bytes += len;
519 rc = len; 484 rc = len;
520 } else {
521 lsa->tx_errors++;
522 }
523 485
524out: 486out:
525 release_sock(sk); 487 release_sock(sk);
@@ -537,7 +499,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
537 size_t len, int noblock, int flags, int *addr_len) 499 size_t len, int noblock, int flags, int *addr_len)
538{ 500{
539 struct inet_sock *inet = inet_sk(sk); 501 struct inet_sock *inet = inet_sk(sk);
540 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
541 size_t copied = 0; 502 size_t copied = 0;
542 int err = -EOPNOTSUPP; 503 int err = -EOPNOTSUPP;
543 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 504 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
@@ -579,15 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
579done: 540done:
580 skb_free_datagram(sk, skb); 541 skb_free_datagram(sk, skb);
581out: 542out:
582 if (err) { 543 return err ? err : copied;
583 lsk->rx_errors++;
584 return err;
585 }
586
587 lsk->rx_packets++;
588 lsk->rx_bytes += copied;
589
590 return copied;
591} 544}
592 545
593static struct proto l2tp_ip_prot = { 546static struct proto l2tp_ip_prot = {
@@ -597,7 +550,7 @@ static struct proto l2tp_ip_prot = {
597 .close = l2tp_ip_close, 550 .close = l2tp_ip_close,
598 .bind = l2tp_ip_bind, 551 .bind = l2tp_ip_bind,
599 .connect = l2tp_ip_connect, 552 .connect = l2tp_ip_connect,
600 .disconnect = udp_disconnect, 553 .disconnect = l2tp_ip_disconnect,
601 .ioctl = udp_ioctl, 554 .ioctl = udp_ioctl,
602 .destroy = l2tp_ip_destroy_sock, 555 .destroy = l2tp_ip_destroy_sock,
603 .setsockopt = ip_setsockopt, 556 .setsockopt = ip_setsockopt,
@@ -655,7 +608,7 @@ static int __init l2tp_ip_init(void)
655{ 608{
656 int err; 609 int err;
657 610
658 printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); 611 pr_info("L2TP IP encapsulation support (L2TPv3)\n");
659 612
660 err = proto_register(&l2tp_ip_prot, 1); 613 err = proto_register(&l2tp_ip_prot, 1);
661 if (err != 0) 614 if (err != 0)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
new file mode 100644
index 000000000000..35e1e4bde587
--- /dev/null
+++ b/net/l2tp/l2tp_ip6.c
@@ -0,0 +1,803 @@
1/*
2 * L2TPv3 IP encapsulation support for IPv6
3 *
4 * Copyright (c) 2012 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/icmp.h>
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/random.h>
18#include <linux/socket.h>
19#include <linux/l2tp.h>
20#include <linux/in.h>
21#include <linux/in6.h>
22#include <net/sock.h>
23#include <net/ip.h>
24#include <net/icmp.h>
25#include <net/udp.h>
26#include <net/inet_common.h>
27#include <net/inet_hashtables.h>
28#include <net/tcp_states.h>
29#include <net/protocol.h>
30#include <net/xfrm.h>
31
32#include <net/transp_v6.h>
33#include <net/addrconf.h>
34#include <net/ip6_route.h>
35
36#include "l2tp_core.h"
37
38struct l2tp_ip6_sock {
39 /* inet_sock has to be the first member of l2tp_ip6_sock */
40 struct inet_sock inet;
41
42 u32 conn_id;
43 u32 peer_conn_id;
44
45 /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
46 inet6_sk_generic */
47 struct ipv6_pinfo inet6;
48};
49
50static DEFINE_RWLOCK(l2tp_ip6_lock);
51static struct hlist_head l2tp_ip6_table;
52static struct hlist_head l2tp_ip6_bind_table;
53
54static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
55{
56 return (struct l2tp_ip6_sock *)sk;
57}
58
59static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
60 struct in6_addr *laddr,
61 int dif, u32 tunnel_id)
62{
63 struct hlist_node *node;
64 struct sock *sk;
65
66 sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
67 struct in6_addr *addr = inet6_rcv_saddr(sk);
68 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
69
70 if (l2tp == NULL)
71 continue;
72
73 if ((l2tp->conn_id == tunnel_id) &&
74 net_eq(sock_net(sk), net) &&
75 !(addr && ipv6_addr_equal(addr, laddr)) &&
76 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
77 goto found;
78 }
79
80 sk = NULL;
81found:
82 return sk;
83}
84
85static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
86 struct in6_addr *laddr,
87 int dif, u32 tunnel_id)
88{
89 struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
90 if (sk)
91 sock_hold(sk);
92
93 return sk;
94}
95
96/* When processing receive frames, there are two cases to
97 * consider. Data frames consist of a non-zero session-id and an
98 * optional cookie. Control frames consist of a regular L2TP header
99 * preceded by 32-bits of zeros.
100 *
101 * L2TPv3 Session Header Over IP
102 *
103 * 0 1 2 3
104 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * | Session ID |
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 * | Cookie (optional, maximum 64 bits)...
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 * |
111 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 *
113 * L2TPv3 Control Message Header Over IP
114 *
115 * 0 1 2 3
116 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
117 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118 * | (32 bits of zeros) |
119 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
120 * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
121 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
122 * | Control Connection ID |
123 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
124 * | Ns | Nr |
125 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
126 *
127 * All control frames are passed to userspace.
128 */
129static int l2tp_ip6_recv(struct sk_buff *skb)
130{
131 struct sock *sk;
132 u32 session_id;
133 u32 tunnel_id;
134 unsigned char *ptr, *optr;
135 struct l2tp_session *session;
136 struct l2tp_tunnel *tunnel = NULL;
137 int length;
138
139 /* Point to L2TP header */
140 optr = ptr = skb->data;
141
142 if (!pskb_may_pull(skb, 4))
143 goto discard;
144
145 session_id = ntohl(*((__be32 *) ptr));
146 ptr += 4;
147
148 /* RFC3931: L2TP/IP packets have the first 4 bytes containing
149 * the session_id. If it is 0, the packet is a L2TP control
150 * frame and the session_id value can be discarded.
151 */
152 if (session_id == 0) {
153 __skb_pull(skb, 4);
154 goto pass_up;
155 }
156
157 /* Ok, this is a data packet. Lookup the session. */
158 session = l2tp_session_find(&init_net, NULL, session_id);
159 if (session == NULL)
160 goto discard;
161
162 tunnel = session->tunnel;
163 if (tunnel == NULL)
164 goto discard;
165
166 /* Trace packet contents, if enabled */
167 if (tunnel->debug & L2TP_MSG_DATA) {
168 length = min(32u, skb->len);
169 if (!pskb_may_pull(skb, length))
170 goto discard;
171
172 pr_debug("%s: ip recv\n", tunnel->name);
173 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
174 }
175
176 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
177 tunnel->recv_payload_hook);
178 return 0;
179
180pass_up:
181 /* Get the tunnel_id from the L2TP header */
182 if (!pskb_may_pull(skb, 12))
183 goto discard;
184
185 if ((skb->data[0] & 0xc0) != 0xc0)
186 goto discard;
187
188 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
189 tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
190 if (tunnel != NULL)
191 sk = tunnel->sock;
192 else {
193 struct ipv6hdr *iph = ipv6_hdr(skb);
194
195 read_lock_bh(&l2tp_ip6_lock);
196 sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
197 0, tunnel_id);
198 read_unlock_bh(&l2tp_ip6_lock);
199 }
200
201 if (sk == NULL)
202 goto discard;
203
204 sock_hold(sk);
205
206 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
207 goto discard_put;
208
209 nf_reset(skb);
210
211 return sk_receive_skb(sk, skb, 1);
212
213discard_put:
214 sock_put(sk);
215
216discard:
217 kfree_skb(skb);
218 return 0;
219}
220
221static int l2tp_ip6_open(struct sock *sk)
222{
223 /* Prevent autobind. We don't have ports. */
224 inet_sk(sk)->inet_num = IPPROTO_L2TP;
225
226 write_lock_bh(&l2tp_ip6_lock);
227 sk_add_node(sk, &l2tp_ip6_table);
228 write_unlock_bh(&l2tp_ip6_lock);
229
230 return 0;
231}
232
233static void l2tp_ip6_close(struct sock *sk, long timeout)
234{
235 write_lock_bh(&l2tp_ip6_lock);
236 hlist_del_init(&sk->sk_bind_node);
237 sk_del_node_init(sk);
238 write_unlock_bh(&l2tp_ip6_lock);
239
240 sk_common_release(sk);
241}
242
243static void l2tp_ip6_destroy_sock(struct sock *sk)
244{
245 lock_sock(sk);
246 ip6_flush_pending_frames(sk);
247 release_sock(sk);
248
249 inet6_destroy_sock(sk);
250}
251
252static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
253{
254 struct inet_sock *inet = inet_sk(sk);
255 struct ipv6_pinfo *np = inet6_sk(sk);
256 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
257 __be32 v4addr = 0;
258 int addr_type;
259 int err;
260
261 if (!sock_flag(sk, SOCK_ZAPPED))
262 return -EINVAL;
263 if (addr->l2tp_family != AF_INET6)
264 return -EINVAL;
265 if (addr_len < sizeof(*addr))
266 return -EINVAL;
267
268 addr_type = ipv6_addr_type(&addr->l2tp_addr);
269
270 /* l2tp_ip6 sockets are IPv6 only */
271 if (addr_type == IPV6_ADDR_MAPPED)
272 return -EADDRNOTAVAIL;
273
274 /* L2TP is point-point, not multicast */
275 if (addr_type & IPV6_ADDR_MULTICAST)
276 return -EADDRNOTAVAIL;
277
278 err = -EADDRINUSE;
279 read_lock_bh(&l2tp_ip6_lock);
280 if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
281 sk->sk_bound_dev_if, addr->l2tp_conn_id))
282 goto out_in_use;
283 read_unlock_bh(&l2tp_ip6_lock);
284
285 lock_sock(sk);
286
287 err = -EINVAL;
288 if (sk->sk_state != TCP_CLOSE)
289 goto out_unlock;
290
291 /* Check if the address belongs to the host. */
292 rcu_read_lock();
293 if (addr_type != IPV6_ADDR_ANY) {
294 struct net_device *dev = NULL;
295
296 if (addr_type & IPV6_ADDR_LINKLOCAL) {
297 if (addr_len >= sizeof(struct sockaddr_in6) &&
298 addr->l2tp_scope_id) {
299 /* Override any existing binding, if another
300 * one is supplied by user.
301 */
302 sk->sk_bound_dev_if = addr->l2tp_scope_id;
303 }
304
305 /* Binding to link-local address requires an
306 interface */
307 if (!sk->sk_bound_dev_if)
308 goto out_unlock_rcu;
309
310 err = -ENODEV;
311 dev = dev_get_by_index_rcu(sock_net(sk),
312 sk->sk_bound_dev_if);
313 if (!dev)
314 goto out_unlock_rcu;
315 }
316
317 /* ipv4 addr of the socket is invalid. Only the
318 * unspecified and mapped address have a v4 equivalent.
319 */
320 v4addr = LOOPBACK4_IPV6;
321 err = -EADDRNOTAVAIL;
322 if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
323 goto out_unlock_rcu;
324 }
325 rcu_read_unlock();
326
327 inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
328 np->rcv_saddr = addr->l2tp_addr;
329 np->saddr = addr->l2tp_addr;
330
331 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
332
333 write_lock_bh(&l2tp_ip6_lock);
334 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
335 sk_del_node_init(sk);
336 write_unlock_bh(&l2tp_ip6_lock);
337
338 sock_reset_flag(sk, SOCK_ZAPPED);
339 release_sock(sk);
340 return 0;
341
342out_unlock_rcu:
343 rcu_read_unlock();
344out_unlock:
345 release_sock(sk);
346 return err;
347
348out_in_use:
349 read_unlock_bh(&l2tp_ip6_lock);
350 return err;
351}
352
353static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
354 int addr_len)
355{
356 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
357 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
358 struct in6_addr *daddr;
359 int addr_type;
360 int rc;
361
362 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
363 return -EINVAL;
364
365 if (addr_len < sizeof(*lsa))
366 return -EINVAL;
367
368 addr_type = ipv6_addr_type(&usin->sin6_addr);
369 if (addr_type & IPV6_ADDR_MULTICAST)
370 return -EINVAL;
371
372 if (addr_type & IPV6_ADDR_MAPPED) {
373 daddr = &usin->sin6_addr;
374 if (ipv4_is_multicast(daddr->s6_addr32[3]))
375 return -EINVAL;
376 }
377
378 rc = ip6_datagram_connect(sk, uaddr, addr_len);
379
380 lock_sock(sk);
381
382 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
383
384 write_lock_bh(&l2tp_ip6_lock);
385 hlist_del_init(&sk->sk_bind_node);
386 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
387 write_unlock_bh(&l2tp_ip6_lock);
388
389 release_sock(sk);
390
391 return rc;
392}
393
394static int l2tp_ip6_disconnect(struct sock *sk, int flags)
395{
396 if (sock_flag(sk, SOCK_ZAPPED))
397 return 0;
398
399 return udp_disconnect(sk, flags);
400}
401
402static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
403 int *uaddr_len, int peer)
404{
405 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
406 struct sock *sk = sock->sk;
407 struct ipv6_pinfo *np = inet6_sk(sk);
408 struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
409
410 lsa->l2tp_family = AF_INET6;
411 lsa->l2tp_flowinfo = 0;
412 lsa->l2tp_scope_id = 0;
413 if (peer) {
414 if (!lsk->peer_conn_id)
415 return -ENOTCONN;
416 lsa->l2tp_conn_id = lsk->peer_conn_id;
417 lsa->l2tp_addr = np->daddr;
418 if (np->sndflow)
419 lsa->l2tp_flowinfo = np->flow_label;
420 } else {
421 if (ipv6_addr_any(&np->rcv_saddr))
422 lsa->l2tp_addr = np->saddr;
423 else
424 lsa->l2tp_addr = np->rcv_saddr;
425
426 lsa->l2tp_conn_id = lsk->conn_id;
427 }
428 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
429 lsa->l2tp_scope_id = sk->sk_bound_dev_if;
430 *uaddr_len = sizeof(*lsa);
431 return 0;
432}
433
434static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
435{
436 int rc;
437
438 /* Charge it to the socket, dropping if the queue is full. */
439 rc = sock_queue_rcv_skb(sk, skb);
440 if (rc < 0)
441 goto drop;
442
443 return 0;
444
445drop:
446 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
447 kfree_skb(skb);
448 return -1;
449}
450
451static int l2tp_ip6_push_pending_frames(struct sock *sk)
452{
453 struct sk_buff *skb;
454 __be32 *transhdr = NULL;
455 int err = 0;
456
457 skb = skb_peek(&sk->sk_write_queue);
458 if (skb == NULL)
459 goto out;
460
461 transhdr = (__be32 *)skb_transport_header(skb);
462 *transhdr = 0;
463
464 err = ip6_push_pending_frames(sk);
465
466out:
467 return err;
468}
469
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 *
 * This mirrors the generic IPv6 datagram send path (cf. rawv6/udpv6
 * sendmsg): resolve the destination (explicit msg_name or the connected
 * peer), merge flow-label and cmsg-supplied tx options, route, then
 * cork/append and push.  transhdrlen reserves 4 bytes which
 * l2tp_ip6_push_pending_frames() later zeroes as the session id.
 *
 * Returns the number of bytes sent (len) on success, negative errno on
 * failure.
 */
static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
			    struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_l2tpip6 *lsa =
		(struct sockaddr_l2tpip6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;		/* -1: not set by cmsg, use socket/dst default */
	int tclass = -1;		/* ditto */
	int dontfrag = -1;		/* ditto */
	int transhdrlen = 4;		/* zero session-id */
	int ulen = len + transhdrlen;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_mark = sk->sk_mark;

	if (lsa) {
		/* Explicit destination supplied in msg_name. */
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
			return -EAFNOSUPPORT;

		daddr = &lsa->l2tp_addr;
		if (np->sndflow) {
			/* IPV6_FLOWINFO_SEND: honour the caller's flow
			 * label; a labelled flow may carry its own
			 * destination override.
			 */
			fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    lsa->l2tp_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = lsa->l2tp_scope_id;
	} else {
		/* No msg_name: the socket must be connected. */
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		daddr = &np->daddr;
		fl6.flowlabel = np->flow_label;
	}

	if (fl6.flowi6_oif == 0)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		/* Parse ancillary data (pktinfo, hoplimit, tclass,
		 * dontfrag, tx options...) into a temporary opt_space.
		 */
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
					&hlimit, &tclass, &dontfrag);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			/* flowlabel is NULL here, so nothing to release */
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}

	/* Fall back to socket-level options, then fold in any flow-label
	 * options, then normalise.
	 */
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl6.flowi6_proto = sk->sk_protocol;
	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;

	final_p = fl6_update_dst(&fl6, opt, &final);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}

	/* Resolve cmsg-unset parameters from socket, then route defaults. */
	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl6.daddr))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = ip6_dst_hoplimit(dst);
	}

	if (tclass < 0)
		tclass = np->tclass;

	if (dontfrag < 0)
		dontfrag = np->dontfrag;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	lock_sock(sk);
	err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
			      ulen, transhdrlen, hlimit, tclass, opt,
			      &fl6, (struct rt6_info *)dst,
			      msg->msg_flags, dontfrag);
	if (err)
		ip6_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		/* Not corked further: zero the session id and push. */
		err = l2tp_ip6_push_pending_frames(sk);
	release_sock(sk);
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);

	return err < 0 ? err : len;

do_confirm:
	/* MSG_CONFIRM: just confirm neighbour reachability unless there
	 * is real payload (or MSG_PROBE without data).
	 */
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
643
644static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
645 struct msghdr *msg, size_t len, int noblock,
646 int flags, int *addr_len)
647{
648 struct inet_sock *inet = inet_sk(sk);
649 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
650 size_t copied = 0;
651 int err = -EOPNOTSUPP;
652 struct sk_buff *skb;
653
654 if (flags & MSG_OOB)
655 goto out;
656
657 if (addr_len)
658 *addr_len = sizeof(*lsa);
659
660 if (flags & MSG_ERRQUEUE)
661 return ipv6_recv_error(sk, msg, len);
662
663 skb = skb_recv_datagram(sk, flags, noblock, &err);
664 if (!skb)
665 goto out;
666
667 copied = skb->len;
668 if (len < copied) {
669 msg->msg_flags |= MSG_TRUNC;
670 copied = len;
671 }
672
673 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
674 if (err)
675 goto done;
676
677 sock_recv_timestamp(msg, sk, skb);
678
679 /* Copy the address. */
680 if (lsa) {
681 lsa->l2tp_family = AF_INET6;
682 lsa->l2tp_unused = 0;
683 lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
684 lsa->l2tp_flowinfo = 0;
685 lsa->l2tp_scope_id = 0;
686 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
687 lsa->l2tp_scope_id = IP6CB(skb)->iif;
688 }
689
690 if (inet->cmsg_flags)
691 ip_cmsg_recv(msg, skb);
692
693 if (flags & MSG_TRUNC)
694 copied = skb->len;
695done:
696 skb_free_datagram(sk, skb);
697out:
698 return err ? err : copied;
699}
700
/* Protocol hooks for L2TP/IPv6 sockets.  Most operations delegate to
 * the generic IPv6/UDP helpers; the L2TP-specific ones (bind, connect,
 * sendmsg, recvmsg, backlog_rcv) are defined above in this file.
 */
static struct proto l2tp_ip6_prot = {
	.name		   = "L2TP/IPv6",
	.owner		   = THIS_MODULE,
	.init		   = l2tp_ip6_open,
	.close		   = l2tp_ip6_close,
	.bind		   = l2tp_ip6_bind,
	.connect	   = l2tp_ip6_connect,
	.disconnect	   = l2tp_ip6_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = l2tp_ip6_destroy_sock,
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.sendmsg	   = l2tp_ip6_sendmsg,
	.recvmsg	   = l2tp_ip6_recvmsg,
	.backlog_rcv	   = l2tp_ip6_backlog_recv,
	.hash		   = inet_hash,
	.unhash		   = inet_unhash,
	.obj_size	   = sizeof(struct l2tp_ip6_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
724
/* BSD socket-layer operations for SOCK_DGRAM/IPPROTO_L2TP over PF_INET6.
 * Datagram semantics: no listen/accept/socketpair/mmap/sendpage; the
 * only L2TP-specific entry is getname.
 */
static const struct proto_ops l2tp_ip6_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = l2tp_ip6_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
749
/* Registration record mapping socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP)
 * onto the proto/proto_ops pair above.
 */
static struct inet_protosw l2tp_ip6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip6_prot,
	.ops		= &l2tp_ip6_ops,
	.no_check	= 0,
};
757
/* IPv6 protocol handler: delivers inbound IPPROTO_L2TP packets to
 * l2tp_ip6_recv() (defined earlier in this file).
 */
static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
	.handler	= l2tp_ip6_recv,
};
761
762static int __init l2tp_ip6_init(void)
763{
764 int err;
765
766 pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
767
768 err = proto_register(&l2tp_ip6_prot, 1);
769 if (err != 0)
770 goto out;
771
772 err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
773 if (err)
774 goto out1;
775
776 inet6_register_protosw(&l2tp_ip6_protosw);
777 return 0;
778
779out1:
780 proto_unregister(&l2tp_ip6_prot);
781out:
782 return err;
783}
784
/* Module exit: tear down in strict reverse order of l2tp_ip6_init() --
 * remove the socket type first so no new sockets appear, then unhook
 * the IPv6 protocol handler, then unregister the proto.
 */
static void __exit l2tp_ip6_exit(void)
{
	inet6_unregister_protosw(&l2tp_ip6_protosw);
	inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip6_prot);
}
791
792module_init(l2tp_ip6_init);
793module_exit(l2tp_ip6_exit);
794
795MODULE_LICENSE("GPL");
796MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
797MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
798MODULE_VERSION("1.0");
799
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
803MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a09458b..ddc553e76671 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -14,6 +14,8 @@
14 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <net/sock.h> 19#include <net/sock.h>
18#include <net/genetlink.h> 20#include <net/genetlink.h>
19#include <net/udp.h> 21#include <net/udp.h>
@@ -133,10 +135,25 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
133 if (info->attrs[L2TP_ATTR_FD]) { 135 if (info->attrs[L2TP_ATTR_FD]) {
134 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); 136 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
135 } else { 137 } else {
136 if (info->attrs[L2TP_ATTR_IP_SADDR]) 138#if IS_ENABLED(CONFIG_IPV6)
137 cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); 139 if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
138 if (info->attrs[L2TP_ATTR_IP_DADDR]) 140 info->attrs[L2TP_ATTR_IP6_DADDR]) {
139 cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); 141 cfg.local_ip6 = nla_data(
142 info->attrs[L2TP_ATTR_IP6_SADDR]);
143 cfg.peer_ip6 = nla_data(
144 info->attrs[L2TP_ATTR_IP6_DADDR]);
145 } else
146#endif
147 if (info->attrs[L2TP_ATTR_IP_SADDR] &&
148 info->attrs[L2TP_ATTR_IP_DADDR]) {
149 cfg.local_ip.s_addr = nla_get_be32(
150 info->attrs[L2TP_ATTR_IP_SADDR]);
151 cfg.peer_ip.s_addr = nla_get_be32(
152 info->attrs[L2TP_ATTR_IP_DADDR]);
153 } else {
154 ret = -EINVAL;
155 goto out;
156 }
140 if (info->attrs[L2TP_ATTR_UDP_SPORT]) 157 if (info->attrs[L2TP_ATTR_UDP_SPORT])
141 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); 158 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
142 if (info->attrs[L2TP_ATTR_UDP_DPORT]) 159 if (info->attrs[L2TP_ATTR_UDP_DPORT])
@@ -225,47 +242,85 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
225 struct nlattr *nest; 242 struct nlattr *nest;
226 struct sock *sk = NULL; 243 struct sock *sk = NULL;
227 struct inet_sock *inet; 244 struct inet_sock *inet;
245#if IS_ENABLED(CONFIG_IPV6)
246 struct ipv6_pinfo *np = NULL;
247#endif
248 struct l2tp_stats stats;
249 unsigned int start;
228 250
229 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, 251 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
230 L2TP_CMD_TUNNEL_GET); 252 L2TP_CMD_TUNNEL_GET);
231 if (IS_ERR(hdr)) 253 if (IS_ERR(hdr))
232 return PTR_ERR(hdr); 254 return PTR_ERR(hdr);
233 255
234 NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); 256 if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
235 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); 257 nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
236 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); 258 nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
237 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); 259 nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
238 NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); 260 nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
261 goto nla_put_failure;
239 262
240 nest = nla_nest_start(skb, L2TP_ATTR_STATS); 263 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
241 if (nest == NULL) 264 if (nest == NULL)
242 goto nla_put_failure; 265 goto nla_put_failure;
243 266
244 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); 267 do {
245 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); 268 start = u64_stats_fetch_begin(&tunnel->stats.syncp);
246 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); 269 stats.tx_packets = tunnel->stats.tx_packets;
247 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); 270 stats.tx_bytes = tunnel->stats.tx_bytes;
248 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); 271 stats.tx_errors = tunnel->stats.tx_errors;
249 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); 272 stats.rx_packets = tunnel->stats.rx_packets;
250 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); 273 stats.rx_bytes = tunnel->stats.rx_bytes;
251 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); 274 stats.rx_errors = tunnel->stats.rx_errors;
275 stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
276 stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
277 } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
278
279 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
280 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
281 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
282 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
283 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
284 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
285 stats.rx_seq_discards) ||
286 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
287 stats.rx_oos_packets) ||
288 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
289 goto nla_put_failure;
252 nla_nest_end(skb, nest); 290 nla_nest_end(skb, nest);
253 291
254 sk = tunnel->sock; 292 sk = tunnel->sock;
255 if (!sk) 293 if (!sk)
256 goto out; 294 goto out;
257 295
296#if IS_ENABLED(CONFIG_IPV6)
297 if (sk->sk_family == AF_INET6)
298 np = inet6_sk(sk);
299#endif
300
258 inet = inet_sk(sk); 301 inet = inet_sk(sk);
259 302
260 switch (tunnel->encap) { 303 switch (tunnel->encap) {
261 case L2TP_ENCAPTYPE_UDP: 304 case L2TP_ENCAPTYPE_UDP:
262 NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); 305 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
263 NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); 306 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
264 NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); 307 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
308 (sk->sk_no_check != UDP_CSUM_NOXMIT)))
309 goto nla_put_failure;
265 /* NOBREAK */ 310 /* NOBREAK */
266 case L2TP_ENCAPTYPE_IP: 311 case L2TP_ENCAPTYPE_IP:
267 NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); 312#if IS_ENABLED(CONFIG_IPV6)
268 NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); 313 if (np) {
314 if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
315 &np->saddr) ||
316 nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
317 &np->daddr))
318 goto nla_put_failure;
319 } else
320#endif
321 if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
322 nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
323 goto nla_put_failure;
269 break; 324 break;
270 } 325 }
271 326
@@ -556,6 +611,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
556 struct nlattr *nest; 611 struct nlattr *nest;
557 struct l2tp_tunnel *tunnel = session->tunnel; 612 struct l2tp_tunnel *tunnel = session->tunnel;
558 struct sock *sk = NULL; 613 struct sock *sk = NULL;
614 struct l2tp_stats stats;
615 unsigned int start;
559 616
560 sk = tunnel->sock; 617 sk = tunnel->sock;
561 618
@@ -563,43 +620,64 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
563 if (IS_ERR(hdr)) 620 if (IS_ERR(hdr))
564 return PTR_ERR(hdr); 621 return PTR_ERR(hdr);
565 622
566 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); 623 if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
567 NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); 624 nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
568 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); 625 nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
569 NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); 626 nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
570 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); 627 session->peer_session_id) ||
571 NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); 628 nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
572 NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); 629 nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
573 if (session->mru) 630 nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
574 NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); 631 (session->mru &&
575 632 nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
576 if (session->ifname && session->ifname[0]) 633 goto nla_put_failure;
577 NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); 634
578 if (session->cookie_len) 635 if ((session->ifname && session->ifname[0] &&
579 NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); 636 nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
580 if (session->peer_cookie_len) 637 (session->cookie_len &&
581 NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); 638 nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
582 NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); 639 &session->cookie[0])) ||
583 NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); 640 (session->peer_cookie_len &&
584 NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); 641 nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
642 &session->peer_cookie[0])) ||
643 nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
644 nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
645 nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
585#ifdef CONFIG_XFRM 646#ifdef CONFIG_XFRM
586 if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) 647 (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
587 NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); 648 nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
588#endif 649#endif
589 if (session->reorder_timeout) 650 (session->reorder_timeout &&
590 NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); 651 nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
652 goto nla_put_failure;
591 653
592 nest = nla_nest_start(skb, L2TP_ATTR_STATS); 654 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
593 if (nest == NULL) 655 if (nest == NULL)
594 goto nla_put_failure; 656 goto nla_put_failure;
595 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); 657
596 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); 658 do {
597 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); 659 start = u64_stats_fetch_begin(&session->stats.syncp);
598 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); 660 stats.tx_packets = session->stats.tx_packets;
599 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); 661 stats.tx_bytes = session->stats.tx_bytes;
600 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); 662 stats.tx_errors = session->stats.tx_errors;
601 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); 663 stats.rx_packets = session->stats.rx_packets;
602 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); 664 stats.rx_bytes = session->stats.rx_bytes;
665 stats.rx_errors = session->stats.rx_errors;
666 stats.rx_seq_discards = session->stats.rx_seq_discards;
667 stats.rx_oos_packets = session->stats.rx_oos_packets;
668 } while (u64_stats_fetch_retry(&session->stats.syncp, start));
669
670 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
671 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
672 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
673 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
674 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
675 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
676 stats.rx_seq_discards) ||
677 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
678 stats.rx_oos_packets) ||
679 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
680 goto nla_put_failure;
603 nla_nest_end(skb, nest); 681 nla_nest_end(skb, nest);
604 682
605 return genlmsg_end(skb, hdr); 683 return genlmsg_end(skb, hdr);
@@ -708,6 +786,14 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
708 [L2TP_ATTR_MTU] = { .type = NLA_U16, }, 786 [L2TP_ATTR_MTU] = { .type = NLA_U16, },
709 [L2TP_ATTR_MRU] = { .type = NLA_U16, }, 787 [L2TP_ATTR_MRU] = { .type = NLA_U16, },
710 [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, 788 [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
789 [L2TP_ATTR_IP6_SADDR] = {
790 .type = NLA_BINARY,
791 .len = sizeof(struct in6_addr),
792 },
793 [L2TP_ATTR_IP6_DADDR] = {
794 .type = NLA_BINARY,
795 .len = sizeof(struct in6_addr),
796 },
711 [L2TP_ATTR_IFNAME] = { 797 [L2TP_ATTR_IFNAME] = {
712 .type = NLA_NUL_STRING, 798 .type = NLA_NUL_STRING,
713 .len = IFNAMSIZ - 1, 799 .len = IFNAMSIZ - 1,
@@ -818,7 +904,7 @@ static int l2tp_nl_init(void)
818{ 904{
819 int err; 905 int err;
820 906
821 printk(KERN_INFO "L2TP netlink interface\n"); 907 pr_info("L2TP netlink interface\n");
822 err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, 908 err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
823 ARRAY_SIZE(l2tp_nl_ops)); 909 ARRAY_SIZE(l2tp_nl_ops));
824 910
@@ -837,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
837MODULE_DESCRIPTION("L2TP netlink"); 923MODULE_DESCRIPTION("L2TP netlink");
838MODULE_LICENSE("GPL"); 924MODULE_LICENSE("GPL");
839MODULE_VERSION("1.0"); 925MODULE_VERSION("1.0");
840MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ 926MODULE_ALIAS_GENL_FAMILY("l2tp");
841 __stringify(NETLINK_GENERIC) "-type-" "l2tp");
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8a90d756c904..8ef6b9416cba 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -57,6 +57,8 @@
57 * http://openl2tp.sourceforge.net. 57 * http://openl2tp.sourceforge.net.
58 */ 58 */
59 59
60#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61
60#include <linux/module.h> 62#include <linux/module.h>
61#include <linux/string.h> 63#include <linux/string.h>
62#include <linux/list.h> 64#include <linux/list.h>
@@ -82,7 +84,7 @@
82#include <net/sock.h> 84#include <net/sock.h>
83#include <linux/ppp_channel.h> 85#include <linux/ppp_channel.h>
84#include <linux/ppp_defs.h> 86#include <linux/ppp_defs.h>
85#include <linux/if_ppp.h> 87#include <linux/ppp-ioctl.h>
86#include <linux/file.h> 88#include <linux/file.h>
87#include <linux/hash.h> 89#include <linux/hash.h>
88#include <linux/sort.h> 90#include <linux/sort.h>
@@ -106,12 +108,6 @@
106/* Space for UDP, L2TP and PPP headers */ 108/* Space for UDP, L2TP and PPP headers */
107#define PPPOL2TP_HEADER_OVERHEAD 40 109#define PPPOL2TP_HEADER_OVERHEAD 40
108 110
109#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
110 do { \
111 if ((_mask) & (_type)) \
112 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
113 } while (0)
114
115/* Number of bytes to build transmit L2TP headers. 111/* Number of bytes to build transmit L2TP headers.
116 * Unfortunately the size is different depending on whether sequence numbers 112 * Unfortunately the size is different depending on whether sequence numbers
117 * are enabled. 113 * are enabled.
@@ -236,9 +232,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
236 232
237 if (sk->sk_state & PPPOX_BOUND) { 233 if (sk->sk_state & PPPOX_BOUND) {
238 struct pppox_sock *po; 234 struct pppox_sock *po;
239 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, 235 l2tp_dbg(session, PPPOL2TP_MSG_DATA,
240 "%s: recv %d byte data frame, passing to ppp\n", 236 "%s: recv %d byte data frame, passing to ppp\n",
241 session->name, data_len); 237 session->name, data_len);
242 238
243 /* We need to forget all info related to the L2TP packet 239 /* We need to forget all info related to the L2TP packet
244 * gathered in the skb as we are going to reuse the same 240 * gathered in the skb as we are going to reuse the same
@@ -259,8 +255,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
259 po = pppox_sk(sk); 255 po = pppox_sk(sk);
260 ppp_input(&po->chan, skb); 256 ppp_input(&po->chan, skb);
261 } else { 257 } else {
262 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, 258 l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
263 "%s: socket not bound\n", session->name); 259 session->name);
264 260
265 /* Not bound. Nothing we can do, so discard. */ 261 /* Not bound. Nothing we can do, so discard. */
266 session->stats.rx_errors++; 262 session->stats.rx_errors++;
@@ -270,8 +266,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
270 return; 266 return;
271 267
272no_sock: 268no_sock:
273 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, 269 l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
274 "%s: no socket\n", session->name);
275 kfree_skb(skb); 270 kfree_skb(skb);
276} 271}
277 272
@@ -628,7 +623,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
628{ 623{
629 struct sock *sk = sock->sk; 624 struct sock *sk = sock->sk;
630 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; 625 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
631 struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
632 struct pppox_sock *po = pppox_sk(sk); 626 struct pppox_sock *po = pppox_sk(sk);
633 struct l2tp_session *session = NULL; 627 struct l2tp_session *session = NULL;
634 struct l2tp_tunnel *tunnel; 628 struct l2tp_tunnel *tunnel;
@@ -657,7 +651,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
657 if (sk->sk_user_data) 651 if (sk->sk_user_data)
658 goto end; /* socket is already attached */ 652 goto end; /* socket is already attached */
659 653
660 /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ 654 /* Get params from socket address. Handle L2TPv2 and L2TPv3.
655 * This is nasty because there are different sockaddr_pppol2tp
656 * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
657 * the sockaddr size to determine which structure the caller
658 * is using.
659 */
660 peer_tunnel_id = 0;
661 if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { 661 if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
662 fd = sp->pppol2tp.fd; 662 fd = sp->pppol2tp.fd;
663 tunnel_id = sp->pppol2tp.s_tunnel; 663 tunnel_id = sp->pppol2tp.s_tunnel;
@@ -665,12 +665,31 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
665 session_id = sp->pppol2tp.s_session; 665 session_id = sp->pppol2tp.s_session;
666 peer_session_id = sp->pppol2tp.d_session; 666 peer_session_id = sp->pppol2tp.d_session;
667 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { 667 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
668 struct sockaddr_pppol2tpv3 *sp3 =
669 (struct sockaddr_pppol2tpv3 *) sp;
668 ver = 3; 670 ver = 3;
669 fd = sp3->pppol2tp.fd; 671 fd = sp3->pppol2tp.fd;
670 tunnel_id = sp3->pppol2tp.s_tunnel; 672 tunnel_id = sp3->pppol2tp.s_tunnel;
671 peer_tunnel_id = sp3->pppol2tp.d_tunnel; 673 peer_tunnel_id = sp3->pppol2tp.d_tunnel;
672 session_id = sp3->pppol2tp.s_session; 674 session_id = sp3->pppol2tp.s_session;
673 peer_session_id = sp3->pppol2tp.d_session; 675 peer_session_id = sp3->pppol2tp.d_session;
676 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
677 struct sockaddr_pppol2tpin6 *sp6 =
678 (struct sockaddr_pppol2tpin6 *) sp;
679 fd = sp6->pppol2tp.fd;
680 tunnel_id = sp6->pppol2tp.s_tunnel;
681 peer_tunnel_id = sp6->pppol2tp.d_tunnel;
682 session_id = sp6->pppol2tp.s_session;
683 peer_session_id = sp6->pppol2tp.d_session;
684 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
685 struct sockaddr_pppol2tpv3in6 *sp6 =
686 (struct sockaddr_pppol2tpv3in6 *) sp;
687 ver = 3;
688 fd = sp6->pppol2tp.fd;
689 tunnel_id = sp6->pppol2tp.s_tunnel;
690 peer_tunnel_id = sp6->pppol2tp.d_tunnel;
691 session_id = sp6->pppol2tp.s_session;
692 peer_session_id = sp6->pppol2tp.d_session;
674 } else { 693 } else {
675 error = -EINVAL; 694 error = -EINVAL;
676 goto end; /* bad socket address */ 695 goto end; /* bad socket address */
@@ -711,12 +730,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
711 if (tunnel->recv_payload_hook == NULL) 730 if (tunnel->recv_payload_hook == NULL)
712 tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; 731 tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
713 732
714 if (tunnel->peer_tunnel_id == 0) { 733 if (tunnel->peer_tunnel_id == 0)
715 if (ver == 2) 734 tunnel->peer_tunnel_id = peer_tunnel_id;
716 tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
717 else
718 tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
719 }
720 735
721 /* Create session if it doesn't already exist. We handle the 736 /* Create session if it doesn't already exist. We handle the
722 * case where a session was previously created by the netlink 737 * case where a session was previously created by the netlink
@@ -807,8 +822,8 @@ out_no_ppp:
807 /* This is how we get the session context from the socket. */ 822 /* This is how we get the session context from the socket. */
808 sk->sk_user_data = session; 823 sk->sk_user_data = session;
809 sk->sk_state = PPPOX_CONNECTED; 824 sk->sk_state = PPPOX_CONNECTED;
810 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 825 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
811 "%s: created\n", session->name); 826 session->name);
812 827
813end: 828end:
814 release_sock(sk); 829 release_sock(sk);
@@ -861,8 +876,8 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
861 ps = l2tp_session_priv(session); 876 ps = l2tp_session_priv(session);
862 ps->tunnel_sock = tunnel->sock; 877 ps->tunnel_sock = tunnel->sock;
863 878
864 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 879 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
865 "%s: created\n", session->name); 880 session->name);
866 881
867 error = 0; 882 error = 0;
868 883
@@ -915,8 +930,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
915 goto end_put_sess; 930 goto end_put_sess;
916 } 931 }
917 932
918 inet = inet_sk(sk); 933 inet = inet_sk(tunnel->sock);
919 if (tunnel->version == 2) { 934 if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
920 struct sockaddr_pppol2tp sp; 935 struct sockaddr_pppol2tp sp;
921 len = sizeof(sp); 936 len = sizeof(sp);
922 memset(&sp, 0, len); 937 memset(&sp, 0, len);
@@ -932,6 +947,46 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
932 sp.pppol2tp.addr.sin_port = inet->inet_dport; 947 sp.pppol2tp.addr.sin_port = inet->inet_dport;
933 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; 948 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
934 memcpy(uaddr, &sp, len); 949 memcpy(uaddr, &sp, len);
950#if IS_ENABLED(CONFIG_IPV6)
951 } else if ((tunnel->version == 2) &&
952 (tunnel->sock->sk_family == AF_INET6)) {
953 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
954 struct sockaddr_pppol2tpin6 sp;
955 len = sizeof(sp);
956 memset(&sp, 0, len);
957 sp.sa_family = AF_PPPOX;
958 sp.sa_protocol = PX_PROTO_OL2TP;
959 sp.pppol2tp.fd = tunnel->fd;
960 sp.pppol2tp.pid = pls->owner;
961 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
962 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
963 sp.pppol2tp.s_session = session->session_id;
964 sp.pppol2tp.d_session = session->peer_session_id;
965 sp.pppol2tp.addr.sin6_family = AF_INET6;
966 sp.pppol2tp.addr.sin6_port = inet->inet_dport;
967 memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
968 sizeof(np->daddr));
969 memcpy(uaddr, &sp, len);
970 } else if ((tunnel->version == 3) &&
971 (tunnel->sock->sk_family == AF_INET6)) {
972 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
973 struct sockaddr_pppol2tpv3in6 sp;
974 len = sizeof(sp);
975 memset(&sp, 0, len);
976 sp.sa_family = AF_PPPOX;
977 sp.sa_protocol = PX_PROTO_OL2TP;
978 sp.pppol2tp.fd = tunnel->fd;
979 sp.pppol2tp.pid = pls->owner;
980 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
981 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
982 sp.pppol2tp.s_session = session->session_id;
983 sp.pppol2tp.d_session = session->peer_session_id;
984 sp.pppol2tp.addr.sin6_family = AF_INET6;
985 sp.pppol2tp.addr.sin6_port = inet->inet_dport;
986 memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
987 sizeof(np->daddr));
988 memcpy(uaddr, &sp, len);
989#endif
935 } else if (tunnel->version == 3) { 990 } else if (tunnel->version == 3) {
936 struct sockaddr_pppol2tpv3 sp; 991 struct sockaddr_pppol2tpv3 sp;
937 len = sizeof(sp); 992 len = sizeof(sp);
@@ -998,9 +1053,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
998 struct l2tp_tunnel *tunnel = session->tunnel; 1053 struct l2tp_tunnel *tunnel = session->tunnel;
999 struct pppol2tp_ioc_stats stats; 1054 struct pppol2tp_ioc_stats stats;
1000 1055
1001 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, 1056 l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
1002 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", 1057 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
1003 session->name, cmd, arg); 1058 session->name, cmd, arg);
1004 1059
1005 sk = ps->sock; 1060 sk = ps->sock;
1006 sock_hold(sk); 1061 sock_hold(sk);
@@ -1018,8 +1073,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1018 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) 1073 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1019 break; 1074 break;
1020 1075
1021 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1076 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
1022 "%s: get mtu=%d\n", session->name, session->mtu); 1077 session->name, session->mtu);
1023 err = 0; 1078 err = 0;
1024 break; 1079 break;
1025 1080
@@ -1034,8 +1089,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1034 1089
1035 session->mtu = ifr.ifr_mtu; 1090 session->mtu = ifr.ifr_mtu;
1036 1091
1037 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1092 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
1038 "%s: set mtu=%d\n", session->name, session->mtu); 1093 session->name, session->mtu);
1039 err = 0; 1094 err = 0;
1040 break; 1095 break;
1041 1096
@@ -1048,8 +1103,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1048 if (put_user(session->mru, (int __user *) arg)) 1103 if (put_user(session->mru, (int __user *) arg))
1049 break; 1104 break;
1050 1105
1051 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1106 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
1052 "%s: get mru=%d\n", session->name, session->mru); 1107 session->name, session->mru);
1053 err = 0; 1108 err = 0;
1054 break; 1109 break;
1055 1110
@@ -1063,8 +1118,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1063 break; 1118 break;
1064 1119
1065 session->mru = val; 1120 session->mru = val;
1066 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1121 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
1067 "%s: set mru=%d\n", session->name, session->mru); 1122 session->name, session->mru);
1068 err = 0; 1123 err = 0;
1069 break; 1124 break;
1070 1125
@@ -1073,8 +1128,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1073 if (put_user(ps->flags, (int __user *) arg)) 1128 if (put_user(ps->flags, (int __user *) arg))
1074 break; 1129 break;
1075 1130
1076 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1131 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
1077 "%s: get flags=%d\n", session->name, ps->flags); 1132 session->name, ps->flags);
1078 err = 0; 1133 err = 0;
1079 break; 1134 break;
1080 1135
@@ -1083,8 +1138,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1083 if (get_user(val, (int __user *) arg)) 1138 if (get_user(val, (int __user *) arg))
1084 break; 1139 break;
1085 ps->flags = val; 1140 ps->flags = val;
1086 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1141 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
1087 "%s: set flags=%d\n", session->name, ps->flags); 1142 session->name, ps->flags);
1088 err = 0; 1143 err = 0;
1089 break; 1144 break;
1090 1145
@@ -1100,8 +1155,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1100 if (copy_to_user((void __user *) arg, &stats, 1155 if (copy_to_user((void __user *) arg, &stats,
1101 sizeof(stats))) 1156 sizeof(stats)))
1102 break; 1157 break;
1103 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1158 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
1104 "%s: get L2TP stats\n", session->name); 1159 session->name);
1105 err = 0; 1160 err = 0;
1106 break; 1161 break;
1107 1162
@@ -1128,9 +1183,9 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1128 struct sock *sk; 1183 struct sock *sk;
1129 struct pppol2tp_ioc_stats stats; 1184 struct pppol2tp_ioc_stats stats;
1130 1185
1131 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, 1186 l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
1132 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", 1187 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
1133 tunnel->name, cmd, arg); 1188 tunnel->name, cmd, arg);
1134 1189
1135 sk = tunnel->sock; 1190 sk = tunnel->sock;
1136 sock_hold(sk); 1191 sock_hold(sk);
@@ -1164,8 +1219,8 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1164 err = -EFAULT; 1219 err = -EFAULT;
1165 break; 1220 break;
1166 } 1221 }
1167 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1222 l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
1168 "%s: get L2TP stats\n", tunnel->name); 1223 tunnel->name);
1169 err = 0; 1224 err = 0;
1170 break; 1225 break;
1171 1226
@@ -1254,8 +1309,8 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
1254 switch (optname) { 1309 switch (optname) {
1255 case PPPOL2TP_SO_DEBUG: 1310 case PPPOL2TP_SO_DEBUG:
1256 tunnel->debug = val; 1311 tunnel->debug = val;
1257 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1312 l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
1258 "%s: set debug=%x\n", tunnel->name, tunnel->debug); 1313 tunnel->name, tunnel->debug);
1259 break; 1314 break;
1260 1315
1261 default: 1316 default:
@@ -1282,8 +1337,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
1282 break; 1337 break;
1283 } 1338 }
1284 session->recv_seq = val ? -1 : 0; 1339 session->recv_seq = val ? -1 : 0;
1285 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1340 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1286 "%s: set recv_seq=%d\n", session->name, session->recv_seq); 1341 "%s: set recv_seq=%d\n",
1342 session->name, session->recv_seq);
1287 break; 1343 break;
1288 1344
1289 case PPPOL2TP_SO_SENDSEQ: 1345 case PPPOL2TP_SO_SENDSEQ:
@@ -1298,8 +1354,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
1298 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : 1354 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
1299 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; 1355 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1300 } 1356 }
1301 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1357 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1302 "%s: set send_seq=%d\n", session->name, session->send_seq); 1358 "%s: set send_seq=%d\n",
1359 session->name, session->send_seq);
1303 break; 1360 break;
1304 1361
1305 case PPPOL2TP_SO_LNSMODE: 1362 case PPPOL2TP_SO_LNSMODE:
@@ -1308,20 +1365,22 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
1308 break; 1365 break;
1309 } 1366 }
1310 session->lns_mode = val ? -1 : 0; 1367 session->lns_mode = val ? -1 : 0;
1311 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1368 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1312 "%s: set lns_mode=%d\n", session->name, session->lns_mode); 1369 "%s: set lns_mode=%d\n",
1370 session->name, session->lns_mode);
1313 break; 1371 break;
1314 1372
1315 case PPPOL2TP_SO_DEBUG: 1373 case PPPOL2TP_SO_DEBUG:
1316 session->debug = val; 1374 session->debug = val;
1317 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1375 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
1318 "%s: set debug=%x\n", session->name, session->debug); 1376 session->name, session->debug);
1319 break; 1377 break;
1320 1378
1321 case PPPOL2TP_SO_REORDERTO: 1379 case PPPOL2TP_SO_REORDERTO:
1322 session->reorder_timeout = msecs_to_jiffies(val); 1380 session->reorder_timeout = msecs_to_jiffies(val);
1323 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1381 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1324 "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); 1382 "%s: set reorder_timeout=%d\n",
1383 session->name, session->reorder_timeout);
1325 break; 1384 break;
1326 1385
1327 default: 1386 default:
@@ -1400,8 +1459,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
1400 switch (optname) { 1459 switch (optname) {
1401 case PPPOL2TP_SO_DEBUG: 1460 case PPPOL2TP_SO_DEBUG:
1402 *val = tunnel->debug; 1461 *val = tunnel->debug;
1403 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1462 l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
1404 "%s: get debug=%x\n", tunnel->name, tunnel->debug); 1463 tunnel->name, tunnel->debug);
1405 break; 1464 break;
1406 1465
1407 default: 1466 default:
@@ -1423,32 +1482,32 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
1423 switch (optname) { 1482 switch (optname) {
1424 case PPPOL2TP_SO_RECVSEQ: 1483 case PPPOL2TP_SO_RECVSEQ:
1425 *val = session->recv_seq; 1484 *val = session->recv_seq;
1426 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1485 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1427 "%s: get recv_seq=%d\n", session->name, *val); 1486 "%s: get recv_seq=%d\n", session->name, *val);
1428 break; 1487 break;
1429 1488
1430 case PPPOL2TP_SO_SENDSEQ: 1489 case PPPOL2TP_SO_SENDSEQ:
1431 *val = session->send_seq; 1490 *val = session->send_seq;
1432 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1491 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1433 "%s: get send_seq=%d\n", session->name, *val); 1492 "%s: get send_seq=%d\n", session->name, *val);
1434 break; 1493 break;
1435 1494
1436 case PPPOL2TP_SO_LNSMODE: 1495 case PPPOL2TP_SO_LNSMODE:
1437 *val = session->lns_mode; 1496 *val = session->lns_mode;
1438 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1497 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1439 "%s: get lns_mode=%d\n", session->name, *val); 1498 "%s: get lns_mode=%d\n", session->name, *val);
1440 break; 1499 break;
1441 1500
1442 case PPPOL2TP_SO_DEBUG: 1501 case PPPOL2TP_SO_DEBUG:
1443 *val = session->debug; 1502 *val = session->debug;
1444 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1503 l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
1445 "%s: get debug=%d\n", session->name, *val); 1504 session->name, *val);
1446 break; 1505 break;
1447 1506
1448 case PPPOL2TP_SO_REORDERTO: 1507 case PPPOL2TP_SO_REORDERTO:
1449 *val = (int) jiffies_to_msecs(session->reorder_timeout); 1508 *val = (int) jiffies_to_msecs(session->reorder_timeout);
1450 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1509 l2tp_info(session, PPPOL2TP_MSG_CONTROL,
1451 "%s: get reorder_timeout=%d\n", session->name, *val); 1510 "%s: get reorder_timeout=%d\n", session->name, *val);
1452 break; 1511 break;
1453 1512
1454 default: 1513 default:
@@ -1811,8 +1870,7 @@ static int __init pppol2tp_init(void)
1811 goto out_unregister_pppox; 1870 goto out_unregister_pppox;
1812#endif 1871#endif
1813 1872
1814 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", 1873 pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);
1815 PPPOL2TP_DRV_VERSION);
1816 1874
1817out: 1875out:
1818 return err; 1876 return err;
@@ -1845,3 +1903,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1845MODULE_DESCRIPTION("PPP over L2TP over UDP"); 1903MODULE_DESCRIPTION("PPP over L2TP over UDP");
1846MODULE_LICENSE("GPL"); 1904MODULE_LICENSE("GPL");
1847MODULE_VERSION(PPPOL2TP_DRV_VERSION); 1905MODULE_VERSION(PPPOL2TP_DRV_VERSION);
1906MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 8d0324bac01c..3cdaa046c1bc 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -15,6 +15,8 @@
15 * 2000-10-29 Henner Eisen lapb_data_indication() return status. 15 * 2000-10-29 Henner Eisen lapb_data_indication() return status.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/errno.h> 21#include <linux/errno.h>
20#include <linux/types.h> 22#include <linux/types.h>
@@ -32,7 +34,6 @@
32#include <linux/slab.h> 34#include <linux/slab.h>
33#include <net/sock.h> 35#include <net/sock.h>
34#include <asm/uaccess.h> 36#include <asm/uaccess.h>
35#include <asm/system.h>
36#include <linux/fcntl.h> 37#include <linux/fcntl.h>
37#include <linux/mm.h> 38#include <linux/mm.h>
38#include <linux/interrupt.h> 39#include <linux/interrupt.h>
@@ -280,9 +281,7 @@ int lapb_connect_request(struct net_device *dev)
280 281
281 lapb_establish_data_link(lapb); 282 lapb_establish_data_link(lapb);
282 283
283#if LAPB_DEBUG > 0 284 lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev);
284 printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev);
285#endif
286 lapb->state = LAPB_STATE_1; 285 lapb->state = LAPB_STATE_1;
287 286
288 rc = LAPB_OK; 287 rc = LAPB_OK;
@@ -306,12 +305,8 @@ int lapb_disconnect_request(struct net_device *dev)
306 goto out_put; 305 goto out_put;
307 306
308 case LAPB_STATE_1: 307 case LAPB_STATE_1:
309#if LAPB_DEBUG > 1 308 lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
310 printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev); 309 lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
311#endif
312#if LAPB_DEBUG > 0
313 printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
314#endif
315 lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); 310 lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
316 lapb->state = LAPB_STATE_0; 311 lapb->state = LAPB_STATE_0;
317 lapb_start_t1timer(lapb); 312 lapb_start_t1timer(lapb);
@@ -330,12 +325,8 @@ int lapb_disconnect_request(struct net_device *dev)
330 lapb_stop_t2timer(lapb); 325 lapb_stop_t2timer(lapb);
331 lapb->state = LAPB_STATE_2; 326 lapb->state = LAPB_STATE_2;
332 327
333#if LAPB_DEBUG > 1 328 lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
334 printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev); 329 lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
335#endif
336#if LAPB_DEBUG > 0
337 printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev);
338#endif
339 330
340 rc = LAPB_OK; 331 rc = LAPB_OK;
341out_put: 332out_put:
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 2ec1af5c36cc..5dba899131b3 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -15,6 +15,8 @@
15 * 2000-10-29 Henner Eisen lapb_data_indication() return status. 15 * 2000-10-29 Henner Eisen lapb_data_indication() return status.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/errno.h> 20#include <linux/errno.h>
19#include <linux/types.h> 21#include <linux/types.h>
20#include <linux/socket.h> 22#include <linux/socket.h>
@@ -30,7 +32,6 @@
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include <net/sock.h> 33#include <net/sock.h>
32#include <asm/uaccess.h> 34#include <asm/uaccess.h>
33#include <asm/system.h>
34#include <linux/fcntl.h> 35#include <linux/fcntl.h>
35#include <linux/mm.h> 36#include <linux/mm.h>
36#include <linux/interrupt.h> 37#include <linux/interrupt.h>
@@ -45,25 +46,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
45{ 46{
46 switch (frame->type) { 47 switch (frame->type) {
47 case LAPB_SABM: 48 case LAPB_SABM:
48#if LAPB_DEBUG > 1 49 lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf);
49 printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n",
50 lapb->dev, frame->pf);
51#endif
52 if (lapb->mode & LAPB_EXTENDED) { 50 if (lapb->mode & LAPB_EXTENDED) {
53#if LAPB_DEBUG > 1 51 lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
54 printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", 52 lapb->dev, frame->pf);
55 lapb->dev, frame->pf);
56#endif
57 lapb_send_control(lapb, LAPB_DM, frame->pf, 53 lapb_send_control(lapb, LAPB_DM, frame->pf,
58 LAPB_RESPONSE); 54 LAPB_RESPONSE);
59 } else { 55 } else {
60#if LAPB_DEBUG > 1 56 lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
61 printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", 57 lapb->dev, frame->pf);
62 lapb->dev, frame->pf); 58 lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
63#endif
64#if LAPB_DEBUG > 0
65 printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
66#endif
67 lapb_send_control(lapb, LAPB_UA, frame->pf, 59 lapb_send_control(lapb, LAPB_UA, frame->pf,
68 LAPB_RESPONSE); 60 LAPB_RESPONSE);
69 lapb_stop_t1timer(lapb); 61 lapb_stop_t1timer(lapb);
@@ -79,18 +71,11 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
79 break; 71 break;
80 72
81 case LAPB_SABME: 73 case LAPB_SABME:
82#if LAPB_DEBUG > 1 74 lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf);
83 printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n",
84 lapb->dev, frame->pf);
85#endif
86 if (lapb->mode & LAPB_EXTENDED) { 75 if (lapb->mode & LAPB_EXTENDED) {
87#if LAPB_DEBUG > 1 76 lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
88 printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", 77 lapb->dev, frame->pf);
89 lapb->dev, frame->pf); 78 lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
90#endif
91#if LAPB_DEBUG > 0
92 printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
93#endif
94 lapb_send_control(lapb, LAPB_UA, frame->pf, 79 lapb_send_control(lapb, LAPB_UA, frame->pf,
95 LAPB_RESPONSE); 80 LAPB_RESPONSE);
96 lapb_stop_t1timer(lapb); 81 lapb_stop_t1timer(lapb);
@@ -103,22 +88,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
103 lapb->va = 0; 88 lapb->va = 0;
104 lapb_connect_indication(lapb, LAPB_OK); 89 lapb_connect_indication(lapb, LAPB_OK);
105 } else { 90 } else {
106#if LAPB_DEBUG > 1 91 lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
107 printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", 92 lapb->dev, frame->pf);
108 lapb->dev, frame->pf);
109#endif
110 lapb_send_control(lapb, LAPB_DM, frame->pf, 93 lapb_send_control(lapb, LAPB_DM, frame->pf,
111 LAPB_RESPONSE); 94 LAPB_RESPONSE);
112 } 95 }
113 break; 96 break;
114 97
115 case LAPB_DISC: 98 case LAPB_DISC:
116#if LAPB_DEBUG > 1 99 lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf);
117 printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n", 100 lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf);
118 lapb->dev, frame->pf);
119 printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
120 lapb->dev, frame->pf);
121#endif
122 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); 101 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
123 break; 102 break;
124 103
@@ -138,68 +117,45 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
138{ 117{
139 switch (frame->type) { 118 switch (frame->type) {
140 case LAPB_SABM: 119 case LAPB_SABM:
141#if LAPB_DEBUG > 1 120 lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf);
142 printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n",
143 lapb->dev, frame->pf);
144#endif
145 if (lapb->mode & LAPB_EXTENDED) { 121 if (lapb->mode & LAPB_EXTENDED) {
146#if LAPB_DEBUG > 1 122 lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
147 printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", 123 lapb->dev, frame->pf);
148 lapb->dev, frame->pf);
149#endif
150 lapb_send_control(lapb, LAPB_DM, frame->pf, 124 lapb_send_control(lapb, LAPB_DM, frame->pf,
151 LAPB_RESPONSE); 125 LAPB_RESPONSE);
152 } else { 126 } else {
153#if LAPB_DEBUG > 1 127 lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
154 printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", 128 lapb->dev, frame->pf);
155 lapb->dev, frame->pf);
156#endif
157 lapb_send_control(lapb, LAPB_UA, frame->pf, 129 lapb_send_control(lapb, LAPB_UA, frame->pf,
158 LAPB_RESPONSE); 130 LAPB_RESPONSE);
159 } 131 }
160 break; 132 break;
161 133
162 case LAPB_SABME: 134 case LAPB_SABME:
163#if LAPB_DEBUG > 1 135 lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf);
164 printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n",
165 lapb->dev, frame->pf);
166#endif
167 if (lapb->mode & LAPB_EXTENDED) { 136 if (lapb->mode & LAPB_EXTENDED) {
168#if LAPB_DEBUG > 1 137 lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
169 printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", 138 lapb->dev, frame->pf);
170 lapb->dev, frame->pf);
171#endif
172 lapb_send_control(lapb, LAPB_UA, frame->pf, 139 lapb_send_control(lapb, LAPB_UA, frame->pf,
173 LAPB_RESPONSE); 140 LAPB_RESPONSE);
174 } else { 141 } else {
175#if LAPB_DEBUG > 1 142 lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
176 printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", 143 lapb->dev, frame->pf);
177 lapb->dev, frame->pf);
178#endif
179 lapb_send_control(lapb, LAPB_DM, frame->pf, 144 lapb_send_control(lapb, LAPB_DM, frame->pf,
180 LAPB_RESPONSE); 145 LAPB_RESPONSE);
181 } 146 }
182 break; 147 break;
183 148
184 case LAPB_DISC: 149 case LAPB_DISC:
185#if LAPB_DEBUG > 1 150 lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf);
186 printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n", 151 lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf);
187 lapb->dev, frame->pf);
188 printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
189 lapb->dev, frame->pf);
190#endif
191 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); 152 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
192 break; 153 break;
193 154
194 case LAPB_UA: 155 case LAPB_UA:
195#if LAPB_DEBUG > 1 156 lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf);
196 printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n",
197 lapb->dev, frame->pf);
198#endif
199 if (frame->pf) { 157 if (frame->pf) {
200#if LAPB_DEBUG > 0 158 lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev);
201 printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev);
202#endif
203 lapb_stop_t1timer(lapb); 159 lapb_stop_t1timer(lapb);
204 lapb_stop_t2timer(lapb); 160 lapb_stop_t2timer(lapb);
205 lapb->state = LAPB_STATE_3; 161 lapb->state = LAPB_STATE_3;
@@ -213,14 +169,9 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
213 break; 169 break;
214 170
215 case LAPB_DM: 171 case LAPB_DM:
216#if LAPB_DEBUG > 1 172 lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf);
217 printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n",
218 lapb->dev, frame->pf);
219#endif
220 if (frame->pf) { 173 if (frame->pf) {
221#if LAPB_DEBUG > 0 174 lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
222 printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
223#endif
224 lapb_clear_queues(lapb); 175 lapb_clear_queues(lapb);
225 lapb->state = LAPB_STATE_0; 176 lapb->state = LAPB_STATE_0;
226 lapb_start_t1timer(lapb); 177 lapb_start_t1timer(lapb);
@@ -243,34 +194,22 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
243 switch (frame->type) { 194 switch (frame->type) {
244 case LAPB_SABM: 195 case LAPB_SABM:
245 case LAPB_SABME: 196 case LAPB_SABME:
246#if LAPB_DEBUG > 1 197 lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n",
247 printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n", 198 lapb->dev, frame->pf);
248 lapb->dev, frame->pf); 199 lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf);
249 printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n",
250 lapb->dev, frame->pf);
251#endif
252 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); 200 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
253 break; 201 break;
254 202
255 case LAPB_DISC: 203 case LAPB_DISC:
256#if LAPB_DEBUG > 1 204 lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf);
257 printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n", 205 lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf);
258 lapb->dev, frame->pf);
259 printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n",
260 lapb->dev, frame->pf);
261#endif
262 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); 206 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
263 break; 207 break;
264 208
265 case LAPB_UA: 209 case LAPB_UA:
266#if LAPB_DEBUG > 1 210 lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf);
267 printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n",
268 lapb->dev, frame->pf);
269#endif
270 if (frame->pf) { 211 if (frame->pf) {
271#if LAPB_DEBUG > 0 212 lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
272 printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
273#endif
274 lapb->state = LAPB_STATE_0; 213 lapb->state = LAPB_STATE_0;
275 lapb_start_t1timer(lapb); 214 lapb_start_t1timer(lapb);
276 lapb_stop_t2timer(lapb); 215 lapb_stop_t2timer(lapb);
@@ -279,14 +218,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
279 break; 218 break;
280 219
281 case LAPB_DM: 220 case LAPB_DM:
282#if LAPB_DEBUG > 1 221 lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
283 printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
284 lapb->dev, frame->pf);
285#endif
286 if (frame->pf) { 222 if (frame->pf) {
287#if LAPB_DEBUG > 0 223 lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
288 printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
289#endif
290 lapb->state = LAPB_STATE_0; 224 lapb->state = LAPB_STATE_0;
291 lapb_start_t1timer(lapb); 225 lapb_start_t1timer(lapb);
292 lapb_stop_t2timer(lapb); 226 lapb_stop_t2timer(lapb);
@@ -298,12 +232,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
298 case LAPB_REJ: 232 case LAPB_REJ:
299 case LAPB_RNR: 233 case LAPB_RNR:
300 case LAPB_RR: 234 case LAPB_RR:
301#if LAPB_DEBUG > 1 235 lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
302 printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
303 lapb->dev, frame->pf);
304 printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
305 lapb->dev, frame->pf); 236 lapb->dev, frame->pf);
306#endif 237 lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
307 if (frame->pf) 238 if (frame->pf)
308 lapb_send_control(lapb, LAPB_DM, frame->pf, 239 lapb_send_control(lapb, LAPB_DM, frame->pf,
309 LAPB_RESPONSE); 240 LAPB_RESPONSE);
@@ -326,22 +257,15 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
326 257
327 switch (frame->type) { 258 switch (frame->type) {
328 case LAPB_SABM: 259 case LAPB_SABM:
329#if LAPB_DEBUG > 1 260 lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf);
330 printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n",
331 lapb->dev, frame->pf);
332#endif
333 if (lapb->mode & LAPB_EXTENDED) { 261 if (lapb->mode & LAPB_EXTENDED) {
334#if LAPB_DEBUG > 1 262 lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
335 printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", 263 lapb->dev, frame->pf);
336 lapb->dev, frame->pf);
337#endif
338 lapb_send_control(lapb, LAPB_DM, frame->pf, 264 lapb_send_control(lapb, LAPB_DM, frame->pf,
339 LAPB_RESPONSE); 265 LAPB_RESPONSE);
340 } else { 266 } else {
341#if LAPB_DEBUG > 1 267 lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
342 printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", 268 lapb->dev, frame->pf);
343 lapb->dev, frame->pf);
344#endif
345 lapb_send_control(lapb, LAPB_UA, frame->pf, 269 lapb_send_control(lapb, LAPB_UA, frame->pf,
346 LAPB_RESPONSE); 270 LAPB_RESPONSE);
347 lapb_stop_t1timer(lapb); 271 lapb_stop_t1timer(lapb);
@@ -356,15 +280,10 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
356 break; 280 break;
357 281
358 case LAPB_SABME: 282 case LAPB_SABME:
359#if LAPB_DEBUG > 1 283 lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf);
360 printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n",
361 lapb->dev, frame->pf);
362#endif
363 if (lapb->mode & LAPB_EXTENDED) { 284 if (lapb->mode & LAPB_EXTENDED) {
364#if LAPB_DEBUG > 1 285 lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
365 printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", 286 lapb->dev, frame->pf);
366 lapb->dev, frame->pf);
367#endif
368 lapb_send_control(lapb, LAPB_UA, frame->pf, 287 lapb_send_control(lapb, LAPB_UA, frame->pf,
369 LAPB_RESPONSE); 288 LAPB_RESPONSE);
370 lapb_stop_t1timer(lapb); 289 lapb_stop_t1timer(lapb);
@@ -376,23 +295,16 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
376 lapb->va = 0; 295 lapb->va = 0;
377 lapb_requeue_frames(lapb); 296 lapb_requeue_frames(lapb);
378 } else { 297 } else {
379#if LAPB_DEBUG > 1 298 lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
380 printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", 299 lapb->dev, frame->pf);
381 lapb->dev, frame->pf);
382#endif
383 lapb_send_control(lapb, LAPB_DM, frame->pf, 300 lapb_send_control(lapb, LAPB_DM, frame->pf,
384 LAPB_RESPONSE); 301 LAPB_RESPONSE);
385 } 302 }
386 break; 303 break;
387 304
388 case LAPB_DISC: 305 case LAPB_DISC:
389#if LAPB_DEBUG > 1 306 lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf);
390 printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n", 307 lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
391 lapb->dev, frame->pf);
392#endif
393#if LAPB_DEBUG > 0
394 printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
395#endif
396 lapb_clear_queues(lapb); 308 lapb_clear_queues(lapb);
397 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); 309 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
398 lapb_start_t1timer(lapb); 310 lapb_start_t1timer(lapb);
@@ -402,13 +314,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
402 break; 314 break;
403 315
404 case LAPB_DM: 316 case LAPB_DM:
405#if LAPB_DEBUG > 1 317 lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf);
406 printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n", 318 lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
407 lapb->dev, frame->pf);
408#endif
409#if LAPB_DEBUG > 0
410 printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
411#endif
412 lapb_clear_queues(lapb); 319 lapb_clear_queues(lapb);
413 lapb->state = LAPB_STATE_0; 320 lapb->state = LAPB_STATE_0;
414 lapb_start_t1timer(lapb); 321 lapb_start_t1timer(lapb);
@@ -417,10 +324,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
417 break; 324 break;
418 325
419 case LAPB_RNR: 326 case LAPB_RNR:
420#if LAPB_DEBUG > 1 327 lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n",
421 printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n", 328 lapb->dev, frame->pf, frame->nr);
422 lapb->dev, frame->pf, frame->nr);
423#endif
424 lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION; 329 lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION;
425 lapb_check_need_response(lapb, frame->cr, frame->pf); 330 lapb_check_need_response(lapb, frame->cr, frame->pf);
426 if (lapb_validate_nr(lapb, frame->nr)) { 331 if (lapb_validate_nr(lapb, frame->nr)) {
@@ -429,9 +334,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
429 lapb->frmr_data = *frame; 334 lapb->frmr_data = *frame;
430 lapb->frmr_type = LAPB_FRMR_Z; 335 lapb->frmr_type = LAPB_FRMR_Z;
431 lapb_transmit_frmr(lapb); 336 lapb_transmit_frmr(lapb);
432#if LAPB_DEBUG > 0 337 lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
433 printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
434#endif
435 lapb_start_t1timer(lapb); 338 lapb_start_t1timer(lapb);
436 lapb_stop_t2timer(lapb); 339 lapb_stop_t2timer(lapb);
437 lapb->state = LAPB_STATE_4; 340 lapb->state = LAPB_STATE_4;
@@ -440,10 +343,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
440 break; 343 break;
441 344
442 case LAPB_RR: 345 case LAPB_RR:
443#if LAPB_DEBUG > 1 346 lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n",
444 printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n", 347 lapb->dev, frame->pf, frame->nr);
445 lapb->dev, frame->pf, frame->nr);
446#endif
447 lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; 348 lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
448 lapb_check_need_response(lapb, frame->cr, frame->pf); 349 lapb_check_need_response(lapb, frame->cr, frame->pf);
449 if (lapb_validate_nr(lapb, frame->nr)) { 350 if (lapb_validate_nr(lapb, frame->nr)) {
@@ -452,9 +353,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
452 lapb->frmr_data = *frame; 353 lapb->frmr_data = *frame;
453 lapb->frmr_type = LAPB_FRMR_Z; 354 lapb->frmr_type = LAPB_FRMR_Z;
454 lapb_transmit_frmr(lapb); 355 lapb_transmit_frmr(lapb);
455#if LAPB_DEBUG > 0 356 lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
456 printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
457#endif
458 lapb_start_t1timer(lapb); 357 lapb_start_t1timer(lapb);
459 lapb_stop_t2timer(lapb); 358 lapb_stop_t2timer(lapb);
460 lapb->state = LAPB_STATE_4; 359 lapb->state = LAPB_STATE_4;
@@ -463,10 +362,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
463 break; 362 break;
464 363
465 case LAPB_REJ: 364 case LAPB_REJ:
466#if LAPB_DEBUG > 1 365 lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n",
467 printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n", 366 lapb->dev, frame->pf, frame->nr);
468 lapb->dev, frame->pf, frame->nr);
469#endif
470 lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; 367 lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
471 lapb_check_need_response(lapb, frame->cr, frame->pf); 368 lapb_check_need_response(lapb, frame->cr, frame->pf);
472 if (lapb_validate_nr(lapb, frame->nr)) { 369 if (lapb_validate_nr(lapb, frame->nr)) {
@@ -478,9 +375,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
478 lapb->frmr_data = *frame; 375 lapb->frmr_data = *frame;
479 lapb->frmr_type = LAPB_FRMR_Z; 376 lapb->frmr_type = LAPB_FRMR_Z;
480 lapb_transmit_frmr(lapb); 377 lapb_transmit_frmr(lapb);
481#if LAPB_DEBUG > 0 378 lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
482 printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
483#endif
484 lapb_start_t1timer(lapb); 379 lapb_start_t1timer(lapb);
485 lapb_stop_t2timer(lapb); 380 lapb_stop_t2timer(lapb);
486 lapb->state = LAPB_STATE_4; 381 lapb->state = LAPB_STATE_4;
@@ -489,17 +384,13 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
489 break; 384 break;
490 385
491 case LAPB_I: 386 case LAPB_I:
492#if LAPB_DEBUG > 1 387 lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n",
493 printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n", 388 lapb->dev, frame->pf, frame->ns, frame->nr);
494 lapb->dev, frame->pf, frame->ns, frame->nr);
495#endif
496 if (!lapb_validate_nr(lapb, frame->nr)) { 389 if (!lapb_validate_nr(lapb, frame->nr)) {
497 lapb->frmr_data = *frame; 390 lapb->frmr_data = *frame;
498 lapb->frmr_type = LAPB_FRMR_Z; 391 lapb->frmr_type = LAPB_FRMR_Z;
499 lapb_transmit_frmr(lapb); 392 lapb_transmit_frmr(lapb);
500#if LAPB_DEBUG > 0 393 lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
501 printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
502#endif
503 lapb_start_t1timer(lapb); 394 lapb_start_t1timer(lapb);
504 lapb_stop_t2timer(lapb); 395 lapb_stop_t2timer(lapb);
505 lapb->state = LAPB_STATE_4; 396 lapb->state = LAPB_STATE_4;
@@ -523,7 +414,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
523 * a frame lost on the wire. 414 * a frame lost on the wire.
524 */ 415 */
525 if (cn == NET_RX_DROP) { 416 if (cn == NET_RX_DROP) {
526 printk(KERN_DEBUG "LAPB: rx congestion\n"); 417 pr_debug("rx congestion\n");
527 break; 418 break;
528 } 419 }
529 lapb->vr = (lapb->vr + 1) % modulus; 420 lapb->vr = (lapb->vr + 1) % modulus;
@@ -542,11 +433,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
542 if (frame->pf) 433 if (frame->pf)
543 lapb_enquiry_response(lapb); 434 lapb_enquiry_response(lapb);
544 } else { 435 } else {
545#if LAPB_DEBUG > 1 436 lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n",
546 printk(KERN_DEBUG 437 lapb->dev, frame->pf, lapb->vr);
547 "lapb: (%p) S3 TX REJ(%d) R%d\n",
548 lapb->dev, frame->pf, lapb->vr);
549#endif
550 lapb->condition |= LAPB_REJECT_CONDITION; 438 lapb->condition |= LAPB_REJECT_CONDITION;
551 lapb_send_control(lapb, LAPB_REJ, frame->pf, 439 lapb_send_control(lapb, LAPB_REJ, frame->pf,
552 LAPB_RESPONSE); 440 LAPB_RESPONSE);
@@ -556,31 +444,22 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
556 break; 444 break;
557 445
558 case LAPB_FRMR: 446 case LAPB_FRMR:
559#if LAPB_DEBUG > 1 447 lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
560 printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X " 448 lapb->dev, frame->pf,
561 "%02X %02X %02X %02X\n", lapb->dev, frame->pf, 449 skb->data[0], skb->data[1], skb->data[2],
562 skb->data[0], skb->data[1], skb->data[2], 450 skb->data[3], skb->data[4]);
563 skb->data[3], skb->data[4]);
564#endif
565 lapb_establish_data_link(lapb); 451 lapb_establish_data_link(lapb);
566#if LAPB_DEBUG > 0 452 lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
567 printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev);
568#endif
569 lapb_requeue_frames(lapb); 453 lapb_requeue_frames(lapb);
570 lapb->state = LAPB_STATE_1; 454 lapb->state = LAPB_STATE_1;
571 break; 455 break;
572 456
573 case LAPB_ILLEGAL: 457 case LAPB_ILLEGAL:
574#if LAPB_DEBUG > 1 458 lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf);
575 printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n",
576 lapb->dev, frame->pf);
577#endif
578 lapb->frmr_data = *frame; 459 lapb->frmr_data = *frame;
579 lapb->frmr_type = LAPB_FRMR_W; 460 lapb->frmr_type = LAPB_FRMR_W;
580 lapb_transmit_frmr(lapb); 461 lapb_transmit_frmr(lapb);
581#if LAPB_DEBUG > 0 462 lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
582 printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
583#endif
584 lapb_start_t1timer(lapb); 463 lapb_start_t1timer(lapb);
585 lapb_stop_t2timer(lapb); 464 lapb_stop_t2timer(lapb);
586 lapb->state = LAPB_STATE_4; 465 lapb->state = LAPB_STATE_4;
@@ -601,25 +480,16 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
601{ 480{
602 switch (frame->type) { 481 switch (frame->type) {
603 case LAPB_SABM: 482 case LAPB_SABM:
604#if LAPB_DEBUG > 1 483 lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf);
605 printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n",
606 lapb->dev, frame->pf);
607#endif
608 if (lapb->mode & LAPB_EXTENDED) { 484 if (lapb->mode & LAPB_EXTENDED) {
609#if LAPB_DEBUG > 1 485 lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
610 printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", 486 lapb->dev, frame->pf);
611 lapb->dev, frame->pf);
612#endif
613 lapb_send_control(lapb, LAPB_DM, frame->pf, 487 lapb_send_control(lapb, LAPB_DM, frame->pf,
614 LAPB_RESPONSE); 488 LAPB_RESPONSE);
615 } else { 489 } else {
616#if LAPB_DEBUG > 1 490 lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
617 printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", 491 lapb->dev, frame->pf);
618 lapb->dev, frame->pf); 492 lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
619#endif
620#if LAPB_DEBUG > 0
621 printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
622#endif
623 lapb_send_control(lapb, LAPB_UA, frame->pf, 493 lapb_send_control(lapb, LAPB_UA, frame->pf,
624 LAPB_RESPONSE); 494 LAPB_RESPONSE);
625 lapb_stop_t1timer(lapb); 495 lapb_stop_t1timer(lapb);
@@ -635,18 +505,11 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
635 break; 505 break;
636 506
637 case LAPB_SABME: 507 case LAPB_SABME:
638#if LAPB_DEBUG > 1 508 lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf);
639 printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n",
640 lapb->dev, frame->pf);
641#endif
642 if (lapb->mode & LAPB_EXTENDED) { 509 if (lapb->mode & LAPB_EXTENDED) {
643#if LAPB_DEBUG > 1 510 lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
644 printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", 511 lapb->dev, frame->pf);
645 lapb->dev, frame->pf); 512 lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
646#endif
647#if LAPB_DEBUG > 0
648 printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
649#endif
650 lapb_send_control(lapb, LAPB_UA, frame->pf, 513 lapb_send_control(lapb, LAPB_UA, frame->pf,
651 LAPB_RESPONSE); 514 LAPB_RESPONSE);
652 lapb_stop_t1timer(lapb); 515 lapb_stop_t1timer(lapb);
@@ -659,10 +522,8 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
659 lapb->va = 0; 522 lapb->va = 0;
660 lapb_connect_indication(lapb, LAPB_OK); 523 lapb_connect_indication(lapb, LAPB_OK);
661 } else { 524 } else {
662#if LAPB_DEBUG > 1 525 lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
663 printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", 526 lapb->dev, frame->pf);
664 lapb->dev, frame->pf);
665#endif
666 lapb_send_control(lapb, LAPB_DM, frame->pf, 527 lapb_send_control(lapb, LAPB_DM, frame->pf,
667 LAPB_RESPONSE); 528 LAPB_RESPONSE);
668 } 529 }
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index c75a79540f9f..ba4d015bd1a6 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -14,6 +14,8 @@
14 * LAPB 002 Jonathan Naylor New timer architecture. 14 * LAPB 002 Jonathan Naylor New timer architecture.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/errno.h> 19#include <linux/errno.h>
18#include <linux/types.h> 20#include <linux/types.h>
19#include <linux/socket.h> 21#include <linux/socket.h>
@@ -28,7 +30,6 @@
28#include <linux/slab.h> 30#include <linux/slab.h>
29#include <net/sock.h> 31#include <net/sock.h>
30#include <asm/uaccess.h> 32#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <linux/fcntl.h> 33#include <linux/fcntl.h>
33#include <linux/mm.h> 34#include <linux/mm.h>
34#include <linux/interrupt.h> 35#include <linux/interrupt.h>
@@ -61,10 +62,8 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll
61 *frame |= lapb->vs << 1; 62 *frame |= lapb->vs << 1;
62 } 63 }
63 64
64#if LAPB_DEBUG > 1 65 lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n",
65 printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n", 66 lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
66 lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
67#endif
68 67
69 lapb_transmit_buffer(lapb, skb, LAPB_COMMAND); 68 lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
70} 69}
@@ -149,11 +148,9 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
149 } 148 }
150 } 149 }
151 150
152#if LAPB_DEBUG > 2 151 lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
153 printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n", 152 lapb->dev, lapb->state,
154 lapb->dev, lapb->state, 153 skb->data[0], skb->data[1], skb->data[2]);
155 skb->data[0], skb->data[1], skb->data[2]);
156#endif
157 154
158 if (!lapb_data_transmit(lapb, skb)) 155 if (!lapb_data_transmit(lapb, skb))
159 kfree_skb(skb); 156 kfree_skb(skb);
@@ -165,16 +162,10 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
165 lapb->n2count = 0; 162 lapb->n2count = 0;
166 163
167 if (lapb->mode & LAPB_EXTENDED) { 164 if (lapb->mode & LAPB_EXTENDED) {
168#if LAPB_DEBUG > 1 165 lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state);
169 printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n",
170 lapb->dev, lapb->state);
171#endif
172 lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); 166 lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
173 } else { 167 } else {
174#if LAPB_DEBUG > 1 168 lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state);
175 printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n",
176 lapb->dev, lapb->state);
177#endif
178 lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); 169 lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
179 } 170 }
180 171
@@ -184,10 +175,8 @@ void lapb_establish_data_link(struct lapb_cb *lapb)
184 175
185void lapb_enquiry_response(struct lapb_cb *lapb) 176void lapb_enquiry_response(struct lapb_cb *lapb)
186{ 177{
187#if LAPB_DEBUG > 1 178 lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n",
188 printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n", 179 lapb->dev, lapb->state, lapb->vr);
189 lapb->dev, lapb->state, lapb->vr);
190#endif
191 180
192 lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE); 181 lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE);
193 182
@@ -196,10 +185,8 @@ void lapb_enquiry_response(struct lapb_cb *lapb)
196 185
197void lapb_timeout_response(struct lapb_cb *lapb) 186void lapb_timeout_response(struct lapb_cb *lapb)
198{ 187{
199#if LAPB_DEBUG > 1 188 lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n",
200 printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n", 189 lapb->dev, lapb->state, lapb->vr);
201 lapb->dev, lapb->state, lapb->vr);
202#endif
203 lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE); 190 lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE);
204 191
205 lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; 192 lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 43a2a7fb327b..9d0a426eccbb 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -13,6 +13,8 @@
13 * LAPB 001 Jonathan Naylor Started Coding 13 * LAPB 001 Jonathan Naylor Started Coding
14 */ 14 */
15 15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
16#include <linux/errno.h> 18#include <linux/errno.h>
17#include <linux/types.h> 19#include <linux/types.h>
18#include <linux/socket.h> 20#include <linux/socket.h>
@@ -27,7 +29,6 @@
27#include <linux/slab.h> 29#include <linux/slab.h>
28#include <net/sock.h> 30#include <net/sock.h>
29#include <asm/uaccess.h> 31#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <linux/fcntl.h> 32#include <linux/fcntl.h>
32#include <linux/mm.h> 33#include <linux/mm.h>
33#include <linux/interrupt.h> 34#include <linux/interrupt.h>
@@ -112,11 +113,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
112{ 113{
113 frame->type = LAPB_ILLEGAL; 114 frame->type = LAPB_ILLEGAL;
114 115
115#if LAPB_DEBUG > 2 116 lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
116 printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n", 117 lapb->dev, lapb->state,
117 lapb->dev, lapb->state, 118 skb->data[0], skb->data[1], skb->data[2]);
118 skb->data[0], skb->data[1], skb->data[2]);
119#endif
120 119
121 /* We always need to look at 2 bytes, sometimes we need 120 /* We always need to look at 2 bytes, sometimes we need
122 * to look at 3 and those cases are handled below. 121 * to look at 3 and those cases are handled below.
@@ -285,12 +284,10 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
285 dptr++; 284 dptr++;
286 *dptr++ = lapb->frmr_type; 285 *dptr++ = lapb->frmr_type;
287 286
288#if LAPB_DEBUG > 1 287 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
289 printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", 288 lapb->dev, lapb->state,
290 lapb->dev, lapb->state, 289 skb->data[1], skb->data[2], skb->data[3],
291 skb->data[1], skb->data[2], skb->data[3], 290 skb->data[4], skb->data[5]);
292 skb->data[4], skb->data[5]);
293#endif
294 } else { 291 } else {
295 dptr = skb_put(skb, 4); 292 dptr = skb_put(skb, 4);
296 *dptr++ = LAPB_FRMR; 293 *dptr++ = LAPB_FRMR;
@@ -302,11 +299,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
302 dptr++; 299 dptr++;
303 *dptr++ = lapb->frmr_type; 300 *dptr++ = lapb->frmr_type;
304 301
305#if LAPB_DEBUG > 1 302 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
306 printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n", 303 lapb->dev, lapb->state, skb->data[1],
307 lapb->dev, lapb->state, skb->data[1], 304 skb->data[2], skb->data[3]);
308 skb->data[2], skb->data[3]);
309#endif
310 } 305 }
311 306
312 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); 307 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index af6d14b44e2e..54563ad8aeb1 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -14,6 +14,8 @@
14 * LAPB 002 Jonathan Naylor New timer architecture. 14 * LAPB 002 Jonathan Naylor New timer architecture.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/errno.h> 19#include <linux/errno.h>
18#include <linux/types.h> 20#include <linux/types.h>
19#include <linux/socket.h> 21#include <linux/socket.h>
@@ -28,7 +30,6 @@
28#include <linux/skbuff.h> 30#include <linux/skbuff.h>
29#include <net/sock.h> 31#include <net/sock.h>
30#include <asm/uaccess.h> 32#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <linux/fcntl.h> 33#include <linux/fcntl.h>
33#include <linux/mm.h> 34#include <linux/mm.h>
34#include <linux/interrupt.h> 35#include <linux/interrupt.h>
@@ -106,21 +107,17 @@ static void lapb_t1timer_expiry(unsigned long param)
106 lapb_clear_queues(lapb); 107 lapb_clear_queues(lapb);
107 lapb->state = LAPB_STATE_0; 108 lapb->state = LAPB_STATE_0;
108 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); 109 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
109#if LAPB_DEBUG > 0 110 lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
110 printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
111#endif
112 return; 111 return;
113 } else { 112 } else {
114 lapb->n2count++; 113 lapb->n2count++;
115 if (lapb->mode & LAPB_EXTENDED) { 114 if (lapb->mode & LAPB_EXTENDED) {
116#if LAPB_DEBUG > 1 115 lapb_dbg(1, "(%p) S1 TX SABME(1)\n",
117 printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev); 116 lapb->dev);
118#endif
119 lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); 117 lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
120 } else { 118 } else {
121#if LAPB_DEBUG > 1 119 lapb_dbg(1, "(%p) S1 TX SABM(1)\n",
122 printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev); 120 lapb->dev);
123#endif
124 lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); 121 lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
125 } 122 }
126 } 123 }
@@ -134,15 +131,11 @@ static void lapb_t1timer_expiry(unsigned long param)
134 lapb_clear_queues(lapb); 131 lapb_clear_queues(lapb);
135 lapb->state = LAPB_STATE_0; 132 lapb->state = LAPB_STATE_0;
136 lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT); 133 lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
137#if LAPB_DEBUG > 0 134 lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
138 printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
139#endif
140 return; 135 return;
141 } else { 136 } else {
142 lapb->n2count++; 137 lapb->n2count++;
143#if LAPB_DEBUG > 1 138 lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
144 printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
145#endif
146 lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); 139 lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
147 } 140 }
148 break; 141 break;
@@ -156,9 +149,7 @@ static void lapb_t1timer_expiry(unsigned long param)
156 lapb->state = LAPB_STATE_0; 149 lapb->state = LAPB_STATE_0;
157 lapb_stop_t2timer(lapb); 150 lapb_stop_t2timer(lapb);
158 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); 151 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
159#if LAPB_DEBUG > 0 152 lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
160 printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
161#endif
162 return; 153 return;
163 } else { 154 } else {
164 lapb->n2count++; 155 lapb->n2count++;
@@ -174,9 +165,7 @@ static void lapb_t1timer_expiry(unsigned long param)
174 lapb_clear_queues(lapb); 165 lapb_clear_queues(lapb);
175 lapb->state = LAPB_STATE_0; 166 lapb->state = LAPB_STATE_0;
176 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); 167 lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
177#if LAPB_DEBUG > 0 168 lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
178 printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
179#endif
180 return; 169 return;
181 } else { 170 } else {
182 lapb->n2count++; 171 lapb->n2count++;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b9bef2c75026..fe5453c3e719 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -71,8 +71,7 @@ static inline u16 llc_ui_next_link_no(int sap)
71 */ 71 */
72static inline __be16 llc_proto_type(u16 arphrd) 72static inline __be16 llc_proto_type(u16 arphrd)
73{ 73{
74 return arphrd == ARPHRD_IEEE802_TR ? 74 return htons(ETH_P_802_2);
75 htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
76} 75}
77 76
78/** 77/**
@@ -518,7 +517,7 @@ static int llc_ui_listen(struct socket *sock, int backlog)
518 if (sock_flag(sk, SOCK_ZAPPED)) 517 if (sock_flag(sk, SOCK_ZAPPED))
519 goto out; 518 goto out;
520 rc = 0; 519 rc = 0;
521 if (!(unsigned)backlog) /* BSDism */ 520 if (!(unsigned int)backlog) /* BSDism */
522 backlog = 1; 521 backlog = 1;
523 sk->sk_max_ack_backlog = backlog; 522 sk->sk_max_ack_backlog = backlog;
524 if (sk->sk_state != TCP_LISTEN) { 523 if (sk->sk_state != TCP_LISTEN) {
@@ -806,10 +805,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
806 sk_wait_data(sk, &timeo); 805 sk_wait_data(sk, &timeo);
807 806
808 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { 807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
809 if (net_ratelimit()) 808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
810 printk(KERN_DEBUG "LLC(%s:%d): Application " 809 current->comm,
811 "bug, race in MSG_PEEK.\n", 810 task_pid_nr(current));
812 current->comm, task_pid_nr(current));
813 peek_seq = llc->copied_seq; 811 peek_seq = llc->copied_seq;
814 } 812 }
815 continue; 813 continue;
@@ -840,7 +838,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
840 838
841 if (!(flags & MSG_PEEK)) { 839 if (!(flags & MSG_PEEK)) {
842 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 840 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
843 sk_eat_skb(sk, skb, 0); 841 sk_eat_skb(sk, skb, false);
844 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 842 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
845 *seq = 0; 843 *seq = 0;
846 } 844 }
@@ -863,7 +861,7 @@ copy_uaddr:
863 861
864 if (!(flags & MSG_PEEK)) { 862 if (!(flags & MSG_PEEK)) {
865 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 863 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
866 sk_eat_skb(sk, skb, 0); 864 sk_eat_skb(sk, skb, false);
867 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 865 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
868 *seq = 0; 866 *seq = 0;
869 } 867 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6a224d..0d0d416dfab6 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
828 else { 828 else {
829 dprintk("%s: adding to backlog...\n", __func__); 829 dprintk("%s: adding to backlog...\n", __func__);
830 llc_set_backlog_type(skb, LLC_PACKET); 830 llc_set_backlog_type(skb, LLC_PACKET);
831 if (sk_add_backlog(sk, skb)) 831 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
832 goto drop_unlock; 832 goto drop_unlock;
833 } 833 }
834out: 834out:
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b658cba89fdd..2dae8a5df23f 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -14,9 +14,7 @@
14 */ 14 */
15 15
16#include <linux/if_arp.h> 16#include <linux/if_arp.h>
17#include <linux/if_tr.h>
18#include <linux/netdevice.h> 17#include <linux/netdevice.h>
19#include <linux/trdevice.h>
20#include <linux/skbuff.h> 18#include <linux/skbuff.h>
21#include <linux/export.h> 19#include <linux/export.h>
22#include <net/llc.h> 20#include <net/llc.h>
@@ -37,7 +35,6 @@ int llc_mac_hdr_init(struct sk_buff *skb,
37 int rc = -EINVAL; 35 int rc = -EINVAL;
38 36
39 switch (skb->dev->type) { 37 switch (skb->dev->type) {
40 case ARPHRD_IEEE802_TR:
41 case ARPHRD_ETHER: 38 case ARPHRD_ETHER:
42 case ARPHRD_LOOPBACK: 39 case ARPHRD_LOOPBACK:
43 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, 40 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 94e7fca75b85..7c5073badc73 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,10 +31,6 @@ static int llc_mac_header_len(unsigned short devtype)
31 case ARPHRD_ETHER: 31 case ARPHRD_ETHER:
32 case ARPHRD_LOOPBACK: 32 case ARPHRD_LOOPBACK:
33 return sizeof(struct ethhdr); 33 return sizeof(struct ethhdr);
34#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
35 case ARPHRD_IEEE802_TR:
36 return sizeof(struct trh_hdr);
37#endif
38 } 34 }
39 return 0; 35 return 0;
40} 36}
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index e2ebe3586263..d75306b9c2f3 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -7,6 +7,7 @@
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <net/net_namespace.h>
10#include <net/llc.h> 11#include <net/llc.h>
11 12
12#ifndef CONFIG_SYSCTL 13#ifndef CONFIG_SYSCTL
@@ -56,48 +57,29 @@ static struct ctl_table llc_station_table[] = {
56 { }, 57 { },
57}; 58};
58 59
59static struct ctl_table llc2_dir_timeout_table[] = { 60static struct ctl_table_header *llc2_timeout_header;
60 { 61static struct ctl_table_header *llc_station_header;
61 .procname = "timeout",
62 .mode = 0555,
63 .child = llc2_timeout_table,
64 },
65 { },
66};
67
68static struct ctl_table llc_table[] = {
69 {
70 .procname = "llc2",
71 .mode = 0555,
72 .child = llc2_dir_timeout_table,
73 },
74 {
75 .procname = "station",
76 .mode = 0555,
77 .child = llc_station_table,
78 },
79 { },
80};
81
82static struct ctl_path llc_path[] = {
83 { .procname = "net", },
84 { .procname = "llc", },
85 { }
86};
87
88static struct ctl_table_header *llc_table_header;
89 62
90int __init llc_sysctl_init(void) 63int __init llc_sysctl_init(void)
91{ 64{
92 llc_table_header = register_sysctl_paths(llc_path, llc_table); 65 llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
66 llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
93 67
94 return llc_table_header ? 0 : -ENOMEM; 68 if (!llc2_timeout_header || !llc_station_header) {
69 llc_sysctl_exit();
70 return -ENOMEM;
71 }
72 return 0;
95} 73}
96 74
97void llc_sysctl_exit(void) 75void llc_sysctl_exit(void)
98{ 76{
99 if (llc_table_header) { 77 if (llc2_timeout_header) {
100 unregister_sysctl_table(llc_table_header); 78 unregister_net_sysctl_table(llc2_timeout_header);
101 llc_table_header = NULL; 79 llc2_timeout_header = NULL;
80 }
81 if (llc_station_header) {
82 unregister_net_sysctl_table(llc_station_header);
83 llc_station_header = NULL;
102 } 84 }
103} 85}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 96ddb72760b9..8d249d705980 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -225,6 +225,17 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
225 225
226 Do not select this option. 226 Do not select this option.
227 227
228config MAC80211_VERBOSE_MESH_SYNC_DEBUG
229 bool "Verbose mesh mesh synchronization debugging"
230 depends on MAC80211_DEBUG_MENU
231 depends on MAC80211_MESH
232 ---help---
233 Selecting this option causes mac80211 to print out very verbose mesh
234 synchronization debugging messages (when mac80211 is taking part in a
235 mesh network).
236
237 Do not select this option.
238
228config MAC80211_VERBOSE_TDLS_DEBUG 239config MAC80211_VERBOSE_TDLS_DEBUG
229 bool "Verbose TDLS debugging" 240 bool "Verbose TDLS debugging"
230 depends on MAC80211_DEBUG_MENU 241 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index d540c3b160f3..3e9d931bba35 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,7 @@ mac80211-y := \
9 scan.o offchannel.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o work.o \ 12 work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
@@ -25,7 +25,7 @@ mac80211-y := \
25 wme.o \ 25 wme.o \
26 event.o \ 26 event.o \
27 chan.o \ 27 chan.o \
28 driver-trace.o 28 driver-trace.o mlme.o
29 29
30mac80211-$(CONFIG_MAC80211_LEDS) += led.o 30mac80211-$(CONFIG_MAC80211_LEDS) += led.o
31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -38,7 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
38 mesh.o \ 38 mesh.o \
39 mesh_pathtbl.o \ 39 mesh_pathtbl.o \
40 mesh_plink.o \ 40 mesh_plink.o \
41 mesh_hwmp.o 41 mesh_hwmp.o \
42 mesh_sync.o
42 43
43mac80211-$(CONFIG_PM) += pm.o 44mac80211-$(CONFIG_PM) += pm.o
44 45
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 1068f668ac4e..26ddb699d693 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,6 +49,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
49 container_of(h, struct tid_ampdu_rx, rcu_head); 49 container_of(h, struct tid_ampdu_rx, rcu_head);
50 int i; 50 int i;
51 51
52 del_timer_sync(&tid_rx->reorder_timer);
53
52 for (i = 0; i < tid_rx->buf_size; i++) 54 for (i = 0; i < tid_rx->buf_size; i++)
53 dev_kfree_skb(tid_rx->reorder_buf[i]); 55 dev_kfree_skb(tid_rx->reorder_buf[i]);
54 kfree(tid_rx->reorder_buf); 56 kfree(tid_rx->reorder_buf);
@@ -91,7 +93,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
91 tid, WLAN_BACK_RECIPIENT, reason); 93 tid, WLAN_BACK_RECIPIENT, reason);
92 94
93 del_timer_sync(&tid_rx->session_timer); 95 del_timer_sync(&tid_rx->session_timer);
94 del_timer_sync(&tid_rx->reorder_timer);
95 96
96 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 97 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
97} 98}
@@ -141,6 +142,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
141 u8 *timer_to_id = ptid - *ptid; 142 u8 *timer_to_id = ptid - *ptid;
142 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 143 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
143 timer_to_tid[0]); 144 timer_to_tid[0]);
145 struct tid_ampdu_rx *tid_rx;
146 unsigned long timeout;
147
148 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
149 if (!tid_rx)
150 return;
151
152 timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
153 if (time_is_after_jiffies(timeout)) {
154 mod_timer(&tid_rx->session_timer, timeout);
155 return;
156 }
144 157
145#ifdef CONFIG_MAC80211_HT_DEBUG 158#ifdef CONFIG_MAC80211_HT_DEBUG
146 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 159 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
@@ -247,11 +260,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
247 (buf_size > IEEE80211_MAX_AMPDU_BUF)) { 260 (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
248 status = WLAN_STATUS_INVALID_QOS_PARAM; 261 status = WLAN_STATUS_INVALID_QOS_PARAM;
249#ifdef CONFIG_MAC80211_HT_DEBUG 262#ifdef CONFIG_MAC80211_HT_DEBUG
250 if (net_ratelimit()) 263 net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
251 printk(KERN_DEBUG "AddBA Req with bad params from " 264 mgmt->sa, tid, ba_policy, buf_size);
252 "%pM on tid %u. policy %d, buffer size %d\n",
253 mgmt->sa, tid, ba_policy,
254 buf_size);
255#endif /* CONFIG_MAC80211_HT_DEBUG */ 265#endif /* CONFIG_MAC80211_HT_DEBUG */
256 goto end_no_lock; 266 goto end_no_lock;
257 } 267 }
@@ -268,10 +278,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
268 278
269 if (sta->ampdu_mlme.tid_rx[tid]) { 279 if (sta->ampdu_mlme.tid_rx[tid]) {
270#ifdef CONFIG_MAC80211_HT_DEBUG 280#ifdef CONFIG_MAC80211_HT_DEBUG
271 if (net_ratelimit()) 281 net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
272 printk(KERN_DEBUG "unexpected AddBA Req from " 282 mgmt->sa, tid);
273 "%pM on tid %u\n",
274 mgmt->sa, tid);
275#endif /* CONFIG_MAC80211_HT_DEBUG */ 283#endif /* CONFIG_MAC80211_HT_DEBUG */
276 284
277 /* delete existing Rx BA session on the same tid */ 285 /* delete existing Rx BA session on the same tid */
@@ -290,7 +298,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
290 /* rx timer */ 298 /* rx timer */
291 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; 299 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
292 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 300 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
293 init_timer(&tid_agg_rx->session_timer); 301 init_timer_deferrable(&tid_agg_rx->session_timer);
294 302
295 /* rx reorder timer */ 303 /* rx reorder timer */
296 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired; 304 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
@@ -334,8 +342,10 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
334 /* activate it for RX */ 342 /* activate it for RX */
335 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); 343 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
336 344
337 if (timeout) 345 if (timeout) {
338 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); 346 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
347 tid_agg_rx->last_rx = jiffies;
348 }
339 349
340end: 350end:
341 mutex_unlock(&sta->ampdu_mlme.mtx); 351 mutex_unlock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 76be61744198..7cf07158805c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -286,25 +286,25 @@ static inline int ieee80211_ac_from_tid(int tid)
286 * a global "agg_queue_stop" refcount. 286 * a global "agg_queue_stop" refcount.
287 */ 287 */
288static void __acquires(agg_queue) 288static void __acquires(agg_queue)
289ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) 289ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
290{ 290{
291 int queue = ieee80211_ac_from_tid(tid); 291 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
292 292
293 if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) 293 if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
294 ieee80211_stop_queue_by_reason( 294 ieee80211_stop_queue_by_reason(
295 &local->hw, queue, 295 &sdata->local->hw, queue,
296 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 296 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
297 __acquire(agg_queue); 297 __acquire(agg_queue);
298} 298}
299 299
300static void __releases(agg_queue) 300static void __releases(agg_queue)
301ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) 301ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
302{ 302{
303 int queue = ieee80211_ac_from_tid(tid); 303 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
304 304
305 if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) 305 if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
306 ieee80211_wake_queue_by_reason( 306 ieee80211_wake_queue_by_reason(
307 &local->hw, queue, 307 &sdata->local->hw, queue,
308 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 308 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
309 __release(agg_queue); 309 __release(agg_queue);
310} 310}
@@ -314,13 +314,14 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
314 * requires a call to ieee80211_agg_splice_finish later 314 * requires a call to ieee80211_agg_splice_finish later
315 */ 315 */
316static void __acquires(agg_queue) 316static void __acquires(agg_queue)
317ieee80211_agg_splice_packets(struct ieee80211_local *local, 317ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
318 struct tid_ampdu_tx *tid_tx, u16 tid) 318 struct tid_ampdu_tx *tid_tx, u16 tid)
319{ 319{
320 int queue = ieee80211_ac_from_tid(tid); 320 struct ieee80211_local *local = sdata->local;
321 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
321 unsigned long flags; 322 unsigned long flags;
322 323
323 ieee80211_stop_queue_agg(local, tid); 324 ieee80211_stop_queue_agg(sdata, tid);
324 325
325 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" 326 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
326 " from the pending queue\n", tid)) 327 " from the pending queue\n", tid))
@@ -336,9 +337,9 @@ ieee80211_agg_splice_packets(struct ieee80211_local *local,
336} 337}
337 338
338static void __releases(agg_queue) 339static void __releases(agg_queue)
339ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) 340ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
340{ 341{
341 ieee80211_wake_queue_agg(local, tid); 342 ieee80211_wake_queue_agg(sdata, tid);
342} 343}
343 344
344void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) 345void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -376,9 +377,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
376 " tid %d\n", tid); 377 " tid %d\n", tid);
377#endif 378#endif
378 spin_lock_bh(&sta->lock); 379 spin_lock_bh(&sta->lock);
379 ieee80211_agg_splice_packets(local, tid_tx, tid); 380 ieee80211_agg_splice_packets(sdata, tid_tx, tid);
380 ieee80211_assign_tid_tx(sta, tid, NULL); 381 ieee80211_assign_tid_tx(sta, tid, NULL);
381 ieee80211_agg_splice_finish(local, tid); 382 ieee80211_agg_splice_finish(sdata, tid);
382 spin_unlock_bh(&sta->lock); 383 spin_unlock_bh(&sta->lock);
383 384
384 kfree_rcu(tid_tx, rcu_head); 385 kfree_rcu(tid_tx, rcu_head);
@@ -417,6 +418,24 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
417 u8 *timer_to_id = ptid - *ptid; 418 u8 *timer_to_id = ptid - *ptid;
418 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 419 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
419 timer_to_tid[0]); 420 timer_to_tid[0]);
421 struct tid_ampdu_tx *tid_tx;
422 unsigned long timeout;
423
424 rcu_read_lock();
425 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
426 if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
427 rcu_read_unlock();
428 return;
429 }
430
431 timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
432 if (time_is_after_jiffies(timeout)) {
433 mod_timer(&tid_tx->session_timer, timeout);
434 rcu_read_unlock();
435 return;
436 }
437
438 rcu_read_unlock();
420 439
421#ifdef CONFIG_MAC80211_HT_DEBUG 440#ifdef CONFIG_MAC80211_HT_DEBUG
422 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); 441 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
@@ -542,7 +561,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
542 /* tx timer */ 561 /* tx timer */
543 tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; 562 tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
544 tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 563 tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
545 init_timer(&tid_tx->session_timer); 564 init_timer_deferrable(&tid_tx->session_timer);
546 565
547 /* assign a dialog token */ 566 /* assign a dialog token */
548 sta->ampdu_mlme.dialog_token_allocator++; 567 sta->ampdu_mlme.dialog_token_allocator++;
@@ -586,14 +605,14 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
586 */ 605 */
587 spin_lock_bh(&sta->lock); 606 spin_lock_bh(&sta->lock);
588 607
589 ieee80211_agg_splice_packets(local, tid_tx, tid); 608 ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
590 /* 609 /*
591 * Now mark as operational. This will be visible 610 * Now mark as operational. This will be visible
592 * in the TX path, and lets it go lock-free in 611 * in the TX path, and lets it go lock-free in
593 * the common case. 612 * the common case.
594 */ 613 */
595 set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); 614 set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
596 ieee80211_agg_splice_finish(local, tid); 615 ieee80211_agg_splice_finish(sta->sdata, tid);
597 616
598 spin_unlock_bh(&sta->lock); 617 spin_unlock_bh(&sta->lock);
599} 618}
@@ -778,12 +797,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
778 * more. 797 * more.
779 */ 798 */
780 799
781 ieee80211_agg_splice_packets(local, tid_tx, tid); 800 ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
782 801
783 /* future packets must not find the tid_tx struct any more */ 802 /* future packets must not find the tid_tx struct any more */
784 ieee80211_assign_tid_tx(sta, tid, NULL); 803 ieee80211_assign_tid_tx(sta, tid, NULL);
785 804
786 ieee80211_agg_splice_finish(local, tid); 805 ieee80211_agg_splice_finish(sta->sdata, tid);
787 806
788 kfree_rcu(tid_tx, rcu_head); 807 kfree_rcu(tid_tx, rcu_head);
789 808
@@ -884,9 +903,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
884 903
885 sta->ampdu_mlme.addba_req_num[tid] = 0; 904 sta->ampdu_mlme.addba_req_num[tid] = 0;
886 905
887 if (tid_tx->timeout) 906 if (tid_tx->timeout) {
888 mod_timer(&tid_tx->session_timer, 907 mod_timer(&tid_tx->session_timer,
889 TU_TO_EXP_TIME(tid_tx->timeout)); 908 TU_TO_EXP_TIME(tid_tx->timeout));
909 tid_tx->last_tx = jiffies;
910 }
890 911
891 } else { 912 } else {
892 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 913 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 296620d6ca0c..495831ee48f1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -336,6 +336,20 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
336 rate->mcs = idx; 336 rate->mcs = idx;
337} 337}
338 338
339void sta_set_rate_info_tx(struct sta_info *sta,
340 const struct ieee80211_tx_rate *rate,
341 struct rate_info *rinfo)
342{
343 rinfo->flags = 0;
344 if (rate->flags & IEEE80211_TX_RC_MCS)
345 rinfo->flags |= RATE_INFO_FLAGS_MCS;
346 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
347 rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
348 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
349 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
350 rate_idx_to_bitrate(rinfo, sta, rate->idx);
351}
352
339static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 353static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
340{ 354{
341 struct ieee80211_sub_if_data *sdata = sta->sdata; 355 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -378,14 +392,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
378 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); 392 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
379 } 393 }
380 394
381 sinfo->txrate.flags = 0; 395 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
382 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
383 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
384 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
385 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
386 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
387 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
388 rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
389 396
390 sinfo->rxrate.flags = 0; 397 sinfo->rxrate.flags = 0;
391 if (sta->last_rx_rate_flag & RX_FLAG_HT) 398 if (sta->last_rx_rate_flag & RX_FLAG_HT)
@@ -405,6 +412,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
405 sinfo->llid = le16_to_cpu(sta->llid); 412 sinfo->llid = le16_to_cpu(sta->llid);
406 sinfo->plid = le16_to_cpu(sta->plid); 413 sinfo->plid = le16_to_cpu(sta->plid);
407 sinfo->plink_state = sta->plink_state; 414 sinfo->plink_state = sta->plink_state;
415 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
416 sinfo->filled |= STATION_INFO_T_OFFSET;
417 sinfo->t_offset = sta->t_offset;
418 }
408#endif 419#endif
409 } 420 }
410 421
@@ -439,6 +450,180 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
439 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 450 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
440} 451}
441 452
453static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
454 "rx_packets", "rx_bytes", "wep_weak_iv_count",
455 "rx_duplicates", "rx_fragments", "rx_dropped",
456 "tx_packets", "tx_bytes", "tx_fragments",
457 "tx_filtered", "tx_retry_failed", "tx_retries",
458 "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
459 "channel", "noise", "ch_time", "ch_time_busy",
460 "ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
461};
462#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats)
463
464static int ieee80211_get_et_sset_count(struct wiphy *wiphy,
465 struct net_device *dev,
466 int sset)
467{
468 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
469 int rv = 0;
470
471 if (sset == ETH_SS_STATS)
472 rv += STA_STATS_LEN;
473
474 rv += drv_get_et_sset_count(sdata, sset);
475
476 if (rv == 0)
477 return -EOPNOTSUPP;
478 return rv;
479}
480
481static void ieee80211_get_et_stats(struct wiphy *wiphy,
482 struct net_device *dev,
483 struct ethtool_stats *stats,
484 u64 *data)
485{
486 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
487 struct sta_info *sta;
488 struct ieee80211_local *local = sdata->local;
489 struct station_info sinfo;
490 struct survey_info survey;
491 int i, q;
492#define STA_STATS_SURVEY_LEN 7
493
494 memset(data, 0, sizeof(u64) * STA_STATS_LEN);
495
496#define ADD_STA_STATS(sta) \
497 do { \
498 data[i++] += sta->rx_packets; \
499 data[i++] += sta->rx_bytes; \
500 data[i++] += sta->wep_weak_iv_count; \
501 data[i++] += sta->num_duplicates; \
502 data[i++] += sta->rx_fragments; \
503 data[i++] += sta->rx_dropped; \
504 \
505 data[i++] += sta->tx_packets; \
506 data[i++] += sta->tx_bytes; \
507 data[i++] += sta->tx_fragments; \
508 data[i++] += sta->tx_filtered_count; \
509 data[i++] += sta->tx_retry_failed; \
510 data[i++] += sta->tx_retry_count; \
511 data[i++] += sta->beacon_loss_count; \
512 } while (0)
513
514 /* For Managed stations, find the single station based on BSSID
515 * and use that. For interface types, iterate through all available
516 * stations and add stats for any station that is assigned to this
517 * network device.
518 */
519
520 rcu_read_lock();
521
522 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
523 sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
524
525 if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
526 goto do_survey;
527
528 i = 0;
529 ADD_STA_STATS(sta);
530
531 data[i++] = sta->sta_state;
532
533 sinfo.filled = 0;
534 sta_set_sinfo(sta, &sinfo);
535
536 if (sinfo.filled | STATION_INFO_TX_BITRATE)
537 data[i] = 100000 *
538 cfg80211_calculate_bitrate(&sinfo.txrate);
539 i++;
540 if (sinfo.filled | STATION_INFO_RX_BITRATE)
541 data[i] = 100000 *
542 cfg80211_calculate_bitrate(&sinfo.rxrate);
543 i++;
544
545 if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
546 data[i] = (u8)sinfo.signal_avg;
547 i++;
548 } else {
549 list_for_each_entry_rcu(sta, &local->sta_list, list) {
550 /* Make sure this station belongs to the proper dev */
551 if (sta->sdata->dev != dev)
552 continue;
553
554 i = 0;
555 ADD_STA_STATS(sta);
556 }
557 }
558
559do_survey:
560 i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
561 /* Get survey stats for current channel */
562 q = 0;
563 while (true) {
564 survey.filled = 0;
565 if (drv_get_survey(local, q, &survey) != 0) {
566 survey.filled = 0;
567 break;
568 }
569
570 if (survey.channel &&
571 (local->oper_channel->center_freq ==
572 survey.channel->center_freq))
573 break;
574 q++;
575 }
576
577 if (survey.filled)
578 data[i++] = survey.channel->center_freq;
579 else
580 data[i++] = 0;
581 if (survey.filled & SURVEY_INFO_NOISE_DBM)
582 data[i++] = (u8)survey.noise;
583 else
584 data[i++] = -1LL;
585 if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
586 data[i++] = survey.channel_time;
587 else
588 data[i++] = -1LL;
589 if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
590 data[i++] = survey.channel_time_busy;
591 else
592 data[i++] = -1LL;
593 if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
594 data[i++] = survey.channel_time_ext_busy;
595 else
596 data[i++] = -1LL;
597 if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
598 data[i++] = survey.channel_time_rx;
599 else
600 data[i++] = -1LL;
601 if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
602 data[i++] = survey.channel_time_tx;
603 else
604 data[i++] = -1LL;
605
606 rcu_read_unlock();
607
608 if (WARN_ON(i != STA_STATS_LEN))
609 return;
610
611 drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
612}
613
614static void ieee80211_get_et_strings(struct wiphy *wiphy,
615 struct net_device *dev,
616 u32 sset, u8 *data)
617{
618 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
619 int sz_sta_stats = 0;
620
621 if (sset == ETH_SS_STATS) {
622 sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
623 memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats);
624 }
625 drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
626}
442 627
443static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, 628static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
444 int idx, u8 *mac, struct station_info *sinfo) 629 int idx, u8 *mac, struct station_info *sinfo)
@@ -489,27 +674,13 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
489 return ret; 674 return ret;
490} 675}
491 676
492static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
493 struct beacon_parameters *params)
494{
495 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
496
497 bss_conf->ssid_len = params->ssid_len;
498
499 if (params->ssid_len)
500 memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
501
502 bss_conf->hidden_ssid =
503 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
504}
505
506static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 677static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
507 u8 *resp, size_t resp_len) 678 const u8 *resp, size_t resp_len)
508{ 679{
509 struct sk_buff *new, *old; 680 struct sk_buff *new, *old;
510 681
511 if (!resp || !resp_len) 682 if (!resp || !resp_len)
512 return -EINVAL; 683 return 1;
513 684
514 old = rtnl_dereference(sdata->u.ap.probe_resp); 685 old = rtnl_dereference(sdata->u.ap.probe_resp);
515 686
@@ -520,50 +691,28 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
520 memcpy(skb_put(new, resp_len), resp, resp_len); 691 memcpy(skb_put(new, resp_len), resp, resp_len);
521 692
522 rcu_assign_pointer(sdata->u.ap.probe_resp, new); 693 rcu_assign_pointer(sdata->u.ap.probe_resp, new);
523 synchronize_rcu(); 694 if (old) {
524 695 /* TODO: use call_rcu() */
525 if (old) 696 synchronize_rcu();
526 dev_kfree_skb(old); 697 dev_kfree_skb(old);
698 }
527 699
528 return 0; 700 return 0;
529} 701}
530 702
531/* 703static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
532 * This handles both adding a beacon and setting new beacon info 704 struct cfg80211_beacon_data *params)
533 */
534static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
535 struct beacon_parameters *params)
536{ 705{
537 struct beacon_data *new, *old; 706 struct beacon_data *new, *old;
538 int new_head_len, new_tail_len; 707 int new_head_len, new_tail_len;
539 int size; 708 int size, err;
540 int err = -EINVAL; 709 u32 changed = BSS_CHANGED_BEACON;
541 u32 changed = 0;
542 710
543 old = rtnl_dereference(sdata->u.ap.beacon); 711 old = rtnl_dereference(sdata->u.ap.beacon);
544 712
545 /* head must not be zero-length */
546 if (params->head && !params->head_len)
547 return -EINVAL;
548
549 /*
550 * This is a kludge. beacon interval should really be part
551 * of the beacon information.
552 */
553 if (params->interval &&
554 (sdata->vif.bss_conf.beacon_int != params->interval)) {
555 sdata->vif.bss_conf.beacon_int = params->interval;
556 ieee80211_bss_info_change_notify(sdata,
557 BSS_CHANGED_BEACON_INT);
558 }
559
560 /* Need to have a beacon head if we don't have one yet */ 713 /* Need to have a beacon head if we don't have one yet */
561 if (!params->head && !old) 714 if (!params->head && !old)
562 return err; 715 return -EINVAL;
563
564 /* sorry, no way to start beaconing without dtim period */
565 if (!params->dtim_period && !old)
566 return err;
567 716
568 /* new or old head? */ 717 /* new or old head? */
569 if (params->head) 718 if (params->head)
@@ -586,12 +735,6 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
586 735
587 /* start filling the new info now */ 736 /* start filling the new info now */
588 737
589 /* new or old dtim period? */
590 if (params->dtim_period)
591 new->dtim_period = params->dtim_period;
592 else
593 new->dtim_period = old->dtim_period;
594
595 /* 738 /*
596 * pointers go into the block we allocated, 739 * pointers go into the block we allocated,
597 * memory is | beacon_data | head | tail | 740 * memory is | beacon_data | head | tail |
@@ -614,46 +757,37 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
614 if (old) 757 if (old)
615 memcpy(new->tail, old->tail, new_tail_len); 758 memcpy(new->tail, old->tail, new_tail_len);
616 759
617 sdata->vif.bss_conf.dtim_period = new->dtim_period;
618
619 rcu_assign_pointer(sdata->u.ap.beacon, new);
620
621 synchronize_rcu();
622
623 kfree(old);
624
625 err = ieee80211_set_probe_resp(sdata, params->probe_resp, 760 err = ieee80211_set_probe_resp(sdata, params->probe_resp,
626 params->probe_resp_len); 761 params->probe_resp_len);
627 if (!err) 762 if (err < 0)
763 return err;
764 if (err == 0)
628 changed |= BSS_CHANGED_AP_PROBE_RESP; 765 changed |= BSS_CHANGED_AP_PROBE_RESP;
629 766
630 ieee80211_config_ap_ssid(sdata, params); 767 rcu_assign_pointer(sdata->u.ap.beacon, new);
631 changed |= BSS_CHANGED_BEACON_ENABLED | 768
632 BSS_CHANGED_BEACON | 769 if (old)
633 BSS_CHANGED_SSID; 770 kfree_rcu(old, rcu_head);
634 771
635 ieee80211_bss_info_change_notify(sdata, changed); 772 return changed;
636 return 0;
637} 773}
638 774
639static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 775static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
640 struct beacon_parameters *params) 776 struct cfg80211_ap_settings *params)
641{ 777{
642 struct ieee80211_sub_if_data *sdata; 778 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
643 struct beacon_data *old; 779 struct beacon_data *old;
644 struct ieee80211_sub_if_data *vlan; 780 struct ieee80211_sub_if_data *vlan;
645 int ret; 781 u32 changed = BSS_CHANGED_BEACON_INT |
646 782 BSS_CHANGED_BEACON_ENABLED |
647 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 783 BSS_CHANGED_BEACON |
784 BSS_CHANGED_SSID;
785 int err;
648 786
649 old = rtnl_dereference(sdata->u.ap.beacon); 787 old = rtnl_dereference(sdata->u.ap.beacon);
650 if (old) 788 if (old)
651 return -EALREADY; 789 return -EALREADY;
652 790
653 ret = ieee80211_config_beacon(sdata, params);
654 if (ret)
655 return ret;
656
657 /* 791 /*
658 * Apply control port protocol, this allows us to 792 * Apply control port protocol, this allows us to
659 * not encrypt dynamic WEP control frames. 793 * not encrypt dynamic WEP control frames.
@@ -667,14 +801,36 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
667 params->crypto.control_port_no_encrypt; 801 params->crypto.control_port_no_encrypt;
668 } 802 }
669 803
804 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
805 sdata->vif.bss_conf.dtim_period = params->dtim_period;
806
807 sdata->vif.bss_conf.ssid_len = params->ssid_len;
808 if (params->ssid_len)
809 memcpy(sdata->vif.bss_conf.ssid, params->ssid,
810 params->ssid_len);
811 sdata->vif.bss_conf.hidden_ssid =
812 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
813
814 err = ieee80211_assign_beacon(sdata, &params->beacon);
815 if (err < 0)
816 return err;
817 changed |= err;
818
819 ieee80211_bss_info_change_notify(sdata, changed);
820
821 netif_carrier_on(dev);
822 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
823 netif_carrier_on(vlan->dev);
824
670 return 0; 825 return 0;
671} 826}
672 827
673static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, 828static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
674 struct beacon_parameters *params) 829 struct cfg80211_beacon_data *params)
675{ 830{
676 struct ieee80211_sub_if_data *sdata; 831 struct ieee80211_sub_if_data *sdata;
677 struct beacon_data *old; 832 struct beacon_data *old;
833 int err;
678 834
679 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 835 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
680 836
@@ -682,12 +838,16 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
682 if (!old) 838 if (!old)
683 return -ENOENT; 839 return -ENOENT;
684 840
685 return ieee80211_config_beacon(sdata, params); 841 err = ieee80211_assign_beacon(sdata, params);
842 if (err < 0)
843 return err;
844 ieee80211_bss_info_change_notify(sdata, err);
845 return 0;
686} 846}
687 847
688static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) 848static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
689{ 849{
690 struct ieee80211_sub_if_data *sdata; 850 struct ieee80211_sub_if_data *sdata, *vlan;
691 struct beacon_data *old; 851 struct beacon_data *old;
692 852
693 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 853 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -696,11 +856,16 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
696 if (!old) 856 if (!old)
697 return -ENOENT; 857 return -ENOENT;
698 858
859 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
860 netif_carrier_off(vlan->dev);
861 netif_carrier_off(dev);
862
699 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 863 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
700 synchronize_rcu(); 864
701 kfree(old); 865 kfree_rcu(old, rcu_head);
702 866
703 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 867 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
868
704 return 0; 869 return 0;
705} 870}
706 871
@@ -776,12 +941,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
776 941
777 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && 942 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
778 !test_sta_flag(sta, WLAN_STA_AUTH)) { 943 !test_sta_flag(sta, WLAN_STA_AUTH)) {
779 ret = sta_info_move_state_checked(sta, 944 ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
780 IEEE80211_STA_AUTH);
781 if (ret) 945 if (ret)
782 return ret; 946 return ret;
783 ret = sta_info_move_state_checked(sta, 947 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
784 IEEE80211_STA_ASSOC);
785 if (ret) 948 if (ret)
786 return ret; 949 return ret;
787 } 950 }
@@ -789,11 +952,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
789 952
790 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { 953 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
791 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 954 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
792 ret = sta_info_move_state_checked(sta, 955 ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
793 IEEE80211_STA_AUTHORIZED);
794 else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 956 else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
795 ret = sta_info_move_state_checked(sta, 957 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
796 IEEE80211_STA_ASSOC);
797 if (ret) 958 if (ret)
798 return ret; 959 return ret;
799 } 960 }
@@ -805,12 +966,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
805 966
806 if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && 967 if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
807 test_sta_flag(sta, WLAN_STA_AUTH)) { 968 test_sta_flag(sta, WLAN_STA_AUTH)) {
808 ret = sta_info_move_state_checked(sta, 969 ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
809 IEEE80211_STA_AUTH);
810 if (ret) 970 if (ret)
811 return ret; 971 return ret;
812 ret = sta_info_move_state_checked(sta, 972 ret = sta_info_move_state(sta, IEEE80211_STA_NONE);
813 IEEE80211_STA_NONE);
814 if (ret) 973 if (ret)
815 return ret; 974 return ret;
816 } 975 }
@@ -934,7 +1093,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
934 } else 1093 } else
935 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1094 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
936 1095
937 if (compare_ether_addr(mac, sdata->vif.addr) == 0) 1096 if (ether_addr_equal(mac, sdata->vif.addr))
938 return -EINVAL; 1097 return -EINVAL;
939 1098
940 if (is_multicast_ether_addr(mac)) 1099 if (is_multicast_ether_addr(mac))
@@ -944,8 +1103,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
944 if (!sta) 1103 if (!sta)
945 return -ENOMEM; 1104 return -ENOMEM;
946 1105
947 sta_info_move_state(sta, IEEE80211_STA_AUTH); 1106 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
948 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 1107 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
949 1108
950 err = sta_apply_parameters(local, sta, params); 1109 err = sta_apply_parameters(local, sta, params);
951 if (err) { 1110 if (err) {
@@ -1001,6 +1160,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1001 struct ieee80211_local *local = wiphy_priv(wiphy); 1160 struct ieee80211_local *local = wiphy_priv(wiphy);
1002 struct sta_info *sta; 1161 struct sta_info *sta;
1003 struct ieee80211_sub_if_data *vlansdata; 1162 struct ieee80211_sub_if_data *vlansdata;
1163 int err;
1004 1164
1005 mutex_lock(&local->sta_mtx); 1165 mutex_lock(&local->sta_mtx);
1006 1166
@@ -1019,6 +1179,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1019 } 1179 }
1020 1180
1021 if (params->vlan && params->vlan != sta->sdata->dev) { 1181 if (params->vlan && params->vlan != sta->sdata->dev) {
1182 bool prev_4addr = false;
1183 bool new_4addr = false;
1184
1022 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 1185 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
1023 1186
1024 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1187 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
@@ -1034,13 +1197,33 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1034 } 1197 }
1035 1198
1036 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 1199 rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
1200 new_4addr = true;
1201 }
1202
1203 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1204 sta->sdata->u.vlan.sta) {
1205 rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
1206 prev_4addr = true;
1037 } 1207 }
1038 1208
1039 sta->sdata = vlansdata; 1209 sta->sdata = vlansdata;
1210
1211 if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
1212 prev_4addr != new_4addr) {
1213 if (new_4addr)
1214 atomic_dec(&sta->sdata->bss->num_mcast_sta);
1215 else
1216 atomic_inc(&sta->sdata->bss->num_mcast_sta);
1217 }
1218
1040 ieee80211_send_layer2_update(sta); 1219 ieee80211_send_layer2_update(sta);
1041 } 1220 }
1042 1221
1043 sta_apply_parameters(local, sta, params); 1222 err = sta_apply_parameters(local, sta, params);
1223 if (err) {
1224 mutex_unlock(&local->sta_mtx);
1225 return err;
1226 }
1044 1227
1045 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) 1228 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
1046 rate_control_rate_init(sta); 1229 rate_control_rate_init(sta);
@@ -1257,6 +1440,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1257 /* now copy the rest of the setup parameters */ 1440 /* now copy the rest of the setup parameters */
1258 ifmsh->mesh_id_len = setup->mesh_id_len; 1441 ifmsh->mesh_id_len = setup->mesh_id_len;
1259 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); 1442 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
1443 ifmsh->mesh_sp_id = setup->sync_method;
1260 ifmsh->mesh_pp_id = setup->path_sel_proto; 1444 ifmsh->mesh_pp_id = setup->path_sel_proto;
1261 ifmsh->mesh_pm_id = setup->path_metric; 1445 ifmsh->mesh_pm_id = setup->path_metric;
1262 ifmsh->security = IEEE80211_MESH_SEC_NONE; 1446 ifmsh->security = IEEE80211_MESH_SEC_NONE;
@@ -1301,6 +1485,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1301 conf->dot11MeshTTL = nconf->element_ttl; 1485 conf->dot11MeshTTL = nconf->element_ttl;
1302 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) 1486 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
1303 conf->auto_open_plinks = nconf->auto_open_plinks; 1487 conf->auto_open_plinks = nconf->auto_open_plinks;
1488 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
1489 conf->dot11MeshNbrOffsetMaxNeighbor =
1490 nconf->dot11MeshNbrOffsetMaxNeighbor;
1304 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) 1491 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask))
1305 conf->dot11MeshHWMPmaxPREQretries = 1492 conf->dot11MeshHWMPmaxPREQretries =
1306 nconf->dot11MeshHWMPmaxPREQretries; 1493 nconf->dot11MeshHWMPmaxPREQretries;
@@ -1341,6 +1528,21 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1341 conf->dot11MeshHWMPRannInterval = 1528 conf->dot11MeshHWMPRannInterval =
1342 nconf->dot11MeshHWMPRannInterval; 1529 nconf->dot11MeshHWMPRannInterval;
1343 } 1530 }
1531 if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
1532 conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
1533 if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
1534 /* our RSSI threshold implementation is supported only for
1535 * devices that report signal in dBm.
1536 */
1537 if (!(sdata->local->hw.flags & IEEE80211_HW_SIGNAL_DBM))
1538 return -ENOTSUPP;
1539 conf->rssi_threshold = nconf->rssi_threshold;
1540 }
1541 if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) {
1542 conf->ht_opmode = nconf->ht_opmode;
1543 sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
1544 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
1545 }
1344 return 0; 1546 return 0;
1345} 1547}
1346 1548
@@ -1449,6 +1651,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1449 if (!local->ops->conf_tx) 1651 if (!local->ops->conf_tx)
1450 return -EOPNOTSUPP; 1652 return -EOPNOTSUPP;
1451 1653
1654 if (local->hw.queues < IEEE80211_NUM_ACS)
1655 return -EOPNOTSUPP;
1656
1452 memset(&p, 0, sizeof(p)); 1657 memset(&p, 0, sizeof(p));
1453 p.aifs = params->aifs; 1658 p.aifs = params->aifs;
1454 p.cw_max = params->cwmax; 1659 p.cw_max = params->cwmax;
@@ -1461,14 +1666,11 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1461 */ 1666 */
1462 p.uapsd = false; 1667 p.uapsd = false;
1463 1668
1464 if (params->queue >= local->hw.queues) 1669 sdata->tx_conf[params->ac] = p;
1465 return -EINVAL; 1670 if (drv_conf_tx(local, sdata, params->ac, &p)) {
1466
1467 sdata->tx_conf[params->queue] = p;
1468 if (drv_conf_tx(local, sdata, params->queue, &p)) {
1469 wiphy_debug(local->hw.wiphy, 1671 wiphy_debug(local->hw.wiphy,
1470 "failed to set TX queue parameters for queue %d\n", 1672 "failed to set TX queue parameters for AC %d\n",
1471 params->queue); 1673 params->ac);
1472 return -EINVAL; 1674 return -EINVAL;
1473 } 1675 }
1474 1676
@@ -1622,19 +1824,15 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1622} 1824}
1623 1825
1624static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, 1826static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1625 struct cfg80211_deauth_request *req, 1827 struct cfg80211_deauth_request *req)
1626 void *cookie)
1627{ 1828{
1628 return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), 1829 return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req);
1629 req, cookie);
1630} 1830}
1631 1831
1632static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, 1832static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1633 struct cfg80211_disassoc_request *req, 1833 struct cfg80211_disassoc_request *req)
1634 void *cookie)
1635{ 1834{
1636 return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), 1835 return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
1637 req, cookie);
1638} 1836}
1639 1837
1640static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, 1838static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
@@ -1868,7 +2066,6 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1868 s32 rssi_thold, u32 rssi_hyst) 2066 s32 rssi_thold, u32 rssi_hyst)
1869{ 2067{
1870 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2068 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1871 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1872 struct ieee80211_vif *vif = &sdata->vif; 2069 struct ieee80211_vif *vif = &sdata->vif;
1873 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2070 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1874 2071
@@ -1879,14 +2076,9 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1879 bss_conf->cqm_rssi_thold = rssi_thold; 2076 bss_conf->cqm_rssi_thold = rssi_thold;
1880 bss_conf->cqm_rssi_hyst = rssi_hyst; 2077 bss_conf->cqm_rssi_hyst = rssi_hyst;
1881 2078
1882 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1883 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1884 return -EOPNOTSUPP;
1885 return 0;
1886 }
1887
1888 /* tell the driver upon association, unless already associated */ 2079 /* tell the driver upon association, unless already associated */
1889 if (sdata->u.mgd.associated) 2080 if (sdata->u.mgd.associated &&
2081 sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
1890 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); 2082 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
1891 2083
1892 return 0; 2084 return 0;
@@ -1907,8 +2099,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1907 return ret; 2099 return ret;
1908 } 2100 }
1909 2101
1910 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 2102 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1911 sdata->rc_rateidx_mask[i] = mask->control[i].legacy; 2103 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
2104 memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].mcs,
2105 sizeof(mask->control[i].mcs));
2106 }
1912 2107
1913 return 0; 2108 return 0;
1914} 2109}
@@ -2030,7 +2225,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
2030 if (wk->offchan_tx.wait && !wk->offchan_tx.status) 2225 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
2031 cfg80211_mgmt_tx_status(wk->sdata->dev, 2226 cfg80211_mgmt_tx_status(wk->sdata->dev,
2032 (unsigned long) wk->offchan_tx.frame, 2227 (unsigned long) wk->offchan_tx.frame,
2033 wk->ie, wk->ie_len, false, GFP_KERNEL); 2228 wk->data, wk->data_len, false, GFP_KERNEL);
2034 2229
2035 return WORK_DONE_DESTROY; 2230 return WORK_DONE_DESTROY;
2036} 2231}
@@ -2109,6 +2304,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2109 2304
2110 IEEE80211_SKB_CB(skb)->flags = flags; 2305 IEEE80211_SKB_CB(skb)->flags = flags;
2111 2306
2307 if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
2308 IEEE80211_SKB_CB(skb)->hw_queue =
2309 local->hw.offchannel_tx_hw_queue;
2310
2112 skb->dev = sdata->dev; 2311 skb->dev = sdata->dev;
2113 2312
2114 *cookie = (unsigned long) skb; 2313 *cookie = (unsigned long) skb;
@@ -2150,6 +2349,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2150 /* modify cookie to prevent API mismatches */ 2349 /* modify cookie to prevent API mismatches */
2151 *cookie ^= 2; 2350 *cookie ^= 2;
2152 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; 2351 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
2352 IEEE80211_SKB_CB(skb)->hw_queue =
2353 local->hw.offchannel_tx_hw_queue;
2153 local->hw_roc_skb = skb; 2354 local->hw_roc_skb = skb;
2154 local->hw_roc_skb_for_status = skb; 2355 local->hw_roc_skb_for_status = skb;
2155 mutex_unlock(&local->mtx); 2356 mutex_unlock(&local->mtx);
@@ -2181,8 +2382,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2181 wk->done = ieee80211_offchan_tx_done; 2382 wk->done = ieee80211_offchan_tx_done;
2182 wk->offchan_tx.frame = skb; 2383 wk->offchan_tx.frame = skb;
2183 wk->offchan_tx.wait = wait; 2384 wk->offchan_tx.wait = wait;
2184 wk->ie_len = len; 2385 wk->data_len = len;
2185 memcpy(wk->ie, buf, len); 2386 memcpy(wk->data, buf, len);
2186 2387
2187 ieee80211_add_work(wk); 2388 ieee80211_add_work(wk);
2188 return 0; 2389 return 0;
@@ -2369,8 +2570,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2369 tf->u.setup_req.capability = 2570 tf->u.setup_req.capability =
2370 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2571 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2371 2572
2372 ieee80211_add_srates_ie(&sdata->vif, skb); 2573 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2373 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2574 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2374 ieee80211_tdls_add_ext_capab(skb); 2575 ieee80211_tdls_add_ext_capab(skb);
2375 break; 2576 break;
2376 case WLAN_TDLS_SETUP_RESPONSE: 2577 case WLAN_TDLS_SETUP_RESPONSE:
@@ -2383,8 +2584,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2383 tf->u.setup_resp.capability = 2584 tf->u.setup_resp.capability =
2384 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2585 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2385 2586
2386 ieee80211_add_srates_ie(&sdata->vif, skb); 2587 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2387 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2588 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2388 ieee80211_tdls_add_ext_capab(skb); 2589 ieee80211_tdls_add_ext_capab(skb);
2389 break; 2590 break;
2390 case WLAN_TDLS_SETUP_CONFIRM: 2591 case WLAN_TDLS_SETUP_CONFIRM:
@@ -2444,8 +2645,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2444 mgmt->u.action.u.tdls_discover_resp.capability = 2645 mgmt->u.action.u.tdls_discover_resp.capability =
2445 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2646 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2446 2647
2447 ieee80211_add_srates_ie(&sdata->vif, skb); 2648 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2448 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2649 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2449 ieee80211_tdls_add_ext_capab(skb); 2650 ieee80211_tdls_add_ext_capab(skb);
2450 break; 2651 break;
2451 default: 2652 default:
@@ -2685,13 +2886,22 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
2685} 2886}
2686 2887
2687static struct ieee80211_channel * 2888static struct ieee80211_channel *
2688ieee80211_wiphy_get_channel(struct wiphy *wiphy) 2889ieee80211_wiphy_get_channel(struct wiphy *wiphy,
2890 enum nl80211_channel_type *type)
2689{ 2891{
2690 struct ieee80211_local *local = wiphy_priv(wiphy); 2892 struct ieee80211_local *local = wiphy_priv(wiphy);
2691 2893
2894 *type = local->_oper_channel_type;
2692 return local->oper_channel; 2895 return local->oper_channel;
2693} 2896}
2694 2897
2898#ifdef CONFIG_PM
2899static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
2900{
2901 drv_set_wakeup(wiphy_priv(wiphy), enabled);
2902}
2903#endif
2904
2695struct cfg80211_ops mac80211_config_ops = { 2905struct cfg80211_ops mac80211_config_ops = {
2696 .add_virtual_intf = ieee80211_add_iface, 2906 .add_virtual_intf = ieee80211_add_iface,
2697 .del_virtual_intf = ieee80211_del_iface, 2907 .del_virtual_intf = ieee80211_del_iface,
@@ -2701,9 +2911,9 @@ struct cfg80211_ops mac80211_config_ops = {
2701 .get_key = ieee80211_get_key, 2911 .get_key = ieee80211_get_key,
2702 .set_default_key = ieee80211_config_default_key, 2912 .set_default_key = ieee80211_config_default_key,
2703 .set_default_mgmt_key = ieee80211_config_default_mgmt_key, 2913 .set_default_mgmt_key = ieee80211_config_default_mgmt_key,
2704 .add_beacon = ieee80211_add_beacon, 2914 .start_ap = ieee80211_start_ap,
2705 .set_beacon = ieee80211_set_beacon, 2915 .change_beacon = ieee80211_change_beacon,
2706 .del_beacon = ieee80211_del_beacon, 2916 .stop_ap = ieee80211_stop_ap,
2707 .add_station = ieee80211_add_station, 2917 .add_station = ieee80211_add_station,
2708 .del_station = ieee80211_del_station, 2918 .del_station = ieee80211_del_station,
2709 .change_station = ieee80211_change_station, 2919 .change_station = ieee80211_change_station,
@@ -2760,4 +2970,10 @@ struct cfg80211_ops mac80211_config_ops = {
2760 .probe_client = ieee80211_probe_client, 2970 .probe_client = ieee80211_probe_client,
2761 .get_channel = ieee80211_wiphy_get_channel, 2971 .get_channel = ieee80211_wiphy_get_channel,
2762 .set_noack_map = ieee80211_set_noack_map, 2972 .set_noack_map = ieee80211_set_noack_map,
2973#ifdef CONFIG_PM
2974 .set_wakeup = ieee80211_set_wakeup,
2975#endif
2976 .get_et_sset_count = ieee80211_get_et_sset_count,
2977 .get_et_stats = ieee80211_get_et_stats,
2978 .get_et_strings = ieee80211_get_et_strings,
2763}; 2979};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 889c3e93e0f4..c76cf7230c7d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/nl80211.h> 5#include <linux/nl80211.h>
6#include <net/cfg80211.h>
6#include "ieee80211_i.h" 7#include "ieee80211_i.h"
7 8
8static enum ieee80211_chan_mode 9static enum ieee80211_chan_mode
@@ -20,23 +21,29 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
20 if (!ieee80211_sdata_running(sdata)) 21 if (!ieee80211_sdata_running(sdata))
21 continue; 22 continue;
22 23
23 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) 24 switch (sdata->vif.type) {
25 case NL80211_IFTYPE_MONITOR:
24 continue; 26 continue;
25 27 case NL80211_IFTYPE_STATION:
26 if (sdata->vif.type == NL80211_IFTYPE_STATION && 28 if (!sdata->u.mgd.associated)
27 !sdata->u.mgd.associated) 29 continue;
28 continue; 30 break;
29 31 case NL80211_IFTYPE_ADHOC:
30 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
31 if (!sdata->u.ibss.ssid_len) 32 if (!sdata->u.ibss.ssid_len)
32 continue; 33 continue;
33 if (!sdata->u.ibss.fixed_channel) 34 if (!sdata->u.ibss.fixed_channel)
34 return CHAN_MODE_HOPPING; 35 return CHAN_MODE_HOPPING;
35 } 36 break;
36 37 case NL80211_IFTYPE_AP_VLAN:
37 if (sdata->vif.type == NL80211_IFTYPE_AP && 38 /* will also have _AP interface */
38 !sdata->u.ap.beacon)
39 continue; 39 continue;
40 case NL80211_IFTYPE_AP:
41 if (!sdata->u.ap.beacon)
42 continue;
43 break;
44 default:
45 break;
46 }
40 47
41 return CHAN_MODE_FIXED; 48 return CHAN_MODE_FIXED;
42 } 49 }
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 90baea53e7c5..778e5916d7c3 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -15,12 +15,6 @@
15#include "rate.h" 15#include "rate.h"
16#include "debugfs.h" 16#include "debugfs.h"
17 17
18int mac80211_open_file_generic(struct inode *inode, struct file *file)
19{
20 file->private_data = inode->i_private;
21 return 0;
22}
23
24#define DEBUGFS_FORMAT_BUFFER_SIZE 100 18#define DEBUGFS_FORMAT_BUFFER_SIZE 100
25 19
26int mac80211_format_buffer(char __user *userbuf, size_t count, 20int mac80211_format_buffer(char __user *userbuf, size_t count,
@@ -50,7 +44,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
50#define DEBUGFS_READONLY_FILE_OPS(name) \ 44#define DEBUGFS_READONLY_FILE_OPS(name) \
51static const struct file_operations name## _ops = { \ 45static const struct file_operations name## _ops = { \
52 .read = name## _read, \ 46 .read = name## _read, \
53 .open = mac80211_open_file_generic, \ 47 .open = simple_open, \
54 .llseek = generic_file_llseek, \ 48 .llseek = generic_file_llseek, \
55}; 49};
56 50
@@ -93,89 +87,10 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
93 87
94static const struct file_operations reset_ops = { 88static const struct file_operations reset_ops = {
95 .write = reset_write, 89 .write = reset_write,
96 .open = mac80211_open_file_generic, 90 .open = simple_open,
97 .llseek = noop_llseek, 91 .llseek = noop_llseek,
98}; 92};
99 93
100static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
101 size_t count, loff_t *ppos)
102{
103 struct ieee80211_local *local = file->private_data;
104 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
105 local->uapsd_queues);
106}
107
108static ssize_t uapsd_queues_write(struct file *file,
109 const char __user *user_buf,
110 size_t count, loff_t *ppos)
111{
112 struct ieee80211_local *local = file->private_data;
113 u8 val;
114 int ret;
115
116 ret = kstrtou8_from_user(user_buf, count, 0, &val);
117 if (ret)
118 return ret;
119
120 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
121 return -ERANGE;
122
123 local->uapsd_queues = val;
124
125 return count;
126}
127
128static const struct file_operations uapsd_queues_ops = {
129 .read = uapsd_queues_read,
130 .write = uapsd_queues_write,
131 .open = mac80211_open_file_generic,
132 .llseek = default_llseek,
133};
134
135static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
136 size_t count, loff_t *ppos)
137{
138 struct ieee80211_local *local = file->private_data;
139
140 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
141 local->uapsd_max_sp_len);
142}
143
144static ssize_t uapsd_max_sp_len_write(struct file *file,
145 const char __user *user_buf,
146 size_t count, loff_t *ppos)
147{
148 struct ieee80211_local *local = file->private_data;
149 unsigned long val;
150 char buf[10];
151 size_t len;
152 int ret;
153
154 len = min(count, sizeof(buf) - 1);
155 if (copy_from_user(buf, user_buf, len))
156 return -EFAULT;
157 buf[len] = '\0';
158
159 ret = kstrtoul(buf, 0, &val);
160
161 if (ret)
162 return -EINVAL;
163
164 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
165 return -ERANGE;
166
167 local->uapsd_max_sp_len = val;
168
169 return count;
170}
171
172static const struct file_operations uapsd_max_sp_len_ops = {
173 .read = uapsd_max_sp_len_read,
174 .write = uapsd_max_sp_len_write,
175 .open = mac80211_open_file_generic,
176 .llseek = default_llseek,
177};
178
179static ssize_t channel_type_read(struct file *file, char __user *user_buf, 94static ssize_t channel_type_read(struct file *file, char __user *user_buf,
180 size_t count, loff_t *ppos) 95 size_t count, loff_t *ppos)
181{ 96{
@@ -247,8 +162,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
247 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n"); 162 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
248 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE) 163 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
249 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n"); 164 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
250 if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
251 sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
252 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) 165 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
253 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n"); 166 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
254 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) 167 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
@@ -259,14 +172,14 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
259 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n"); 172 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
260 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 173 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
261 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n"); 174 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
262 if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
263 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
264 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK) 175 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
265 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n"); 176 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
266 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) 177 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
267 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n"); 178 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
268 if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW) 179 if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
269 sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n"); 180 sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
181 if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
182 sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
270 183
271 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 184 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
272 kfree(buf); 185 kfree(buf);
@@ -335,7 +248,7 @@ static ssize_t stats_ ##name## _read(struct file *file, \
335 \ 248 \
336static const struct file_operations stats_ ##name## _ops = { \ 249static const struct file_operations stats_ ##name## _ops = { \
337 .read = stats_ ##name## _read, \ 250 .read = stats_ ##name## _read, \
338 .open = mac80211_open_file_generic, \ 251 .open = simple_open, \
339 .llseek = generic_file_llseek, \ 252 .llseek = generic_file_llseek, \
340}; 253};
341 254
@@ -364,8 +277,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
364 DEBUGFS_ADD(wep_iv); 277 DEBUGFS_ADD(wep_iv);
365 DEBUGFS_ADD(queues); 278 DEBUGFS_ADD(queues);
366 DEBUGFS_ADD_MODE(reset, 0200); 279 DEBUGFS_ADD_MODE(reset, 0200);
367 DEBUGFS_ADD(uapsd_queues);
368 DEBUGFS_ADD(uapsd_max_sp_len);
369 DEBUGFS_ADD(channel_type); 280 DEBUGFS_ADD(channel_type);
370 DEBUGFS_ADD(hwflags); 281 DEBUGFS_ADD(hwflags);
371 DEBUGFS_ADD(user_power); 282 DEBUGFS_ADD(user_power);
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 7c87529630f5..9be4e6d71d00 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -3,7 +3,6 @@
3 3
4#ifdef CONFIG_MAC80211_DEBUGFS 4#ifdef CONFIG_MAC80211_DEBUGFS
5extern void debugfs_hw_add(struct ieee80211_local *local); 5extern void debugfs_hw_add(struct ieee80211_local *local);
6extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
7extern int mac80211_format_buffer(char __user *userbuf, size_t count, 6extern int mac80211_format_buffer(char __user *userbuf, size_t count,
8 loff_t *ppos, char *fmt, ...); 7 loff_t *ppos, char *fmt, ...);
9#else 8#else
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 59edcd95a58d..7932767bb482 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -30,7 +30,7 @@ static ssize_t key_##name##_read(struct file *file, \
30#define KEY_OPS(name) \ 30#define KEY_OPS(name) \
31static const struct file_operations key_ ##name## _ops = { \ 31static const struct file_operations key_ ##name## _ops = { \
32 .read = key_##name##_read, \ 32 .read = key_##name##_read, \
33 .open = mac80211_open_file_generic, \ 33 .open = simple_open, \
34 .llseek = generic_file_llseek, \ 34 .llseek = generic_file_llseek, \
35} 35}
36 36
@@ -45,7 +45,7 @@ static const struct file_operations key_ ##name## _ops = { \
45#define KEY_CONF_OPS(name) \ 45#define KEY_CONF_OPS(name) \
46static const struct file_operations key_ ##name## _ops = { \ 46static const struct file_operations key_ ##name## _ops = { \
47 .read = key_conf_##name##_read, \ 47 .read = key_conf_##name##_read, \
48 .open = mac80211_open_file_generic, \ 48 .open = simple_open, \
49 .llseek = generic_file_llseek, \ 49 .llseek = generic_file_llseek, \
50} 50}
51 51
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 176c08ffb13c..7ed433c66d68 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -49,16 +49,15 @@ static ssize_t ieee80211_if_write(
49 size_t count, loff_t *ppos, 49 size_t count, loff_t *ppos,
50 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int)) 50 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
51{ 51{
52 u8 *buf; 52 char buf[64];
53 ssize_t ret; 53 ssize_t ret;
54 54
55 buf = kmalloc(count, GFP_KERNEL); 55 if (count >= sizeof(buf))
56 if (!buf) 56 return -E2BIG;
57 return -ENOMEM;
58 57
59 ret = -EFAULT;
60 if (copy_from_user(buf, userbuf, count)) 58 if (copy_from_user(buf, userbuf, count))
61 goto freebuf; 59 return -EFAULT;
60 buf[count] = '\0';
62 61
63 ret = -ENODEV; 62 ret = -ENODEV;
64 rtnl_lock(); 63 rtnl_lock();
@@ -66,8 +65,6 @@ static ssize_t ieee80211_if_write(
66 ret = (*write)(sdata, buf, count); 65 ret = (*write)(sdata, buf, count);
67 rtnl_unlock(); 66 rtnl_unlock();
68 67
69freebuf:
70 kfree(buf);
71 return ret; 68 return ret;
72} 69}
73 70
@@ -87,6 +84,21 @@ static ssize_t ieee80211_if_fmt_##name( \
87#define IEEE80211_IF_FMT_SIZE(name, field) \ 84#define IEEE80211_IF_FMT_SIZE(name, field) \
88 IEEE80211_IF_FMT(name, field, "%zd\n") 85 IEEE80211_IF_FMT(name, field, "%zd\n")
89 86
87#define IEEE80211_IF_FMT_HEXARRAY(name, field) \
88static ssize_t ieee80211_if_fmt_##name( \
89 const struct ieee80211_sub_if_data *sdata, \
90 char *buf, int buflen) \
91{ \
92 char *p = buf; \
93 int i; \
94 for (i = 0; i < sizeof(sdata->field); i++) { \
95 p += scnprintf(p, buflen + buf - p, "%.2x ", \
96 sdata->field[i]); \
97 } \
98 p += scnprintf(p, buflen + buf - p, "\n"); \
99 return p - buf; \
100}
101
90#define IEEE80211_IF_FMT_ATOMIC(name, field) \ 102#define IEEE80211_IF_FMT_ATOMIC(name, field) \
91static ssize_t ieee80211_if_fmt_##name( \ 103static ssize_t ieee80211_if_fmt_##name( \
92 const struct ieee80211_sub_if_data *sdata, \ 104 const struct ieee80211_sub_if_data *sdata, \
@@ -123,7 +135,7 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
123static const struct file_operations name##_ops = { \ 135static const struct file_operations name##_ops = { \
124 .read = ieee80211_if_read_##name, \ 136 .read = ieee80211_if_read_##name, \
125 .write = (_write), \ 137 .write = (_write), \
126 .open = mac80211_open_file_generic, \ 138 .open = simple_open, \
127 .llseek = generic_file_llseek, \ 139 .llseek = generic_file_llseek, \
128} 140}
129 141
@@ -148,6 +160,11 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
148 HEX); 160 HEX);
149IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], 161IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
150 HEX); 162 HEX);
163IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
164 rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY);
165IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
166 rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
167
151IEEE80211_IF_FILE(flags, flags, HEX); 168IEEE80211_IF_FILE(flags, flags, HEX);
152IEEE80211_IF_FILE(state, state, LHEX); 169IEEE80211_IF_FILE(state, state, LHEX);
153IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC); 170IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
@@ -320,8 +337,64 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
320 337
321__IEEE80211_IF_FILE_W(tkip_mic_test); 338__IEEE80211_IF_FILE_W(tkip_mic_test);
322 339
340static ssize_t ieee80211_if_fmt_uapsd_queues(
341 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
342{
343 const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
344
345 return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues);
346}
347
348static ssize_t ieee80211_if_parse_uapsd_queues(
349 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
350{
351 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
352 u8 val;
353 int ret;
354
355 ret = kstrtou8(buf, 0, &val);
356 if (ret)
357 return ret;
358
359 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
360 return -ERANGE;
361
362 ifmgd->uapsd_queues = val;
363
364 return buflen;
365}
366__IEEE80211_IF_FILE_W(uapsd_queues);
367
368static ssize_t ieee80211_if_fmt_uapsd_max_sp_len(
369 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
370{
371 const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
372
373 return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len);
374}
375
376static ssize_t ieee80211_if_parse_uapsd_max_sp_len(
377 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
378{
379 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
380 unsigned long val;
381 int ret;
382
383 ret = kstrtoul(buf, 0, &val);
384 if (ret)
385 return -EINVAL;
386
387 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
388 return -ERANGE;
389
390 ifmgd->uapsd_max_sp_len = val;
391
392 return buflen;
393}
394__IEEE80211_IF_FILE_W(uapsd_max_sp_len);
395
323/* AP attributes */ 396/* AP attributes */
324IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); 397IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
325IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 398IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
326IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); 399IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
327 400
@@ -351,6 +424,7 @@ static ssize_t ieee80211_if_parse_tsf(
351 struct ieee80211_local *local = sdata->local; 424 struct ieee80211_local *local = sdata->local;
352 unsigned long long tsf; 425 unsigned long long tsf;
353 int ret; 426 int ret;
427 int tsf_is_delta = 0;
354 428
355 if (strncmp(buf, "reset", 5) == 0) { 429 if (strncmp(buf, "reset", 5) == 0) {
356 if (local->ops->reset_tsf) { 430 if (local->ops->reset_tsf) {
@@ -358,9 +432,20 @@ static ssize_t ieee80211_if_parse_tsf(
358 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); 432 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
359 } 433 }
360 } else { 434 } else {
435 if (buflen > 10 && buf[1] == '=') {
436 if (buf[0] == '+')
437 tsf_is_delta = 1;
438 else if (buf[0] == '-')
439 tsf_is_delta = -1;
440 else
441 return -EINVAL;
442 buf += 2;
443 }
361 ret = kstrtoull(buf, 10, &tsf); 444 ret = kstrtoull(buf, 10, &tsf);
362 if (ret < 0) 445 if (ret < 0)
363 return -EINVAL; 446 return -EINVAL;
447 if (tsf_is_delta)
448 tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
364 if (local->ops->set_tsf) { 449 if (local->ops->set_tsf) {
365 drv_set_tsf(local, sdata, tsf); 450 drv_set_tsf(local, sdata, tsf);
366 wiphy_info(local->hw.wiphy, 451 wiphy_info(local->hw.wiphy,
@@ -422,44 +507,41 @@ IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
422 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); 507 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
423IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, 508IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
424 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); 509 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
510IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
512IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
425#endif 513#endif
426 514
427
428#define DEBUGFS_ADD(name) \
429 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
430 sdata, &name##_ops);
431
432#define DEBUGFS_ADD_MODE(name, mode) \ 515#define DEBUGFS_ADD_MODE(name, mode) \
433 debugfs_create_file(#name, mode, sdata->debugfs.dir, \ 516 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
434 sdata, &name##_ops); 517 sdata, &name##_ops);
435 518
436static void add_sta_files(struct ieee80211_sub_if_data *sdata) 519#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
520
521static void add_common_files(struct ieee80211_sub_if_data *sdata)
437{ 522{
438 DEBUGFS_ADD(drop_unencrypted); 523 DEBUGFS_ADD(drop_unencrypted);
439 DEBUGFS_ADD(flags);
440 DEBUGFS_ADD(state);
441 DEBUGFS_ADD(channel_type);
442 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 524 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
443 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 525 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
526 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
527 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
528}
444 529
530static void add_sta_files(struct ieee80211_sub_if_data *sdata)
531{
445 DEBUGFS_ADD(bssid); 532 DEBUGFS_ADD(bssid);
446 DEBUGFS_ADD(aid); 533 DEBUGFS_ADD(aid);
447 DEBUGFS_ADD(last_beacon); 534 DEBUGFS_ADD(last_beacon);
448 DEBUGFS_ADD(ave_beacon); 535 DEBUGFS_ADD(ave_beacon);
449 DEBUGFS_ADD_MODE(smps, 0600); 536 DEBUGFS_ADD_MODE(smps, 0600);
450 DEBUGFS_ADD_MODE(tkip_mic_test, 0200); 537 DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
538 DEBUGFS_ADD_MODE(uapsd_queues, 0600);
539 DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
451} 540}
452 541
453static void add_ap_files(struct ieee80211_sub_if_data *sdata) 542static void add_ap_files(struct ieee80211_sub_if_data *sdata)
454{ 543{
455 DEBUGFS_ADD(drop_unencrypted); 544 DEBUGFS_ADD(num_mcast_sta);
456 DEBUGFS_ADD(flags);
457 DEBUGFS_ADD(state);
458 DEBUGFS_ADD(channel_type);
459 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
460 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
461
462 DEBUGFS_ADD(num_sta_authorized);
463 DEBUGFS_ADD(num_sta_ps); 545 DEBUGFS_ADD(num_sta_ps);
464 DEBUGFS_ADD(dtim_count); 546 DEBUGFS_ADD(dtim_count);
465 DEBUGFS_ADD(num_buffered_multicast); 547 DEBUGFS_ADD(num_buffered_multicast);
@@ -473,40 +555,20 @@ static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
473 555
474static void add_wds_files(struct ieee80211_sub_if_data *sdata) 556static void add_wds_files(struct ieee80211_sub_if_data *sdata)
475{ 557{
476 DEBUGFS_ADD(drop_unencrypted);
477 DEBUGFS_ADD(flags);
478 DEBUGFS_ADD(state);
479 DEBUGFS_ADD(channel_type);
480 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
481 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
482
483 DEBUGFS_ADD(peer); 558 DEBUGFS_ADD(peer);
484} 559}
485 560
486static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 561#ifdef CONFIG_MAC80211_MESH
487{
488 DEBUGFS_ADD(drop_unencrypted);
489 DEBUGFS_ADD(flags);
490 DEBUGFS_ADD(state);
491 DEBUGFS_ADD(channel_type);
492 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
493 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
494}
495 562
496static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 563static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
497{ 564{
498 DEBUGFS_ADD(flags); 565 DEBUGFS_ADD_MODE(tsf, 0600);
499 DEBUGFS_ADD(state);
500 DEBUGFS_ADD(channel_type);
501} 566}
502 567
503#ifdef CONFIG_MAC80211_MESH
504
505static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) 568static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
506{ 569{
507 struct dentry *dir = debugfs_create_dir("mesh_stats", 570 struct dentry *dir = debugfs_create_dir("mesh_stats",
508 sdata->debugfs.dir); 571 sdata->debugfs.dir);
509
510#define MESHSTATS_ADD(name)\ 572#define MESHSTATS_ADD(name)\
511 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); 573 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
512 574
@@ -546,6 +608,8 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
546 MESHPARAMS_ADD(dot11MeshHWMPRootMode); 608 MESHPARAMS_ADD(dot11MeshHWMPRootMode);
547 MESHPARAMS_ADD(dot11MeshHWMPRannInterval); 609 MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
548 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); 610 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
611 MESHPARAMS_ADD(rssi_threshold);
612 MESHPARAMS_ADD(ht_opmode);
549#undef MESHPARAMS_ADD 613#undef MESHPARAMS_ADD
550} 614}
551#endif 615#endif
@@ -555,9 +619,17 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
555 if (!sdata->debugfs.dir) 619 if (!sdata->debugfs.dir)
556 return; 620 return;
557 621
622 DEBUGFS_ADD(flags);
623 DEBUGFS_ADD(state);
624 DEBUGFS_ADD(channel_type);
625
626 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
627 add_common_files(sdata);
628
558 switch (sdata->vif.type) { 629 switch (sdata->vif.type) {
559 case NL80211_IFTYPE_MESH_POINT: 630 case NL80211_IFTYPE_MESH_POINT:
560#ifdef CONFIG_MAC80211_MESH 631#ifdef CONFIG_MAC80211_MESH
632 add_mesh_files(sdata);
561 add_mesh_stats(sdata); 633 add_mesh_stats(sdata);
562 add_mesh_config(sdata); 634 add_mesh_config(sdata);
563#endif 635#endif
@@ -574,12 +646,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
574 case NL80211_IFTYPE_WDS: 646 case NL80211_IFTYPE_WDS:
575 add_wds_files(sdata); 647 add_wds_files(sdata);
576 break; 648 break;
577 case NL80211_IFTYPE_MONITOR:
578 add_monitor_files(sdata);
579 break;
580 case NL80211_IFTYPE_AP_VLAN:
581 add_vlan_files(sdata);
582 break;
583 default: 649 default:
584 break; 650 break;
585 } 651 }
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d86217d56bd7..5ccec2c1e9f6 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -33,7 +33,7 @@ static ssize_t sta_ ##name## _read(struct file *file, \
33#define STA_OPS(name) \ 33#define STA_OPS(name) \
34static const struct file_operations sta_ ##name## _ops = { \ 34static const struct file_operations sta_ ##name## _ops = { \
35 .read = sta_##name##_read, \ 35 .read = sta_##name##_read, \
36 .open = mac80211_open_file_generic, \ 36 .open = simple_open, \
37 .llseek = generic_file_llseek, \ 37 .llseek = generic_file_llseek, \
38} 38}
39 39
@@ -41,7 +41,7 @@ static const struct file_operations sta_ ##name## _ops = { \
41static const struct file_operations sta_ ##name## _ops = { \ 41static const struct file_operations sta_ ##name## _ops = { \
42 .read = sta_##name##_read, \ 42 .read = sta_##name##_read, \
43 .write = sta_##name##_write, \ 43 .write = sta_##name##_write, \
44 .open = mac80211_open_file_generic, \ 44 .open = simple_open, \
45 .llseek = generic_file_llseek, \ 45 .llseek = generic_file_llseek, \
46} 46}
47 47
@@ -63,14 +63,16 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), 69 TEST(SHORT_PREAMBLE),
70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH), TEST(RATE_CONTROL)); 73 TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
74 TEST(INSERTED), TEST(RATE_CONTROL),
75 TEST(TOFFSET_KNOWN));
74#undef TEST 76#undef TEST
75 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 77 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
76} 78}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index e8960ae39861..6d33a0c743ab 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -7,7 +7,9 @@
7 7
8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) 8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
9{ 9{
10 WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); 10 WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
12 sdata->dev->name, sdata->flags);
11} 13}
12 14
13static inline struct ieee80211_sub_if_data * 15static inline struct ieee80211_sub_if_data *
@@ -33,6 +35,43 @@ static inline void drv_tx_frags(struct ieee80211_local *local,
33 local->ops->tx_frags(&local->hw, vif, sta, skbs); 35 local->ops->tx_frags(&local->hw, vif, sta, skbs);
34} 36}
35 37
38static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
39 u32 sset, u8 *data)
40{
41 struct ieee80211_local *local = sdata->local;
42 if (local->ops->get_et_strings) {
43 trace_drv_get_et_strings(local, sset);
44 local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data);
45 trace_drv_return_void(local);
46 }
47}
48
49static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata,
50 struct ethtool_stats *stats,
51 u64 *data)
52{
53 struct ieee80211_local *local = sdata->local;
54 if (local->ops->get_et_stats) {
55 trace_drv_get_et_stats(local);
56 local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data);
57 trace_drv_return_void(local);
58 }
59}
60
61static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata,
62 int sset)
63{
64 struct ieee80211_local *local = sdata->local;
65 int rv = 0;
66 if (local->ops->get_et_sset_count) {
67 trace_drv_get_et_sset_count(local, sset);
68 rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif,
69 sset);
70 trace_drv_return_int(local, rv);
71 }
72 return rv;
73}
74
36static inline int drv_start(struct ieee80211_local *local) 75static inline int drv_start(struct ieee80211_local *local)
37{ 76{
38 int ret; 77 int ret;
@@ -89,6 +128,19 @@ static inline int drv_resume(struct ieee80211_local *local)
89 trace_drv_return_int(local, ret); 128 trace_drv_return_int(local, ret);
90 return ret; 129 return ret;
91} 130}
131
132static inline void drv_set_wakeup(struct ieee80211_local *local,
133 bool enabled)
134{
135 might_sleep();
136
137 if (!local->ops->set_wakeup)
138 return;
139
140 trace_drv_set_wakeup(local, enabled);
141 local->ops->set_wakeup(&local->hw, enabled);
142 trace_drv_return_void(local);
143}
92#endif 144#endif
93 145
94static inline int drv_add_interface(struct ieee80211_local *local, 146static inline int drv_add_interface(struct ieee80211_local *local,
@@ -99,7 +151,8 @@ static inline int drv_add_interface(struct ieee80211_local *local,
99 might_sleep(); 151 might_sleep();
100 152
101 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 153 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
102 sdata->vif.type == NL80211_IFTYPE_MONITOR)) 154 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
155 !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))))
103 return -EINVAL; 156 return -EINVAL;
104 157
105 trace_drv_add_interface(local, sdata); 158 trace_drv_add_interface(local, sdata);
@@ -168,41 +221,6 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
168 trace_drv_return_void(local); 221 trace_drv_return_void(local);
169} 222}
170 223
171static inline int drv_tx_sync(struct ieee80211_local *local,
172 struct ieee80211_sub_if_data *sdata,
173 const u8 *bssid,
174 enum ieee80211_tx_sync_type type)
175{
176 int ret = 0;
177
178 might_sleep();
179
180 check_sdata_in_driver(sdata);
181
182 trace_drv_tx_sync(local, sdata, bssid, type);
183 if (local->ops->tx_sync)
184 ret = local->ops->tx_sync(&local->hw, &sdata->vif,
185 bssid, type);
186 trace_drv_return_int(local, ret);
187 return ret;
188}
189
190static inline void drv_finish_tx_sync(struct ieee80211_local *local,
191 struct ieee80211_sub_if_data *sdata,
192 const u8 *bssid,
193 enum ieee80211_tx_sync_type type)
194{
195 might_sleep();
196
197 check_sdata_in_driver(sdata);
198
199 trace_drv_finish_tx_sync(local, sdata, bssid, type);
200 if (local->ops->finish_tx_sync)
201 local->ops->finish_tx_sync(&local->hw, &sdata->vif,
202 bssid, type);
203 trace_drv_return_void(local);
204}
205
206static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 224static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
207 struct netdev_hw_addr_list *mc_list) 225 struct netdev_hw_addr_list *mc_list)
208{ 226{
@@ -253,6 +271,7 @@ static inline int drv_set_key(struct ieee80211_local *local,
253 271
254 might_sleep(); 272 might_sleep();
255 273
274 sdata = get_bss_sdata(sdata);
256 check_sdata_in_driver(sdata); 275 check_sdata_in_driver(sdata);
257 276
258 trace_drv_set_key(local, cmd, sdata, sta, key); 277 trace_drv_set_key(local, cmd, sdata, sta, key);
@@ -272,6 +291,7 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
272 if (sta) 291 if (sta)
273 ista = &sta->sta; 292 ista = &sta->sta;
274 293
294 sdata = get_bss_sdata(sdata);
275 check_sdata_in_driver(sdata); 295 check_sdata_in_driver(sdata);
276 296
277 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); 297 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
@@ -476,8 +496,54 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
476 trace_drv_return_void(local); 496 trace_drv_return_void(local);
477} 497}
478 498
499static inline __must_check
500int drv_sta_state(struct ieee80211_local *local,
501 struct ieee80211_sub_if_data *sdata,
502 struct sta_info *sta,
503 enum ieee80211_sta_state old_state,
504 enum ieee80211_sta_state new_state)
505{
506 int ret = 0;
507
508 might_sleep();
509
510 sdata = get_bss_sdata(sdata);
511 check_sdata_in_driver(sdata);
512
513 trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
514 if (local->ops->sta_state) {
515 ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
516 old_state, new_state);
517 } else if (old_state == IEEE80211_STA_AUTH &&
518 new_state == IEEE80211_STA_ASSOC) {
519 ret = drv_sta_add(local, sdata, &sta->sta);
520 if (ret == 0)
521 sta->uploaded = true;
522 } else if (old_state == IEEE80211_STA_ASSOC &&
523 new_state == IEEE80211_STA_AUTH) {
524 drv_sta_remove(local, sdata, &sta->sta);
525 }
526 trace_drv_return_int(local, ret);
527 return ret;
528}
529
530static inline void drv_sta_rc_update(struct ieee80211_local *local,
531 struct ieee80211_sub_if_data *sdata,
532 struct ieee80211_sta *sta, u32 changed)
533{
534 sdata = get_bss_sdata(sdata);
535 check_sdata_in_driver(sdata);
536
537 trace_drv_sta_rc_update(local, sdata, sta, changed);
538 if (local->ops->sta_rc_update)
539 local->ops->sta_rc_update(&local->hw, &sdata->vif,
540 sta, changed);
541
542 trace_drv_return_void(local);
543}
544
479static inline int drv_conf_tx(struct ieee80211_local *local, 545static inline int drv_conf_tx(struct ieee80211_local *local,
480 struct ieee80211_sub_if_data *sdata, u16 queue, 546 struct ieee80211_sub_if_data *sdata, u16 ac,
481 const struct ieee80211_tx_queue_params *params) 547 const struct ieee80211_tx_queue_params *params)
482{ 548{
483 int ret = -EOPNOTSUPP; 549 int ret = -EOPNOTSUPP;
@@ -486,10 +552,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
486 552
487 check_sdata_in_driver(sdata); 553 check_sdata_in_driver(sdata);
488 554
489 trace_drv_conf_tx(local, sdata, queue, params); 555 trace_drv_conf_tx(local, sdata, ac, params);
490 if (local->ops->conf_tx) 556 if (local->ops->conf_tx)
491 ret = local->ops->conf_tx(&local->hw, &sdata->vif, 557 ret = local->ops->conf_tx(&local->hw, &sdata->vif,
492 queue, params); 558 ac, params);
493 trace_drv_return_int(local, ret); 559 trace_drv_return_int(local, ret);
494 return ret; 560 return ret;
495} 561}
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6e9df8fd8fb8..6de00b2c268c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -161,6 +161,21 @@ DEFINE_EVENT(local_only_evt, drv_start,
161 TP_ARGS(local) 161 TP_ARGS(local)
162); 162);
163 163
164DEFINE_EVENT(local_u32_evt, drv_get_et_strings,
165 TP_PROTO(struct ieee80211_local *local, u32 sset),
166 TP_ARGS(local, sset)
167);
168
169DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count,
170 TP_PROTO(struct ieee80211_local *local, u32 sset),
171 TP_ARGS(local, sset)
172);
173
174DEFINE_EVENT(local_only_evt, drv_get_et_stats,
175 TP_PROTO(struct ieee80211_local *local),
176 TP_ARGS(local)
177);
178
164DEFINE_EVENT(local_only_evt, drv_suspend, 179DEFINE_EVENT(local_only_evt, drv_suspend,
165 TP_PROTO(struct ieee80211_local *local), 180 TP_PROTO(struct ieee80211_local *local),
166 TP_ARGS(local) 181 TP_ARGS(local)
@@ -171,6 +186,20 @@ DEFINE_EVENT(local_only_evt, drv_resume,
171 TP_ARGS(local) 186 TP_ARGS(local)
172); 187);
173 188
189TRACE_EVENT(drv_set_wakeup,
190 TP_PROTO(struct ieee80211_local *local, bool enabled),
191 TP_ARGS(local, enabled),
192 TP_STRUCT__entry(
193 LOCAL_ENTRY
194 __field(bool, enabled)
195 ),
196 TP_fast_assign(
197 LOCAL_ASSIGN;
198 __entry->enabled = enabled;
199 ),
200 TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
201);
202
174DEFINE_EVENT(local_only_evt, drv_stop, 203DEFINE_EVENT(local_only_evt, drv_stop,
175 TP_PROTO(struct ieee80211_local *local), 204 TP_PROTO(struct ieee80211_local *local),
176 TP_ARGS(local) 205 TP_ARGS(local)
@@ -296,7 +325,7 @@ TRACE_EVENT(drv_bss_info_changed,
296 __entry->dtimper = info->dtim_period; 325 __entry->dtimper = info->dtim_period;
297 __entry->bcnint = info->beacon_int; 326 __entry->bcnint = info->beacon_int;
298 __entry->assoc_cap = info->assoc_capability; 327 __entry->assoc_cap = info->assoc_capability;
299 __entry->timestamp = info->timestamp; 328 __entry->timestamp = info->last_tsf;
300 __entry->basic_rates = info->basic_rates; 329 __entry->basic_rates = info->basic_rates;
301 __entry->enable_beacon = info->enable_beacon; 330 __entry->enable_beacon = info->enable_beacon;
302 __entry->ht_operation_mode = info->ht_operation_mode; 331 __entry->ht_operation_mode = info->ht_operation_mode;
@@ -308,49 +337,6 @@ TRACE_EVENT(drv_bss_info_changed,
308 ) 337 )
309); 338);
310 339
311DECLARE_EVENT_CLASS(tx_sync_evt,
312 TP_PROTO(struct ieee80211_local *local,
313 struct ieee80211_sub_if_data *sdata,
314 const u8 *bssid,
315 enum ieee80211_tx_sync_type type),
316 TP_ARGS(local, sdata, bssid, type),
317
318 TP_STRUCT__entry(
319 LOCAL_ENTRY
320 VIF_ENTRY
321 __array(char, bssid, ETH_ALEN)
322 __field(u32, sync_type)
323 ),
324
325 TP_fast_assign(
326 LOCAL_ASSIGN;
327 VIF_ASSIGN;
328 memcpy(__entry->bssid, bssid, ETH_ALEN);
329 __entry->sync_type = type;
330 ),
331
332 TP_printk(
333 LOCAL_PR_FMT VIF_PR_FMT " bssid:%pM type:%d",
334 LOCAL_PR_ARG, VIF_PR_ARG, __entry->bssid, __entry->sync_type
335 )
336);
337
338DEFINE_EVENT(tx_sync_evt, drv_tx_sync,
339 TP_PROTO(struct ieee80211_local *local,
340 struct ieee80211_sub_if_data *sdata,
341 const u8 *bssid,
342 enum ieee80211_tx_sync_type type),
343 TP_ARGS(local, sdata, bssid, type)
344);
345
346DEFINE_EVENT(tx_sync_evt, drv_finish_tx_sync,
347 TP_PROTO(struct ieee80211_local *local,
348 struct ieee80211_sub_if_data *sdata,
349 const u8 *bssid,
350 enum ieee80211_tx_sync_type type),
351 TP_ARGS(local, sdata, bssid, type)
352);
353
354TRACE_EVENT(drv_prepare_multicast, 340TRACE_EVENT(drv_prepare_multicast,
355 TP_PROTO(struct ieee80211_local *local, int mc_count), 341 TP_PROTO(struct ieee80211_local *local, int mc_count),
356 342
@@ -635,6 +621,66 @@ TRACE_EVENT(drv_sta_notify,
635 ) 621 )
636); 622);
637 623
624TRACE_EVENT(drv_sta_state,
625 TP_PROTO(struct ieee80211_local *local,
626 struct ieee80211_sub_if_data *sdata,
627 struct ieee80211_sta *sta,
628 enum ieee80211_sta_state old_state,
629 enum ieee80211_sta_state new_state),
630
631 TP_ARGS(local, sdata, sta, old_state, new_state),
632
633 TP_STRUCT__entry(
634 LOCAL_ENTRY
635 VIF_ENTRY
636 STA_ENTRY
637 __field(u32, old_state)
638 __field(u32, new_state)
639 ),
640
641 TP_fast_assign(
642 LOCAL_ASSIGN;
643 VIF_ASSIGN;
644 STA_ASSIGN;
645 __entry->old_state = old_state;
646 __entry->new_state = new_state;
647 ),
648
649 TP_printk(
650 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " state: %d->%d",
651 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
652 __entry->old_state, __entry->new_state
653 )
654);
655
656TRACE_EVENT(drv_sta_rc_update,
657 TP_PROTO(struct ieee80211_local *local,
658 struct ieee80211_sub_if_data *sdata,
659 struct ieee80211_sta *sta,
660 u32 changed),
661
662 TP_ARGS(local, sdata, sta, changed),
663
664 TP_STRUCT__entry(
665 LOCAL_ENTRY
666 VIF_ENTRY
667 STA_ENTRY
668 __field(u32, changed)
669 ),
670
671 TP_fast_assign(
672 LOCAL_ASSIGN;
673 VIF_ASSIGN;
674 STA_ASSIGN;
675 __entry->changed = changed;
676 ),
677
678 TP_printk(
679 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x",
680 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed
681 )
682);
683
638TRACE_EVENT(drv_sta_add, 684TRACE_EVENT(drv_sta_add,
639 TP_PROTO(struct ieee80211_local *local, 685 TP_PROTO(struct ieee80211_local *local,
640 struct ieee80211_sub_if_data *sdata, 686 struct ieee80211_sub_if_data *sdata,
@@ -688,15 +734,14 @@ TRACE_EVENT(drv_sta_remove,
688TRACE_EVENT(drv_conf_tx, 734TRACE_EVENT(drv_conf_tx,
689 TP_PROTO(struct ieee80211_local *local, 735 TP_PROTO(struct ieee80211_local *local,
690 struct ieee80211_sub_if_data *sdata, 736 struct ieee80211_sub_if_data *sdata,
691 u16 queue, 737 u16 ac, const struct ieee80211_tx_queue_params *params),
692 const struct ieee80211_tx_queue_params *params),
693 738
694 TP_ARGS(local, sdata, queue, params), 739 TP_ARGS(local, sdata, ac, params),
695 740
696 TP_STRUCT__entry( 741 TP_STRUCT__entry(
697 LOCAL_ENTRY 742 LOCAL_ENTRY
698 VIF_ENTRY 743 VIF_ENTRY
699 __field(u16, queue) 744 __field(u16, ac)
700 __field(u16, txop) 745 __field(u16, txop)
701 __field(u16, cw_min) 746 __field(u16, cw_min)
702 __field(u16, cw_max) 747 __field(u16, cw_max)
@@ -707,7 +752,7 @@ TRACE_EVENT(drv_conf_tx,
707 TP_fast_assign( 752 TP_fast_assign(
708 LOCAL_ASSIGN; 753 LOCAL_ASSIGN;
709 VIF_ASSIGN; 754 VIF_ASSIGN;
710 __entry->queue = queue; 755 __entry->ac = ac;
711 __entry->txop = params->txop; 756 __entry->txop = params->txop;
712 __entry->cw_max = params->cw_max; 757 __entry->cw_max = params->cw_max;
713 __entry->cw_min = params->cw_min; 758 __entry->cw_min = params->cw_min;
@@ -716,8 +761,8 @@ TRACE_EVENT(drv_conf_tx,
716 ), 761 ),
717 762
718 TP_printk( 763 TP_printk(
719 LOCAL_PR_FMT VIF_PR_FMT " queue:%d", 764 LOCAL_PR_FMT VIF_PR_FMT " AC:%d",
720 LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue 765 LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac
721 ) 766 )
722); 767);
723 768
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f25fff7607d8..6f8615c54b22 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,15 +19,6 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "rate.h" 20#include "rate.h"
21 21
22bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
23{
24 const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
25 if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
26 !(sdata->u.mgd.ht_capa.cap_info & flg))
27 return true;
28 return false;
29}
30
31static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, 22static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
32 struct ieee80211_sta_ht_cap *ht_cap, 23 struct ieee80211_sta_ht_cap *ht_cap,
33 u16 flag) 24 u16 flag)
@@ -315,10 +306,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
315 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; 306 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
316 307
317#ifdef CONFIG_MAC80211_HT_DEBUG 308#ifdef CONFIG_MAC80211_HT_DEBUG
318 if (net_ratelimit()) 309 net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
319 printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", 310 mgmt->sa, initiator ? "initiator" : "recipient",
320 mgmt->sa, initiator ? "initiator" : "recipient", tid, 311 tid,
321 le16_to_cpu(mgmt->u.action.u.delba.reason_code)); 312 le16_to_cpu(mgmt->u.action.u.delba.reason_code));
322#endif /* CONFIG_MAC80211_HT_DEBUG */ 313#endif /* CONFIG_MAC80211_HT_DEBUG */
323 314
324 if (initiator == WLAN_BACK_INITIATOR) 315 if (initiator == WLAN_BACK_INITIATOR)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a4643969a13b..33d9d0c3e3d0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -20,7 +20,6 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <net/mac80211.h> 22#include <net/mac80211.h>
23#include <asm/unaligned.h>
24 23
25#include "ieee80211_i.h" 24#include "ieee80211_i.h"
26#include "driver-ops.h" 25#include "driver-ops.h"
@@ -36,31 +35,6 @@
36#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 35#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
37 36
38 37
39static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
40 struct ieee80211_mgmt *mgmt,
41 size_t len)
42{
43 u16 auth_alg, auth_transaction;
44
45 lockdep_assert_held(&sdata->u.ibss.mtx);
46
47 if (len < 24 + 6)
48 return;
49
50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
52
53 /*
54 * IEEE 802.11 standard does not require authentication in IBSS
55 * networks and most implementations do not seem to use it.
56 * However, try to reply to authentication attempts if someone
57 * has actually implemented this.
58 */
59 if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1)
60 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
61 sdata->u.ibss.bssid, NULL, 0, 0);
62}
63
64static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 38static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
65 const u8 *bssid, const int beacon_int, 39 const u8 *bssid, const int beacon_int,
66 struct ieee80211_channel *chan, 40 struct ieee80211_channel *chan,
@@ -92,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
92 skb_reset_tail_pointer(skb); 66 skb_reset_tail_pointer(skb);
93 skb_reserve(skb, sdata->local->hw.extra_tx_headroom); 67 skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
94 68
95 if (memcmp(ifibss->bssid, bssid, ETH_ALEN)) 69 if (!ether_addr_equal(ifibss->bssid, bssid))
96 sta_info_flush(sdata->local, sdata); 70 sta_info_flush(sdata->local, sdata);
97 71
98 /* if merging, indicate to driver that we leave the old IBSS */ 72 /* if merging, indicate to driver that we leave the old IBSS */
@@ -186,16 +160,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
186 if (channel_type && sband->ht_cap.ht_supported) { 160 if (channel_type && sband->ht_cap.ht_supported) {
187 pos = skb_put(skb, 4 + 161 pos = skb_put(skb, 4 +
188 sizeof(struct ieee80211_ht_cap) + 162 sizeof(struct ieee80211_ht_cap) +
189 sizeof(struct ieee80211_ht_info)); 163 sizeof(struct ieee80211_ht_operation));
190 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, 164 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
191 sband->ht_cap.cap); 165 sband->ht_cap.cap);
192 pos = ieee80211_ie_build_ht_info(pos, 166 /*
193 &sband->ht_cap, 167 * Note: According to 802.11n-2009 9.13.3.1, HT Protection
194 chan, 168 * field and RIFS Mode are reserved in IBSS mode, therefore
195 channel_type); 169 * keep them at 0
170 */
171 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
172 chan, channel_type, 0);
196 } 173 }
197 174
198 if (local->hw.queues >= 4) { 175 if (local->hw.queues >= IEEE80211_NUM_ACS) {
199 pos = skb_put(skb, 9); 176 pos = skb_put(skb, 9);
200 *pos++ = WLAN_EID_VENDOR_SPECIFIC; 177 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
201 *pos++ = 7; /* len */ 178 *pos++ = 7; /* len */
@@ -276,7 +253,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
276 cbss->tsf); 253 cbss->tsf);
277} 254}
278 255
279static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) 256static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
257 bool auth)
280 __acquires(RCU) 258 __acquires(RCU)
281{ 259{
282 struct ieee80211_sub_if_data *sdata = sta->sdata; 260 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,22 +268,34 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
290 addr, sdata->name); 268 addr, sdata->name);
291#endif 269#endif
292 270
293 sta_info_move_state(sta, IEEE80211_STA_AUTH); 271 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
294 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 272 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
295 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 273 /* authorize the station only if the network is not RSN protected. If
274 * not wait for the userspace to authorize it */
275 if (!sta->sdata->u.ibss.control_port)
276 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
296 277
297 rate_control_rate_init(sta); 278 rate_control_rate_init(sta);
298 279
299 /* If it fails, maybe we raced another insertion? */ 280 /* If it fails, maybe we raced another insertion? */
300 if (sta_info_insert_rcu(sta)) 281 if (sta_info_insert_rcu(sta))
301 return sta_info_get(sdata, addr); 282 return sta_info_get(sdata, addr);
283 if (auth) {
284#ifdef CONFIG_MAC80211_IBSS_DEBUG
285 printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
286 "(auth_transaction=1)\n", sdata->vif.addr,
287 sdata->u.ibss.bssid, addr);
288#endif
289 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
290 addr, sdata->u.ibss.bssid, NULL, 0, 0);
291 }
302 return sta; 292 return sta;
303} 293}
304 294
305static struct sta_info * 295static struct sta_info *
306ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 296ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
307 const u8 *bssid, const u8 *addr, 297 const u8 *bssid, const u8 *addr,
308 u32 supp_rates) 298 u32 supp_rates, bool auth)
309 __acquires(RCU) 299 __acquires(RCU)
310{ 300{
311 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 301 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -318,9 +308,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
318 * allow new one to be added. 308 * allow new one to be added.
319 */ 309 */
320 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 310 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
321 if (net_ratelimit()) 311 net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
322 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 312 sdata->name, addr);
323 sdata->name, addr);
324 rcu_read_lock(); 313 rcu_read_lock();
325 return NULL; 314 return NULL;
326 } 315 }
@@ -330,7 +319,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
330 return NULL; 319 return NULL;
331 } 320 }
332 321
333 if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) { 322 if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) {
334 rcu_read_lock(); 323 rcu_read_lock();
335 return NULL; 324 return NULL;
336 } 325 }
@@ -347,7 +336,42 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
347 sta->sta.supp_rates[band] = supp_rates | 336 sta->sta.supp_rates[band] = supp_rates |
348 ieee80211_mandatory_rates(local, band); 337 ieee80211_mandatory_rates(local, band);
349 338
350 return ieee80211_ibss_finish_sta(sta); 339 return ieee80211_ibss_finish_sta(sta, auth);
340}
341
342static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
343 struct ieee80211_mgmt *mgmt,
344 size_t len)
345{
346 u16 auth_alg, auth_transaction;
347
348 lockdep_assert_held(&sdata->u.ibss.mtx);
349
350 if (len < 24 + 6)
351 return;
352
353 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
354 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
355
356 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
357 return;
358#ifdef CONFIG_MAC80211_IBSS_DEBUG
359 printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
360 "(auth_transaction=%d)\n",
361 sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
362#endif
363 sta_info_destroy_addr(sdata, mgmt->sa);
364 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
365 rcu_read_unlock();
366
367 /*
368 * IEEE 802.11 standard does not require authentication in IBSS
369 * networks and most implementations do not seem to use it.
370 * However, try to reply to authentication attempts if someone
371 * has actually implemented this.
372 */
373 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
374 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
351} 375}
352 376
353static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 377static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -381,14 +405,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
381 return; 405 return;
382 406
383 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 407 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
384 memcmp(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) { 408 ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
385 409
386 rcu_read_lock(); 410 rcu_read_lock();
387 sta = sta_info_get(sdata, mgmt->sa); 411 sta = sta_info_get(sdata, mgmt->sa);
388 412
389 if (elems->supp_rates) { 413 if (elems->supp_rates) {
390 supp_rates = ieee80211_sta_get_rates(local, elems, 414 supp_rates = ieee80211_sta_get_rates(local, elems,
391 band); 415 band, NULL);
392 if (sta) { 416 if (sta) {
393 u32 prev_rates; 417 u32 prev_rates;
394 418
@@ -412,20 +436,20 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
412 } else { 436 } else {
413 rcu_read_unlock(); 437 rcu_read_unlock();
414 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, 438 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
415 mgmt->sa, supp_rates); 439 mgmt->sa, supp_rates, true);
416 } 440 }
417 } 441 }
418 442
419 if (sta && elems->wmm_info) 443 if (sta && elems->wmm_info)
420 set_sta_flag(sta, WLAN_STA_WME); 444 set_sta_flag(sta, WLAN_STA_WME);
421 445
422 if (sta && elems->ht_info_elem && elems->ht_cap_elem && 446 if (sta && elems->ht_operation && elems->ht_cap_elem &&
423 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { 447 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
424 /* we both use HT */ 448 /* we both use HT */
425 struct ieee80211_sta_ht_cap sta_ht_cap_new; 449 struct ieee80211_sta_ht_cap sta_ht_cap_new;
426 enum nl80211_channel_type channel_type = 450 enum nl80211_channel_type channel_type =
427 ieee80211_ht_info_to_channel_type( 451 ieee80211_ht_oper_to_channel_type(
428 elems->ht_info_elem); 452 elems->ht_operation);
429 453
430 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 454 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
431 elems->ht_cap_elem, 455 elems->ht_cap_elem,
@@ -435,8 +459,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
435 * fall back to HT20 if we don't use or use 459 * fall back to HT20 if we don't use or use
436 * the other extension channel 460 * the other extension channel
437 */ 461 */
438 if ((channel_type == NL80211_CHAN_HT40MINUS || 462 if (!(channel_type == NL80211_CHAN_HT40MINUS ||
439 channel_type == NL80211_CHAN_HT40PLUS) && 463 channel_type == NL80211_CHAN_HT40PLUS) ||
440 channel_type != sdata->u.ibss.channel_type) 464 channel_type != sdata->u.ibss.channel_type)
441 sta_ht_cap_new.cap &= 465 sta_ht_cap_new.cap &=
442 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 466 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -486,7 +510,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
486 goto put_bss; 510 goto put_bss;
487 511
488 /* same BSSID */ 512 /* same BSSID */
489 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 513 if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
490 goto put_bss; 514 goto put_bss;
491 515
492 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { 516 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -538,9 +562,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
538 sdata->name, mgmt->bssid); 562 sdata->name, mgmt->bssid);
539#endif 563#endif
540 ieee80211_sta_join_ibss(sdata, bss); 564 ieee80211_sta_join_ibss(sdata, bss);
541 supp_rates = ieee80211_sta_get_rates(local, elems, band); 565 supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
542 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 566 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
543 supp_rates); 567 supp_rates, true);
544 rcu_read_unlock(); 568 rcu_read_unlock();
545 } 569 }
546 570
@@ -562,16 +586,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
562 * allow new one to be added. 586 * allow new one to be added.
563 */ 587 */
564 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 588 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
565 if (net_ratelimit()) 589 net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
566 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 590 sdata->name, addr);
567 sdata->name, addr);
568 return; 591 return;
569 } 592 }
570 593
571 if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) 594 if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
572 return; 595 return;
573 596
574 if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) 597 if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
575 return; 598 return;
576 599
577 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 600 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -643,8 +666,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
643 "IBSS networks with same SSID (merge)\n", sdata->name); 666 "IBSS networks with same SSID (merge)\n", sdata->name);
644 667
645 ieee80211_request_internal_scan(sdata, 668 ieee80211_request_internal_scan(sdata,
646 ifibss->ssid, ifibss->ssid_len, 669 ifibss->ssid, ifibss->ssid_len, NULL);
647 ifibss->fixed_channel ? ifibss->channel : NULL);
648} 670}
649 671
650static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 672static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -810,8 +832,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
810 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) 832 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
811 return; 833 return;
812 834
813 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 && 835 if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) &&
814 memcmp(mgmt->bssid, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0) 836 !is_broadcast_ether_addr(mgmt->bssid))
815 return; 837 return;
816 838
817 end = ((u8 *) mgmt) + len; 839 end = ((u8 *) mgmt) + len;
@@ -855,9 +877,6 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
855 size_t baselen; 877 size_t baselen;
856 struct ieee802_11_elems elems; 878 struct ieee802_11_elems elems;
857 879
858 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
859 return; /* ignore ProbeResp to foreign address */
860
861 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 880 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
862 if (baselen > len) 881 if (baselen > len)
863 return; 882 return;
@@ -945,7 +964,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
945 list_del(&sta->list); 964 list_del(&sta->list);
946 spin_unlock_bh(&ifibss->incomplete_lock); 965 spin_unlock_bh(&ifibss->incomplete_lock);
947 966
948 ieee80211_ibss_finish_sta(sta); 967 ieee80211_ibss_finish_sta(sta, true);
949 rcu_read_unlock(); 968 rcu_read_unlock();
950 spin_lock_bh(&ifibss->incomplete_lock); 969 spin_lock_bh(&ifibss->incomplete_lock);
951 } 970 }
@@ -1045,7 +1064,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1045 4 /* IBSS params */ + 1064 4 /* IBSS params */ +
1046 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 1065 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
1047 2 + sizeof(struct ieee80211_ht_cap) + 1066 2 + sizeof(struct ieee80211_ht_cap) +
1048 2 + sizeof(struct ieee80211_ht_info) + 1067 2 + sizeof(struct ieee80211_ht_operation) +
1049 params->ie_len); 1068 params->ie_len);
1050 if (!skb) 1069 if (!skb)
1051 return -ENOMEM; 1070 return -ENOMEM;
@@ -1059,6 +1078,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1059 sdata->u.ibss.fixed_bssid = false; 1078 sdata->u.ibss.fixed_bssid = false;
1060 1079
1061 sdata->u.ibss.privacy = params->privacy; 1080 sdata->u.ibss.privacy = params->privacy;
1081 sdata->u.ibss.control_port = params->control_port;
1062 sdata->u.ibss.basic_rates = params->basic_rates; 1082 sdata->u.ibss.basic_rates = params->basic_rates;
1063 memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, 1083 memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
1064 sizeof(params->mcast_rate)); 1084 sizeof(params->mcast_rate));
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2f0642d9e154..3f3cd50fff16 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -52,7 +52,8 @@ struct ieee80211_local;
52 * increased memory use (about 2 kB of RAM per entry). */ 52 * increased memory use (about 2 kB of RAM per entry). */
53#define IEEE80211_FRAGMENT_MAX 4 53#define IEEE80211_FRAGMENT_MAX 4
54 54
55#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 55#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
56#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
56 57
57#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 58#define IEEE80211_DEFAULT_UAPSD_QUEUES \
58 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ 59 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
@@ -105,6 +106,44 @@ struct ieee80211_bss {
105 */ 106 */
106 bool has_erp_value; 107 bool has_erp_value;
107 u8 erp_value; 108 u8 erp_value;
109
110 /* Keep track of the corruption of the last beacon/probe response. */
111 u8 corrupt_data;
112
113 /* Keep track of what bits of information we have valid info for. */
114 u8 valid_data;
115};
116
117/**
118 * enum ieee80211_corrupt_data_flags - BSS data corruption flags
119 * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
120 * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
121 *
122 * These are bss flags that are attached to a bss in the
123 * @corrupt_data field of &struct ieee80211_bss.
124 */
125enum ieee80211_bss_corrupt_data_flags {
126 IEEE80211_BSS_CORRUPT_BEACON = BIT(0),
127 IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1)
128};
129
130/**
131 * enum ieee80211_valid_data_flags - BSS valid data flags
132 * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
133 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
134 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
135 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
136 *
137 * These are bss flags that are attached to a bss in the
138 * @valid_data field of &struct ieee80211_bss. They show which parts
139 * of the data structure were recieved as a result of an un-corrupted
140 * beacon/probe response.
141 */
142enum ieee80211_bss_valid_data_flags {
143 IEEE80211_BSS_VALID_DTIM = BIT(0),
144 IEEE80211_BSS_VALID_WMM = BIT(1),
145 IEEE80211_BSS_VALID_RATES = BIT(2),
146 IEEE80211_BSS_VALID_ERP = BIT(3)
108}; 147};
109 148
110static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss) 149static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
@@ -228,7 +267,7 @@ struct ieee80211_rx_data {
228struct beacon_data { 267struct beacon_data {
229 u8 *head, *tail; 268 u8 *head, *tail;
230 int head_len, tail_len; 269 int head_len, tail_len;
231 int dtim_period; 270 struct rcu_head rcu_head;
232}; 271};
233 272
234struct ieee80211_if_ap { 273struct ieee80211_if_ap {
@@ -243,7 +282,7 @@ struct ieee80211_if_ap {
243 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 282 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
244 struct sk_buff_head ps_bc_buf; 283 struct sk_buff_head ps_bc_buf;
245 atomic_t num_sta_ps; /* number of stations in PS mode */ 284 atomic_t num_sta_ps; /* number of stations in PS mode */
246 atomic_t num_sta_authorized; /* number of authorized stations */ 285 atomic_t num_mcast_sta; /* number of stations receiving multicast */
247 int dtim_count; 286 int dtim_count;
248 bool dtim_bc_mc; 287 bool dtim_bc_mc;
249}; 288};
@@ -280,10 +319,6 @@ struct mesh_preq_queue {
280 319
281enum ieee80211_work_type { 320enum ieee80211_work_type {
282 IEEE80211_WORK_ABORT, 321 IEEE80211_WORK_ABORT,
283 IEEE80211_WORK_DIRECT_PROBE,
284 IEEE80211_WORK_AUTH,
285 IEEE80211_WORK_ASSOC_BEACON_WAIT,
286 IEEE80211_WORK_ASSOC,
287 IEEE80211_WORK_REMAIN_ON_CHANNEL, 322 IEEE80211_WORK_REMAIN_ON_CHANNEL,
288 IEEE80211_WORK_OFFCHANNEL_TX, 323 IEEE80211_WORK_OFFCHANNEL_TX,
289}; 324};
@@ -316,36 +351,10 @@ struct ieee80211_work {
316 unsigned long timeout; 351 unsigned long timeout;
317 enum ieee80211_work_type type; 352 enum ieee80211_work_type type;
318 353
319 u8 filter_ta[ETH_ALEN];
320
321 bool started; 354 bool started;
322 355
323 union { 356 union {
324 struct { 357 struct {
325 int tries;
326 u16 algorithm, transaction;
327 u8 ssid[IEEE80211_MAX_SSID_LEN];
328 u8 ssid_len;
329 u8 key[WLAN_KEY_LEN_WEP104];
330 u8 key_len, key_idx;
331 bool privacy;
332 bool synced;
333 } probe_auth;
334 struct {
335 struct cfg80211_bss *bss;
336 const u8 *supp_rates;
337 const u8 *ht_information_ie;
338 enum ieee80211_smps_mode smps;
339 int tries;
340 u16 capability;
341 u8 prev_bssid[ETH_ALEN];
342 u8 ssid[IEEE80211_MAX_SSID_LEN];
343 u8 ssid_len;
344 u8 supp_rates_len;
345 bool wmm_used, use_11n, uapsd_used;
346 bool synced;
347 } assoc;
348 struct {
349 u32 duration; 358 u32 duration;
350 } remain; 359 } remain;
351 struct { 360 struct {
@@ -355,9 +364,8 @@ struct ieee80211_work {
355 } offchan_tx; 364 } offchan_tx;
356 }; 365 };
357 366
358 int ie_len; 367 size_t data_len;
359 /* must be last */ 368 u8 data[];
360 u8 ie[0];
361}; 369};
362 370
363/* flags used in struct ieee80211_if_managed.flags */ 371/* flags used in struct ieee80211_if_managed.flags */
@@ -371,6 +379,43 @@ enum ieee80211_sta_flags {
371 IEEE80211_STA_UAPSD_ENABLED = BIT(7), 379 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
372 IEEE80211_STA_NULLFUNC_ACKED = BIT(8), 380 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
373 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), 381 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
382 IEEE80211_STA_DISABLE_40MHZ = BIT(10),
383};
384
385struct ieee80211_mgd_auth_data {
386 struct cfg80211_bss *bss;
387 unsigned long timeout;
388 int tries;
389 u16 algorithm, expected_transaction;
390
391 u8 key[WLAN_KEY_LEN_WEP104];
392 u8 key_len, key_idx;
393 bool done;
394
395 size_t ie_len;
396 u8 ie[];
397};
398
399struct ieee80211_mgd_assoc_data {
400 struct cfg80211_bss *bss;
401 const u8 *supp_rates;
402 const u8 *ht_operation_ie;
403
404 unsigned long timeout;
405 int tries;
406
407 u16 capability;
408 u8 prev_bssid[ETH_ALEN];
409 u8 ssid[IEEE80211_MAX_SSID_LEN];
410 u8 ssid_len;
411 u8 supp_rates_len;
412 bool wmm, uapsd;
413 bool have_beacon;
414 bool sent_assoc;
415 bool synced;
416
417 size_t ie_len;
418 u8 ie[];
374}; 419};
375 420
376struct ieee80211_if_managed { 421struct ieee80211_if_managed {
@@ -389,6 +434,8 @@ struct ieee80211_if_managed {
389 434
390 struct mutex mtx; 435 struct mutex mtx;
391 struct cfg80211_bss *associated; 436 struct cfg80211_bss *associated;
437 struct ieee80211_mgd_auth_data *auth_data;
438 struct ieee80211_mgd_assoc_data *assoc_data;
392 439
393 u8 bssid[ETH_ALEN]; 440 u8 bssid[ETH_ALEN];
394 441
@@ -414,6 +461,20 @@ struct ieee80211_if_managed {
414 IEEE80211_MFP_REQUIRED 461 IEEE80211_MFP_REQUIRED
415 } mfp; /* management frame protection */ 462 } mfp; /* management frame protection */
416 463
464 /*
465 * Bitmask of enabled u-apsd queues,
466 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
467 * to take effect.
468 */
469 unsigned int uapsd_queues;
470
471 /*
472 * Maximum number of buffered frames AP can deliver during a
473 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
474 * Needs a new association to take effect.
475 */
476 unsigned int uapsd_max_sp_len;
477
417 int wmm_last_param_set; 478 int wmm_last_param_set;
418 479
419 u8 use_4addr; 480 u8 use_4addr;
@@ -470,7 +531,9 @@ struct ieee80211_if_ibss {
470 bool fixed_channel; 531 bool fixed_channel;
471 bool privacy; 532 bool privacy;
472 533
473 u8 bssid[ETH_ALEN]; 534 bool control_port;
535
536 u8 bssid[ETH_ALEN] __aligned(2);
474 u8 ssid[IEEE80211_MAX_SSID_LEN]; 537 u8 ssid[IEEE80211_MAX_SSID_LEN];
475 u8 ssid_len, ie_len; 538 u8 ssid_len, ie_len;
476 u8 *ie; 539 u8 *ie;
@@ -491,6 +554,24 @@ struct ieee80211_if_ibss {
491 } state; 554 } state;
492}; 555};
493 556
557/**
558 * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
559 *
560 * these declarations define the interface, which enables
561 * vendor-specific mesh synchronization
562 *
563 */
564struct ieee802_11_elems;
565struct ieee80211_mesh_sync_ops {
566 void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata,
567 u16 stype,
568 struct ieee80211_mgmt *mgmt,
569 struct ieee802_11_elems *elems,
570 struct ieee80211_rx_status *rx_status);
571 void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata);
572 /* add other framework functions here */
573};
574
494struct ieee80211_if_mesh { 575struct ieee80211_if_mesh {
495 struct timer_list housekeeping_timer; 576 struct timer_list housekeeping_timer;
496 struct timer_list mesh_path_timer; 577 struct timer_list mesh_path_timer;
@@ -539,6 +620,11 @@ struct ieee80211_if_mesh {
539 IEEE80211_MESH_SEC_AUTHED = 0x1, 620 IEEE80211_MESH_SEC_AUTHED = 0x1,
540 IEEE80211_MESH_SEC_SECURED = 0x2, 621 IEEE80211_MESH_SEC_SECURED = 0x2,
541 } security; 622 } security;
623 /* Extensible Synchronization Framework */
624 struct ieee80211_mesh_sync_ops *sync_ops;
625 s64 sync_offset_clockdrift_max;
626 spinlock_t sync_offset_lock;
627 bool adjusting_tbtt;
542}; 628};
543 629
544#ifdef CONFIG_MAC80211_MESH 630#ifdef CONFIG_MAC80211_MESH
@@ -605,12 +691,6 @@ struct ieee80211_sub_if_data {
605 691
606 char name[IFNAMSIZ]; 692 char name[IFNAMSIZ];
607 693
608 /*
609 * keep track of whether the HT opmode (stored in
610 * vif.bss_info.ht_operation_mode) is valid.
611 */
612 bool ht_opmode_valid;
613
614 /* to detect idle changes */ 694 /* to detect idle changes */
615 bool old_idle; 695 bool old_idle;
616 696
@@ -630,7 +710,7 @@ struct ieee80211_sub_if_data {
630 __be16 control_port_protocol; 710 __be16 control_port_protocol;
631 bool control_port_no_encrypt; 711 bool control_port_no_encrypt;
632 712
633 struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES]; 713 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
634 714
635 struct work_struct work; 715 struct work_struct work;
636 struct sk_buff_head skb_queue; 716 struct sk_buff_head skb_queue;
@@ -646,6 +726,7 @@ struct ieee80211_sub_if_data {
646 726
647 /* bitmap of allowed (non-MCS) rate indexes for rate control */ 727 /* bitmap of allowed (non-MCS) rate indexes for rate control */
648 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS]; 728 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
729 u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
649 730
650 union { 731 union {
651 struct ieee80211_if_ap ap; 732 struct ieee80211_if_ap ap;
@@ -699,7 +780,6 @@ enum queue_stop_reason {
699 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 780 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
700 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 781 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
701 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 782 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
702 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
703}; 783};
704 784
705#ifdef CONFIG_MAC80211_LEDS 785#ifdef CONFIG_MAC80211_LEDS
@@ -723,6 +803,8 @@ struct tpt_led_trigger {
723 * well be on the operating channel 803 * well be on the operating channel
724 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to 804 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
725 * determine if we are on the operating channel or not 805 * determine if we are on the operating channel or not
806 * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating
807 * channel. This should not interrupt normal traffic.
726 * @SCAN_COMPLETED: Set for our scan work function when the driver reported 808 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
727 * that the scan completed. 809 * that the scan completed.
728 * @SCAN_ABORTED: Set for our scan work function when the driver reported 810 * @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -731,6 +813,7 @@ struct tpt_led_trigger {
731enum { 813enum {
732 SCAN_SW_SCANNING, 814 SCAN_SW_SCANNING,
733 SCAN_HW_SCANNING, 815 SCAN_HW_SCANNING,
816 SCAN_ONCHANNEL_SCANNING,
734 SCAN_COMPLETED, 817 SCAN_COMPLETED,
735 SCAN_ABORTED, 818 SCAN_ABORTED,
736}; 819};
@@ -769,7 +852,6 @@ struct ieee80211_local {
769 struct list_head work_list; 852 struct list_head work_list;
770 struct timer_list work_timer; 853 struct timer_list work_timer;
771 struct work_struct work_work; 854 struct work_struct work_work;
772 struct sk_buff_head work_skb_queue;
773 855
774 /* 856 /*
775 * private workqueue to mac80211. mac80211 makes this accessible 857 * private workqueue to mac80211. mac80211 makes this accessible
@@ -970,20 +1052,6 @@ struct ieee80211_local {
970 */ 1052 */
971 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 1053 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
972 1054
973 /*
974 * Bitmask of enabled u-apsd queues,
975 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
976 * to take effect.
977 */
978 unsigned int uapsd_queues;
979
980 /*
981 * Maximum number of buffered frames AP can deliver during a
982 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
983 * Needs a new association to take effect.
984 */
985 unsigned int uapsd_max_sp_len;
986
987 bool pspolling; 1055 bool pspolling;
988 bool offchannel_ps_enabled; 1056 bool offchannel_ps_enabled;
989 /* 1057 /*
@@ -1035,6 +1103,9 @@ struct ieee80211_local {
1035 struct net_device napi_dev; 1103 struct net_device napi_dev;
1036 1104
1037 struct napi_struct napi; 1105 struct napi_struct napi;
1106
1107 /* virtual monitor interface */
1108 struct ieee80211_sub_if_data __rcu *monitor_sdata;
1038}; 1109};
1039 1110
1040static inline struct ieee80211_sub_if_data * 1111static inline struct ieee80211_sub_if_data *
@@ -1070,7 +1141,7 @@ struct ieee802_11_elems {
1070 u8 *wmm_info; 1141 u8 *wmm_info;
1071 u8 *wmm_param; 1142 u8 *wmm_param;
1072 struct ieee80211_ht_cap *ht_cap_elem; 1143 struct ieee80211_ht_cap *ht_cap_elem;
1073 struct ieee80211_ht_info *ht_info_elem; 1144 struct ieee80211_ht_operation *ht_operation;
1074 struct ieee80211_meshconf_ie *mesh_config; 1145 struct ieee80211_meshconf_ie *mesh_config;
1075 u8 *mesh_id; 1146 u8 *mesh_id;
1076 u8 *peering; 1147 u8 *peering;
@@ -1110,6 +1181,9 @@ struct ieee802_11_elems {
1110 u8 quiet_elem_len; 1181 u8 quiet_elem_len;
1111 u8 num_of_quiet_elem; /* can be more the one */ 1182 u8 num_of_quiet_elem; /* can be more the one */
1112 u8 timeout_int_len; 1183 u8 timeout_int_len;
1184
1185 /* whether a parse error occurred while retrieving these elements */
1186 bool parse_error;
1113}; 1187};
1114 1188
1115static inline struct ieee80211_local *hw_to_local( 1189static inline struct ieee80211_local *hw_to_local(
@@ -1118,16 +1192,10 @@ static inline struct ieee80211_local *hw_to_local(
1118 return container_of(hw, struct ieee80211_local, hw); 1192 return container_of(hw, struct ieee80211_local, hw);
1119} 1193}
1120 1194
1121static inline struct ieee80211_hw *local_to_hw(
1122 struct ieee80211_local *local)
1123{
1124 return &local->hw;
1125}
1126
1127 1195
1128static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 1196static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
1129{ 1197{
1130 return compare_ether_addr(raddr, addr) == 0 || 1198 return ether_addr_equal(raddr, addr) ||
1131 is_broadcast_ether_addr(raddr); 1199 is_broadcast_ether_addr(raddr);
1132} 1200}
1133 1201
@@ -1146,11 +1214,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
1146int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1214int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1147 struct cfg80211_assoc_request *req); 1215 struct cfg80211_assoc_request *req);
1148int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 1216int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
1149 struct cfg80211_deauth_request *req, 1217 struct cfg80211_deauth_request *req);
1150 void *cookie);
1151int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 1218int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
1152 struct cfg80211_disassoc_request *req, 1219 struct cfg80211_disassoc_request *req);
1153 void *cookie);
1154void ieee80211_send_pspoll(struct ieee80211_local *local, 1220void ieee80211_send_pspoll(struct ieee80211_local *local,
1155 struct ieee80211_sub_if_data *sdata); 1221 struct ieee80211_sub_if_data *sdata);
1156void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); 1222void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
@@ -1168,6 +1234,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1168 struct sk_buff *skb); 1234 struct sk_buff *skb);
1169void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1235void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1170void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1236void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1237void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1171 1238
1172/* IBSS code */ 1239/* IBSS code */
1173void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1240void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1196,6 +1263,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
1196int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 1263int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
1197 struct cfg80211_scan_request *req); 1264 struct cfg80211_scan_request *req);
1198void ieee80211_scan_cancel(struct ieee80211_local *local); 1265void ieee80211_scan_cancel(struct ieee80211_local *local);
1266void ieee80211_run_deferred_scan(struct ieee80211_local *local);
1199ieee80211_rx_result 1267ieee80211_rx_result
1200ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1268ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1201 1269
@@ -1208,9 +1276,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
1208 struct ieee802_11_elems *elems, 1276 struct ieee802_11_elems *elems,
1209 struct ieee80211_channel *channel, 1277 struct ieee80211_channel *channel,
1210 bool beacon); 1278 bool beacon);
1211struct ieee80211_bss *
1212ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
1213 u8 *ssid, u8 ssid_len);
1214void ieee80211_rx_bss_put(struct ieee80211_local *local, 1279void ieee80211_rx_bss_put(struct ieee80211_local *local,
1215 struct ieee80211_bss *bss); 1280 struct ieee80211_bss *bss);
1216 1281
@@ -1256,7 +1321,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1256 struct net_device *dev); 1321 struct net_device *dev);
1257 1322
1258/* HT */ 1323/* HT */
1259bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
1260void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 1324void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
1261 struct ieee80211_sta_ht_cap *ht_cap); 1325 struct ieee80211_sta_ht_cap *ht_cap);
1262void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 1326void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
@@ -1340,12 +1404,13 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1340extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1404extern void *mac80211_wiphy_privid; /* for wiphy privid */
1341u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 1405u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
1342 enum nl80211_iftype type); 1406 enum nl80211_iftype type);
1343int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1407int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
1344 int rate, int erp, int short_preamble); 1408 int rate, int erp, int short_preamble);
1345void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, 1409void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
1346 struct ieee80211_hdr *hdr, const u8 *tsc, 1410 struct ieee80211_hdr *hdr, const u8 *tsc,
1347 gfp_t gfp); 1411 gfp_t gfp);
1348void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); 1412void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
1413 bool bss_notify);
1349void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1414void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1350 1415
1351void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 1416void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
@@ -1385,18 +1450,22 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
1385 enum queue_stop_reason reason); 1450 enum queue_stop_reason reason);
1386void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 1451void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
1387 enum queue_stop_reason reason); 1452 enum queue_stop_reason reason);
1453void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
1388void ieee80211_add_pending_skb(struct ieee80211_local *local, 1454void ieee80211_add_pending_skb(struct ieee80211_local *local,
1389 struct sk_buff *skb); 1455 struct sk_buff *skb);
1390void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1391 struct sk_buff_head *skbs);
1392void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 1456void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
1393 struct sk_buff_head *skbs, 1457 struct sk_buff_head *skbs,
1394 void (*fn)(void *data), void *data); 1458 void (*fn)(void *data), void *data);
1459static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1460 struct sk_buff_head *skbs)
1461{
1462 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
1463}
1395 1464
1396void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1465void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1397 u16 transaction, u16 auth_alg, 1466 u16 transaction, u16 auth_alg,
1398 u8 *extra, size_t extra_len, const u8 *bssid, 1467 u8 *extra, size_t extra_len, const u8 *bssid,
1399 const u8 *key, u8 key_len, u8 key_idx); 1468 const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
1400int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1469int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1401 const u8 *ie, size_t ie_len, 1470 const u8 *ie, size_t ie_len,
1402 enum ieee80211_band band, u32 rate_mask, 1471 enum ieee80211_band band, u32 rate_mask,
@@ -1416,7 +1485,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1416 const u8 *supp_rates); 1485 const u8 *supp_rates);
1417u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1486u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1418 struct ieee802_11_elems *elems, 1487 struct ieee802_11_elems *elems,
1419 enum ieee80211_band band); 1488 enum ieee80211_band band, u32 *basic_rates);
1420int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, 1489int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1421 enum ieee80211_smps_mode smps_mode); 1490 enum ieee80211_smps_mode smps_mode);
1422void ieee80211_recalc_smps(struct ieee80211_local *local); 1491void ieee80211_recalc_smps(struct ieee80211_local *local);
@@ -1426,18 +1495,16 @@ size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1426size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); 1495size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1427u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1496u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1428 u16 cap); 1497 u16 cap);
1429u8 *ieee80211_ie_build_ht_info(u8 *pos, 1498u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1430 struct ieee80211_sta_ht_cap *ht_cap, 1499 struct ieee80211_channel *channel,
1431 struct ieee80211_channel *channel, 1500 enum nl80211_channel_type channel_type,
1432 enum nl80211_channel_type channel_type); 1501 u16 prot_mode);
1433 1502
1434/* internal work items */ 1503/* internal work items */
1435void ieee80211_work_init(struct ieee80211_local *local); 1504void ieee80211_work_init(struct ieee80211_local *local);
1436void ieee80211_add_work(struct ieee80211_work *wk); 1505void ieee80211_add_work(struct ieee80211_work *wk);
1437void free_work(struct ieee80211_work *wk); 1506void free_work(struct ieee80211_work *wk);
1438void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata); 1507void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1439ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1440 struct sk_buff *skb);
1441int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, 1508int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1442 struct ieee80211_channel *chan, 1509 struct ieee80211_channel *chan,
1443 enum nl80211_channel_type channel_type, 1510 enum nl80211_channel_type channel_type,
@@ -1459,7 +1526,7 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
1459 struct ieee80211_sub_if_data *sdata, 1526 struct ieee80211_sub_if_data *sdata,
1460 enum nl80211_channel_type chantype); 1527 enum nl80211_channel_type chantype);
1461enum nl80211_channel_type 1528enum nl80211_channel_type
1462ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); 1529ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
1463 1530
1464#ifdef CONFIG_MAC80211_NOINLINE 1531#ifdef CONFIG_MAC80211_NOINLINE
1465#define debug_noinline noinline 1532#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8e2137bd87e2..d4c19a7773db 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -127,7 +127,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
127 * The remaining checks are only performed for interfaces 127 * The remaining checks are only performed for interfaces
128 * with the same MAC address. 128 * with the same MAC address.
129 */ 129 */
130 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) 130 if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
131 continue; 131 continue;
132 132
133 /* 133 /*
@@ -149,6 +149,35 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
149 return 0; 149 return 0;
150} 150}
151 151
152static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
153{
154 int n_queues = sdata->local->hw.queues;
155 int i;
156
157 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
158 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
159 IEEE80211_INVAL_HW_QUEUE))
160 return -EINVAL;
161 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
162 n_queues))
163 return -EINVAL;
164 }
165
166 if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
167 !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
168 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
169 return 0;
170 }
171
172 if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE))
173 return -EINVAL;
174
175 if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues))
176 return -EINVAL;
177
178 return 0;
179}
180
152void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, 181void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
153 const int offset) 182 const int offset)
154{ 183{
@@ -169,6 +198,83 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
169#undef ADJUST 198#undef ADJUST
170} 199}
171 200
201static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
202{
203 struct ieee80211_local *local = sdata->local;
204 int i;
205
206 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
207 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
208 sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
209 else if (local->hw.queues >= IEEE80211_NUM_ACS)
210 sdata->vif.hw_queue[i] = i;
211 else
212 sdata->vif.hw_queue[i] = 0;
213 }
214 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
215}
216
217static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
218{
219 struct ieee80211_sub_if_data *sdata;
220 int ret;
221
222 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
223 return 0;
224
225 if (local->monitor_sdata)
226 return 0;
227
228 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
229 if (!sdata)
230 return -ENOMEM;
231
232 /* set up data */
233 sdata->local = local;
234 sdata->vif.type = NL80211_IFTYPE_MONITOR;
235 snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
236 wiphy_name(local->hw.wiphy));
237
238 ieee80211_set_default_queues(sdata);
239
240 ret = drv_add_interface(local, sdata);
241 if (WARN_ON(ret)) {
242 /* ok .. stupid driver, it asked for this! */
243 kfree(sdata);
244 return ret;
245 }
246
247 ret = ieee80211_check_queues(sdata);
248 if (ret) {
249 kfree(sdata);
250 return ret;
251 }
252
253 rcu_assign_pointer(local->monitor_sdata, sdata);
254
255 return 0;
256}
257
258static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
259{
260 struct ieee80211_sub_if_data *sdata;
261
262 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
263 return;
264
265 sdata = rtnl_dereference(local->monitor_sdata);
266
267 if (!sdata)
268 return;
269
270 rcu_assign_pointer(local->monitor_sdata, NULL);
271 synchronize_net();
272
273 drv_remove_interface(local, sdata);
274
275 kfree(sdata);
276}
277
172/* 278/*
173 * NOTE: Be very careful when changing this function, it must NOT return 279 * NOTE: Be very careful when changing this function, it must NOT return
174 * an error on interface type changes that have been pre-checked, so most 280 * an error on interface type changes that have been pre-checked, so most
@@ -246,15 +352,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
246 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 352 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
247 353
248 if (!is_valid_ether_addr(dev->dev_addr)) { 354 if (!is_valid_ether_addr(dev->dev_addr)) {
249 if (!local->open_count) 355 res = -EADDRNOTAVAIL;
250 drv_stop(local); 356 goto err_stop;
251 return -EADDRNOTAVAIL;
252 } 357 }
253 } 358 }
254 359
255 switch (sdata->vif.type) { 360 switch (sdata->vif.type) {
256 case NL80211_IFTYPE_AP_VLAN: 361 case NL80211_IFTYPE_AP_VLAN:
257 /* no need to tell driver */ 362 /* no need to tell driver, but set carrier */
363 if (rtnl_dereference(sdata->bss->beacon))
364 netif_carrier_on(dev);
365 else
366 netif_carrier_off(dev);
258 break; 367 break;
259 case NL80211_IFTYPE_MONITOR: 368 case NL80211_IFTYPE_MONITOR:
260 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { 369 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -262,6 +371,12 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
262 break; 371 break;
263 } 372 }
264 373
374 if (local->monitors == 0 && local->open_count == 0) {
375 res = ieee80211_add_virtual_monitor(local);
376 if (res)
377 goto err_stop;
378 }
379
265 /* must be before the call to ieee80211_configure_filter */ 380 /* must be before the call to ieee80211_configure_filter */
266 local->monitors++; 381 local->monitors++;
267 if (local->monitors == 1) { 382 if (local->monitors == 1) {
@@ -276,9 +391,14 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
276 break; 391 break;
277 default: 392 default:
278 if (coming_up) { 393 if (coming_up) {
394 ieee80211_del_virtual_monitor(local);
395
279 res = drv_add_interface(local, sdata); 396 res = drv_add_interface(local, sdata);
280 if (res) 397 if (res)
281 goto err_stop; 398 goto err_stop;
399 res = ieee80211_check_queues(sdata);
400 if (res)
401 goto err_del_interface;
282 } 402 }
283 403
284 if (sdata->vif.type == NL80211_IFTYPE_AP) { 404 if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -294,7 +414,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
294 ieee80211_bss_info_change_notify(sdata, changed); 414 ieee80211_bss_info_change_notify(sdata, changed);
295 415
296 if (sdata->vif.type == NL80211_IFTYPE_STATION || 416 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
297 sdata->vif.type == NL80211_IFTYPE_ADHOC) 417 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
418 sdata->vif.type == NL80211_IFTYPE_AP)
298 netif_carrier_off(dev); 419 netif_carrier_off(dev);
299 else 420 else
300 netif_carrier_on(dev); 421 netif_carrier_on(dev);
@@ -304,7 +425,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
304 * need to initialise the hardware if the hardware 425 * need to initialise the hardware if the hardware
305 * doesn't start up with sane defaults 426 * doesn't start up with sane defaults
306 */ 427 */
307 ieee80211_set_wmm_default(sdata); 428 ieee80211_set_wmm_default(sdata, true);
308 } 429 }
309 430
310 set_bit(SDATA_STATE_RUNNING, &sdata->state); 431 set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -318,9 +439,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
318 goto err_del_interface; 439 goto err_del_interface;
319 } 440 }
320 441
321 sta_info_move_state(sta, IEEE80211_STA_AUTH); 442 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
322 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 443 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
323 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 444 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
324 445
325 res = sta_info_insert(sta); 446 res = sta_info_insert(sta);
326 if (res) { 447 if (res) {
@@ -366,6 +487,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
366 sdata->bss = NULL; 487 sdata->bss = NULL;
367 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 488 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
368 list_del(&sdata->u.vlan.list); 489 list_del(&sdata->u.vlan.list);
490 /* might already be clear but that doesn't matter */
369 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 491 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
370 return res; 492 return res;
371} 493}
@@ -486,6 +608,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
486 /* free all potentially still buffered bcast frames */ 608 /* free all potentially still buffered bcast frames */
487 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); 609 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
488 skb_queue_purge(&sdata->u.ap.ps_bc_buf); 610 skb_queue_purge(&sdata->u.ap.ps_bc_buf);
611 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
612 ieee80211_mgd_stop(sdata);
489 } 613 }
490 614
491 if (going_down) 615 if (going_down)
@@ -506,6 +630,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
506 if (local->monitors == 0) { 630 if (local->monitors == 0) {
507 local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; 631 local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
508 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; 632 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
633 ieee80211_del_virtual_monitor(local);
509 } 634 }
510 635
511 ieee80211_adjust_monitor_flags(sdata, -1); 636 ieee80211_adjust_monitor_flags(sdata, -1);
@@ -579,6 +704,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
579 } 704 }
580 } 705 }
581 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 706 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
707
708 if (local->monitors == local->open_count && local->monitors > 0)
709 ieee80211_add_virtual_monitor(local);
582} 710}
583 711
584static int ieee80211_stop(struct net_device *dev) 712static int ieee80211_stop(struct net_device *dev)
@@ -674,7 +802,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
674 struct ieee80211_hdr *hdr; 802 struct ieee80211_hdr *hdr;
675 struct ieee80211_radiotap_header *rtap = (void *)skb->data; 803 struct ieee80211_radiotap_header *rtap = (void *)skb->data;
676 804
677 if (local->hw.queues < 4) 805 if (local->hw.queues < IEEE80211_NUM_ACS)
678 return 0; 806 return 0;
679 807
680 if (skb->len < 4 || 808 if (skb->len < 4 ||
@@ -905,6 +1033,18 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
905 ieee80211_debugfs_add_netdev(sdata); 1033 ieee80211_debugfs_add_netdev(sdata);
906} 1034}
907 1035
1036static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
1037{
1038 switch (sdata->vif.type) {
1039 case NL80211_IFTYPE_MESH_POINT:
1040 mesh_path_flush_by_iface(sdata);
1041 break;
1042
1043 default:
1044 break;
1045 }
1046}
1047
908static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, 1048static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
909 enum nl80211_iftype type) 1049 enum nl80211_iftype type)
910{ 1050{
@@ -968,6 +1108,13 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
968 if (ret) 1108 if (ret)
969 type = sdata->vif.type; 1109 type = sdata->vif.type;
970 1110
1111 /*
1112 * Ignore return value here, there's not much we can do since
1113 * the driver changed the interface type internally already.
1114 * The warnings will hopefully make driver authors fix it :-)
1115 */
1116 ieee80211_check_queues(sdata);
1117
971 ieee80211_setup_sdata(sdata, type); 1118 ieee80211_setup_sdata(sdata, type);
972 1119
973 err = ieee80211_do_open(sdata->dev, false); 1120 err = ieee80211_do_open(sdata->dev, false);
@@ -1131,11 +1278,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1131 struct net_device *ndev; 1278 struct net_device *ndev;
1132 struct ieee80211_sub_if_data *sdata = NULL; 1279 struct ieee80211_sub_if_data *sdata = NULL;
1133 int ret, i; 1280 int ret, i;
1281 int txqs = 1;
1134 1282
1135 ASSERT_RTNL(); 1283 ASSERT_RTNL();
1136 1284
1285 if (local->hw.queues >= IEEE80211_NUM_ACS)
1286 txqs = IEEE80211_NUM_ACS;
1287
1137 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, 1288 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
1138 name, ieee80211_if_setup, local->hw.queues, 1); 1289 name, ieee80211_if_setup, txqs, 1);
1139 if (!ndev) 1290 if (!ndev)
1140 return -ENOMEM; 1291 return -ENOMEM;
1141 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1292 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1181,8 +1332,17 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1181 sband = local->hw.wiphy->bands[i]; 1332 sband = local->hw.wiphy->bands[i];
1182 sdata->rc_rateidx_mask[i] = 1333 sdata->rc_rateidx_mask[i] =
1183 sband ? (1 << sband->n_bitrates) - 1 : 0; 1334 sband ? (1 << sband->n_bitrates) - 1 : 0;
1335 if (sband)
1336 memcpy(sdata->rc_rateidx_mcs_mask[i],
1337 sband->ht_cap.mcs.rx_mask,
1338 sizeof(sdata->rc_rateidx_mcs_mask[i]));
1339 else
1340 memset(sdata->rc_rateidx_mcs_mask[i], 0,
1341 sizeof(sdata->rc_rateidx_mcs_mask[i]));
1184 } 1342 }
1185 1343
1344 ieee80211_set_default_queues(sdata);
1345
1186 /* setup type-dependent data */ 1346 /* setup type-dependent data */
1187 ieee80211_setup_sdata(sdata, type); 1347 ieee80211_setup_sdata(sdata, type);
1188 1348
@@ -1218,8 +1378,8 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1218 list_del_rcu(&sdata->list); 1378 list_del_rcu(&sdata->list);
1219 mutex_unlock(&sdata->local->iflist_mtx); 1379 mutex_unlock(&sdata->local->iflist_mtx);
1220 1380
1221 if (ieee80211_vif_is_mesh(&sdata->vif)) 1381 /* clean up type-dependent data */
1222 mesh_path_flush_by_iface(sdata); 1382 ieee80211_clean_sdata(sdata);
1223 1383
1224 synchronize_rcu(); 1384 synchronize_rcu();
1225 unregister_netdevice(sdata->dev); 1385 unregister_netdevice(sdata->dev);
@@ -1240,8 +1400,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1240 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1400 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1241 list_del(&sdata->list); 1401 list_del(&sdata->list);
1242 1402
1243 if (ieee80211_vif_is_mesh(&sdata->vif)) 1403 ieee80211_clean_sdata(sdata);
1244 mesh_path_flush_by_iface(sdata);
1245 1404
1246 unregister_netdevice_queue(sdata->dev, &unreg_list); 1405 unregister_netdevice_queue(sdata->dev, &unreg_list);
1247 } 1406 }
@@ -1303,7 +1462,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1303 1462
1304 /* do not count disabled managed interfaces */ 1463 /* do not count disabled managed interfaces */
1305 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1464 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1306 !sdata->u.mgd.associated) { 1465 !sdata->u.mgd.associated &&
1466 !sdata->u.mgd.auth_data &&
1467 !sdata->u.mgd.assoc_data) {
1307 sdata->vif.bss_conf.idle = true; 1468 sdata->vif.bss_conf.idle = true;
1308 continue; 1469 continue;
1309 } 1470 }
@@ -1323,7 +1484,8 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1323 wk->sdata->vif.bss_conf.idle = false; 1484 wk->sdata->vif.bss_conf.idle = false;
1324 } 1485 }
1325 1486
1326 if (local->scan_sdata) { 1487 if (local->scan_sdata &&
1488 !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
1327 scanning = true; 1489 scanning = true;
1328 local->scan_sdata->vif.bss_conf.idle = false; 1490 local->scan_sdata->vif.bss_conf.idle = false;
1329 } 1491 }
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 87a89741432d..5bb600d93d77 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include <asm/unaligned.h>
20#include "ieee80211_i.h" 21#include "ieee80211_i.h"
21#include "driver-ops.h" 22#include "driver-ops.h"
22#include "debugfs_key.h" 23#include "debugfs_key.h"
@@ -54,14 +55,6 @@ static void assert_key_lock(struct ieee80211_local *local)
54 lockdep_assert_held(&local->key_mtx); 55 lockdep_assert_held(&local->key_mtx);
55} 56}
56 57
57static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
58{
59 if (key->sta)
60 return &key->sta->sta;
61
62 return NULL;
63}
64
65static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) 58static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
66{ 59{
67 /* 60 /*
@@ -95,7 +88,7 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
95static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 88static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
96{ 89{
97 struct ieee80211_sub_if_data *sdata; 90 struct ieee80211_sub_if_data *sdata;
98 struct ieee80211_sta *sta; 91 struct sta_info *sta;
99 int ret; 92 int ret;
100 93
101 might_sleep(); 94 might_sleep();
@@ -105,7 +98,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
105 98
106 assert_key_lock(key->local); 99 assert_key_lock(key->local);
107 100
108 sta = get_sta_for_key(key); 101 sta = key->sta;
109 102
110 /* 103 /*
111 * If this is a per-STA GTK, check if it 104 * If this is a per-STA GTK, check if it
@@ -115,6 +108,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
115 !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)) 108 !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK))
116 goto out_unsupported; 109 goto out_unsupported;
117 110
111 if (sta && !sta->uploaded)
112 goto out_unsupported;
113
118 sdata = key->sdata; 114 sdata = key->sdata;
119 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 115 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
120 /* 116 /*
@@ -123,12 +119,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
123 */ 119 */
124 if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) 120 if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
125 goto out_unsupported; 121 goto out_unsupported;
126 sdata = container_of(sdata->bss,
127 struct ieee80211_sub_if_data,
128 u.ap);
129 } 122 }
130 123
131 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); 124 ret = drv_set_key(key->local, SET_KEY, sdata,
125 sta ? &sta->sta : NULL, &key->conf);
132 126
133 if (!ret) { 127 if (!ret) {
134 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 128 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
@@ -147,7 +141,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
147 if (ret != -ENOSPC && ret != -EOPNOTSUPP) 141 if (ret != -ENOSPC && ret != -EOPNOTSUPP)
148 wiphy_err(key->local->hw.wiphy, 142 wiphy_err(key->local->hw.wiphy,
149 "failed to set key (%d, %pM) to hardware (%d)\n", 143 "failed to set key (%d, %pM) to hardware (%d)\n",
150 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 144 key->conf.keyidx,
145 sta ? sta->sta.addr : bcast_addr, ret);
151 146
152 out_unsupported: 147 out_unsupported:
153 switch (key->conf.cipher) { 148 switch (key->conf.cipher) {
@@ -166,7 +161,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
166static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 161static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
167{ 162{
168 struct ieee80211_sub_if_data *sdata; 163 struct ieee80211_sub_if_data *sdata;
169 struct ieee80211_sta *sta; 164 struct sta_info *sta;
170 int ret; 165 int ret;
171 166
172 might_sleep(); 167 might_sleep();
@@ -179,7 +174,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
179 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 174 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
180 return; 175 return;
181 176
182 sta = get_sta_for_key(key); 177 sta = key->sta;
183 sdata = key->sdata; 178 sdata = key->sdata;
184 179
185 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 180 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
@@ -187,18 +182,14 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
187 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 182 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
188 increment_tailroom_need_count(sdata); 183 increment_tailroom_need_count(sdata);
189 184
190 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
191 sdata = container_of(sdata->bss,
192 struct ieee80211_sub_if_data,
193 u.ap);
194
195 ret = drv_set_key(key->local, DISABLE_KEY, sdata, 185 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
196 sta, &key->conf); 186 sta ? &sta->sta : NULL, &key->conf);
197 187
198 if (ret) 188 if (ret)
199 wiphy_err(key->local->hw.wiphy, 189 wiphy_err(key->local->hw.wiphy,
200 "failed to remove key (%d, %pM) from hardware (%d)\n", 190 "failed to remove key (%d, %pM) from hardware (%d)\n",
201 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 191 key->conf.keyidx,
192 sta ? sta->sta.addr : bcast_addr, ret);
202 193
203 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 194 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
204} 195}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b142bd4c2390..f5548e953259 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,8 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
47 if (atomic_read(&local->iff_allmultis)) 47 if (atomic_read(&local->iff_allmultis))
48 new_flags |= FIF_ALLMULTI; 48 new_flags |= FIF_ALLMULTI;
49 49
50 if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning)) 50 if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) ||
51 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning))
51 new_flags |= FIF_BCN_PRBRESP_PROMISC; 52 new_flags |= FIF_BCN_PRBRESP_PROMISC;
52 53
53 if (local->fif_probe_req || local->probe_req_reg) 54 if (local->fif_probe_req || local->probe_req_reg)
@@ -102,9 +103,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
102 103
103 might_sleep(); 104 might_sleep();
104 105
105 /* If this off-channel logic ever changes, ieee80211_on_oper_channel
106 * may need to change as well.
107 */
108 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; 106 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
109 if (local->scan_channel) { 107 if (local->scan_channel) {
110 chan = local->scan_channel; 108 chan = local->scan_channel;
@@ -151,11 +149,13 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
151 } 149 }
152 150
153 if (test_bit(SCAN_SW_SCANNING, &local->scanning) || 151 if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
154 test_bit(SCAN_HW_SCANNING, &local->scanning)) 153 test_bit(SCAN_HW_SCANNING, &local->scanning))
155 power = chan->max_power; 154 power = chan->max_power;
156 else 155 else
157 power = local->power_constr_level ? 156 power = local->power_constr_level ?
158 (chan->max_power - local->power_constr_level) : 157 min(chan->max_power,
158 (chan->max_reg_power - local->power_constr_level)) :
159 chan->max_power; 159 chan->max_power;
160 160
161 if (local->user_power_level >= 0) 161 if (local->user_power_level >= 0)
@@ -198,15 +198,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
198 return; 198 return;
199 199
200 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 200 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
201 /* 201 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
202 * While not associated, claim a BSSID of all-zeroes
203 * so that drivers don't do any weird things with the
204 * BSSID at that time.
205 */
206 if (sdata->vif.bss_conf.assoc)
207 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
208 else
209 sdata->vif.bss_conf.bssid = zero;
210 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 202 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
211 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 203 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
212 else if (sdata->vif.type == NL80211_IFTYPE_AP) 204 else if (sdata->vif.type == NL80211_IFTYPE_AP)
@@ -293,11 +285,11 @@ static void ieee80211_tasklet_handler(unsigned long data)
293 /* Clear skb->pkt_type in order to not confuse kernel 285 /* Clear skb->pkt_type in order to not confuse kernel
294 * netstack. */ 286 * netstack. */
295 skb->pkt_type = 0; 287 skb->pkt_type = 0;
296 ieee80211_rx(local_to_hw(local), skb); 288 ieee80211_rx(&local->hw, skb);
297 break; 289 break;
298 case IEEE80211_TX_STATUS_MSG: 290 case IEEE80211_TX_STATUS_MSG:
299 skb->pkt_type = 0; 291 skb->pkt_type = 0;
300 ieee80211_tx_status(local_to_hw(local), skb); 292 ieee80211_tx_status(&local->hw, skb);
301 break; 293 break;
302 case IEEE80211_EOSP_MSG: 294 case IEEE80211_EOSP_MSG:
303 eosp_data = (void *)skb->cb; 295 eosp_data = (void *)skb->cb;
@@ -534,6 +526,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
534 int priv_size, i; 526 int priv_size, i;
535 struct wiphy *wiphy; 527 struct wiphy *wiphy;
536 528
529 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
530 return NULL;
531
537 /* Ensure 32-byte alignment of our private data and hw private data. 532 /* Ensure 32-byte alignment of our private data and hw private data.
538 * We use the wiphy priv data for both our ieee80211_local and for 533 * We use the wiphy priv data for both our ieee80211_local and for
539 * the driver's private data 534 * the driver's private data
@@ -564,8 +559,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
564 WIPHY_FLAG_4ADDR_AP | 559 WIPHY_FLAG_4ADDR_AP |
565 WIPHY_FLAG_4ADDR_STATION | 560 WIPHY_FLAG_4ADDR_STATION |
566 WIPHY_FLAG_REPORTS_OBSS | 561 WIPHY_FLAG_REPORTS_OBSS |
567 WIPHY_FLAG_OFFCHAN_TX | 562 WIPHY_FLAG_OFFCHAN_TX;
568 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 563
564 if (ops->remain_on_channel)
565 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
569 566
570 wiphy->features = NL80211_FEATURE_SK_TX_STATUS | 567 wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
571 NL80211_FEATURE_HT_IBSS; 568 NL80211_FEATURE_HT_IBSS;
@@ -596,11 +593,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
596 local->hw.max_report_rates = 0; 593 local->hw.max_report_rates = 0;
597 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; 594 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
598 local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; 595 local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
596 local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
599 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 597 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
600 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 598 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
599 local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
600 IEEE80211_RADIOTAP_MCS_HAVE_GI |
601 IEEE80211_RADIOTAP_MCS_HAVE_BW;
601 local->user_power_level = -1; 602 local->user_power_level = -1;
602 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
603 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
604 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 603 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
605 604
606 INIT_LIST_HEAD(&local->interfaces); 605 INIT_LIST_HEAD(&local->interfaces);
@@ -672,7 +671,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
672 671
673 ieee80211_hw_roc_setup(local); 672 ieee80211_hw_roc_setup(local);
674 673
675 return local_to_hw(local); 674 return &local->hw;
676} 675}
677EXPORT_SYMBOL(ieee80211_alloc_hw); 676EXPORT_SYMBOL(ieee80211_alloc_hw);
678 677
@@ -694,6 +693,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
694 WLAN_CIPHER_SUITE_AES_CMAC 693 WLAN_CIPHER_SUITE_AES_CMAC
695 }; 694 };
696 695
696 if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
697 (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
698 local->hw.offchannel_tx_hw_queue >= local->hw.queues))
699 return -EINVAL;
700
697 if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) 701 if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
698#ifdef CONFIG_PM 702#ifdef CONFIG_PM
699 && (!local->ops->suspend || !local->ops->resume) 703 && (!local->ops->suspend || !local->ops->resume)
@@ -701,6 +705,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
701 ) 705 )
702 return -EINVAL; 706 return -EINVAL;
703 707
708 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
709 return -EINVAL;
710
704 if (hw->max_report_rates == 0) 711 if (hw->max_report_rates == 0)
705 hw->max_report_rates = hw->max_rates; 712 hw->max_report_rates = hw->max_rates;
706 713
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c707c8bf6d2c..2913113c5833 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,9 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
17#define MESHCONF_CAPAB_FORWARDING 0x08
18
19#define TMR_RUNNING_HK 0 16#define TMR_RUNNING_HK 0
20#define TMR_RUNNING_MP 1 17#define TMR_RUNNING_MP 1
21#define TMR_RUNNING_MPR 2 18#define TMR_RUNNING_MPR 2
@@ -67,16 +64,19 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
67/** 64/**
68 * mesh_matches_local - check if the config of a mesh point matches ours 65 * mesh_matches_local - check if the config of a mesh point matches ours
69 * 66 *
70 * @ie: information elements of a management frame from the mesh peer
71 * @sdata: local mesh subif 67 * @sdata: local mesh subif
68 * @ie: information elements of a management frame from the mesh peer
72 * 69 *
73 * This function checks if the mesh configuration of a mesh point matches the 70 * This function checks if the mesh configuration of a mesh point matches the
74 * local mesh configuration, i.e. if both nodes belong to the same mesh network. 71 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
75 */ 72 */
76bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) 73bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
74 struct ieee802_11_elems *ie)
77{ 75{
78 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 76 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
79 struct ieee80211_local *local = sdata->local; 77 struct ieee80211_local *local = sdata->local;
78 u32 basic_rates = 0;
79 enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT;
80 80
81 /* 81 /*
82 * As support for each feature is added, check for matching 82 * As support for each feature is added, check for matching
@@ -97,10 +97,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
97 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) 97 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
98 goto mismatch; 98 goto mismatch;
99 99
100 /* disallow peering with mismatched channel types for now */ 100 ieee80211_sta_get_rates(local, ie, local->oper_channel->band,
101 if (ie->ht_info_elem && 101 &basic_rates);
102 (local->_oper_channel_type != 102
103 ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) 103 if (sdata->vif.bss_conf.basic_rates != basic_rates)
104 goto mismatch;
105
106 if (ie->ht_operation)
107 sta_channel_type =
108 ieee80211_ht_oper_to_channel_type(ie->ht_operation);
109
110 /* Disallow HT40+/- mismatch */
111 if (ie->ht_operation &&
112 (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
113 local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
114 (sta_channel_type == NL80211_CHAN_HT40MINUS ||
115 sta_channel_type == NL80211_CHAN_HT40PLUS) &&
116 local->_oper_channel_type != sta_channel_type)
104 goto mismatch; 117 goto mismatch;
105 118
106 return true; 119 return true;
@@ -204,7 +217,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
204 kmem_cache_free(rm_cache, p); 217 kmem_cache_free(rm_cache, p);
205 --entries; 218 --entries;
206 } else if ((seqnum == p->seqnum) && 219 } else if ((seqnum == p->seqnum) &&
207 (memcmp(sa, p->sa, ETH_ALEN) == 0)) 220 (ether_addr_equal(sa, p->sa)))
208 return -1; 221 return -1;
209 } 222 }
210 223
@@ -251,8 +264,10 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
251 /* Mesh capability */ 264 /* Mesh capability */
252 ifmsh->accepting_plinks = mesh_plink_availables(sdata); 265 ifmsh->accepting_plinks = mesh_plink_availables(sdata);
253 *pos = MESHCONF_CAPAB_FORWARDING; 266 *pos = MESHCONF_CAPAB_FORWARDING;
254 *pos++ |= ifmsh->accepting_plinks ? 267 *pos |= ifmsh->accepting_plinks ?
255 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 268 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
269 *pos++ |= ifmsh->adjusting_tbtt ?
270 MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
256 *pos++ = 0x00; 271 *pos++ = 0x00;
257 272
258 return 0; 273 return 0;
@@ -371,7 +386,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
371 return 0; 386 return 0;
372} 387}
373 388
374int mesh_add_ht_info_ie(struct sk_buff *skb, 389int mesh_add_ht_oper_ie(struct sk_buff *skb,
375 struct ieee80211_sub_if_data *sdata) 390 struct ieee80211_sub_if_data *sdata)
376{ 391{
377 struct ieee80211_local *local = sdata->local; 392 struct ieee80211_local *local = sdata->local;
@@ -385,11 +400,12 @@ int mesh_add_ht_info_ie(struct sk_buff *skb,
385 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) 400 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
386 return 0; 401 return 0;
387 402
388 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) 403 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
389 return -ENOMEM; 404 return -ENOMEM;
390 405
391 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); 406 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
392 ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type); 407 ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type,
408 sdata->vif.bss_conf.ht_operation_mode);
393 409
394 return 0; 410 return 0;
395} 411}
@@ -573,14 +589,24 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
573 ieee80211_configure_filter(local); 589 ieee80211_configure_filter(local);
574 590
575 ifmsh->mesh_cc_id = 0; /* Disabled */ 591 ifmsh->mesh_cc_id = 0; /* Disabled */
576 ifmsh->mesh_sp_id = 0; /* Neighbor Offset */
577 ifmsh->mesh_auth_id = 0; /* Disabled */ 592 ifmsh->mesh_auth_id = 0; /* Disabled */
593 /* register sync ops from extensible synchronization framework */
594 ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
595 ifmsh->adjusting_tbtt = false;
596 ifmsh->sync_offset_clockdrift_max = 0;
578 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 597 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
579 ieee80211_mesh_root_setup(ifmsh); 598 ieee80211_mesh_root_setup(ifmsh);
580 ieee80211_queue_work(&local->hw, &sdata->work); 599 ieee80211_queue_work(&local->hw, &sdata->work);
600 sdata->vif.bss_conf.ht_operation_mode =
601 ifmsh->mshcfg.ht_opmode;
581 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 602 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
603 sdata->vif.bss_conf.basic_rates =
604 ieee80211_mandatory_rates(sdata->local,
605 sdata->local->hw.conf.channel->band);
582 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 606 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
583 BSS_CHANGED_BEACON_ENABLED | 607 BSS_CHANGED_BEACON_ENABLED |
608 BSS_CHANGED_HT |
609 BSS_CHANGED_BASIC_RATES |
584 BSS_CHANGED_BEACON_INT); 610 BSS_CHANGED_BEACON_INT);
585} 611}
586 612
@@ -616,16 +642,16 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
616 struct ieee80211_rx_status *rx_status) 642 struct ieee80211_rx_status *rx_status)
617{ 643{
618 struct ieee80211_local *local = sdata->local; 644 struct ieee80211_local *local = sdata->local;
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
619 struct ieee802_11_elems elems; 646 struct ieee802_11_elems elems;
620 struct ieee80211_channel *channel; 647 struct ieee80211_channel *channel;
621 u32 supp_rates = 0;
622 size_t baselen; 648 size_t baselen;
623 int freq; 649 int freq;
624 enum ieee80211_band band = rx_status->band; 650 enum ieee80211_band band = rx_status->band;
625 651
626 /* ignore ProbeResp to foreign address */ 652 /* ignore ProbeResp to foreign address */
627 if (stype == IEEE80211_STYPE_PROBE_RESP && 653 if (stype == IEEE80211_STYPE_PROBE_RESP &&
628 compare_ether_addr(mgmt->da, sdata->vif.addr)) 654 !ether_addr_equal(mgmt->da, sdata->vif.addr))
629 return; 655 return;
630 656
631 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 657 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -650,10 +676,12 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
650 return; 676 return;
651 677
652 if (elems.mesh_id && elems.mesh_config && 678 if (elems.mesh_id && elems.mesh_config &&
653 mesh_matches_local(&elems, sdata)) { 679 mesh_matches_local(sdata, &elems))
654 supp_rates = ieee80211_sta_get_rates(local, &elems, band); 680 mesh_neighbour_update(sdata, mgmt->sa, &elems);
655 mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); 681
656 } 682 if (ifmsh->sync_ops)
683 ifmsh->sync_ops->rx_bcn_presp(sdata,
684 stype, mgmt, &elems, rx_status);
657} 685}
658 686
659static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, 687static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
@@ -721,6 +749,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
721 749
722 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) 750 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
723 ieee80211_mesh_rootpath(sdata); 751 ieee80211_mesh_rootpath(sdata);
752
753 if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
754 mesh_sync_adjust_tbtt(sdata);
724} 755}
725 756
726void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) 757void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -761,4 +792,5 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
761 (unsigned long) sdata); 792 (unsigned long) sdata);
762 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 793 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
763 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 794 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
795 spin_lock_init(&ifmsh->sync_offset_lock);
764} 796}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index bd14bd26a2b6..e3642756f8f4 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,13 +13,26 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <asm/unaligned.h>
17#include "ieee80211_i.h" 16#include "ieee80211_i.h"
18 17
19 18
20/* Data structures */ 19/* Data structures */
21 20
22/** 21/**
22 * enum mesh_config_capab_flags - mesh config IE capability flags
23 *
24 * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
25 * additional mesh peerings with other mesh STAs
26 * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
27 * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
28 */
29enum mesh_config_capab_flags {
30 MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
31 MESHCONF_CAPAB_FORWARDING = BIT(3),
32 MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
33};
34
35/**
23 * enum mesh_path_flags - mac80211 mesh path flags 36 * enum mesh_path_flags - mac80211 mesh path flags
24 * 37 *
25 * 38 *
@@ -57,12 +70,15 @@ enum mesh_path_flags {
57 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to 70 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
58 * grow 71 * grow
59 * @MESH_WORK_ROOT: the mesh root station needs to send a frame 72 * @MESH_WORK_ROOT: the mesh root station needs to send a frame
73 * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
74 * mesh nodes
60 */ 75 */
61enum mesh_deferred_task_flags { 76enum mesh_deferred_task_flags {
62 MESH_WORK_HOUSEKEEPING, 77 MESH_WORK_HOUSEKEEPING,
63 MESH_WORK_GROW_MPATH_TABLE, 78 MESH_WORK_GROW_MPATH_TABLE,
64 MESH_WORK_GROW_MPP_TABLE, 79 MESH_WORK_GROW_MPP_TABLE,
65 MESH_WORK_ROOT, 80 MESH_WORK_ROOT,
81 MESH_WORK_DRIFT_ADJUST,
66}; 82};
67 83
68/** 84/**
@@ -86,6 +102,9 @@ enum mesh_deferred_task_flags {
86 * @state_lock: mesh path state lock used to protect changes to the 102 * @state_lock: mesh path state lock used to protect changes to the
87 * mpath itself. No need to take this lock when adding or removing 103 * mpath itself. No need to take this lock when adding or removing
88 * an mpath to a hash bucket on a path table. 104 * an mpath to a hash bucket on a path table.
105 * @rann_snd_addr: the RANN sender address
106 * @rann_metric: the aggregated path metric towards the root node
107 * @is_root: the destination station of this path is a root node
89 * @is_gate: the destination station of this path is a mesh gate 108 * @is_gate: the destination station of this path is a mesh gate
90 * 109 *
91 * 110 *
@@ -110,6 +129,9 @@ struct mesh_path {
110 u8 discovery_retries; 129 u8 discovery_retries;
111 enum mesh_path_flags flags; 130 enum mesh_path_flags flags;
112 spinlock_t state_lock; 131 spinlock_t state_lock;
132 u8 rann_snd_addr[ETH_ALEN];
133 u32 rann_metric;
134 bool is_root;
113 bool is_gate; 135 bool is_gate;
114}; 136};
115 137
@@ -200,8 +222,8 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
200 char *addr6); 222 char *addr6);
201int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 223int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
202 struct ieee80211_sub_if_data *sdata); 224 struct ieee80211_sub_if_data *sdata);
203bool mesh_matches_local(struct ieee802_11_elems *ie, 225bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
204 struct ieee80211_sub_if_data *sdata); 226 struct ieee802_11_elems *ie);
205void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); 227void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
206void mesh_mgmt_ies_add(struct sk_buff *skb, 228void mesh_mgmt_ies_add(struct sk_buff *skb,
207 struct ieee80211_sub_if_data *sdata); 229 struct ieee80211_sub_if_data *sdata);
@@ -217,7 +239,7 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
217 struct ieee80211_sub_if_data *sdata); 239 struct ieee80211_sub_if_data *sdata);
218int mesh_add_ht_cap_ie(struct sk_buff *skb, 240int mesh_add_ht_cap_ie(struct sk_buff *skb,
219 struct ieee80211_sub_if_data *sdata); 241 struct ieee80211_sub_if_data *sdata);
220int mesh_add_ht_info_ie(struct sk_buff *skb, 242int mesh_add_ht_oper_ie(struct sk_buff *skb,
221 struct ieee80211_sub_if_data *sdata); 243 struct ieee80211_sub_if_data *sdata);
222void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 244void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
223int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 245int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
@@ -229,6 +251,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
229void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 251void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
230void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 252void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
231void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 253void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
254struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
232 255
233/* Mesh paths */ 256/* Mesh paths */
234int mesh_nexthop_lookup(struct sk_buff *skb, 257int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -253,9 +276,9 @@ int mesh_path_add_gate(struct mesh_path *mpath);
253int mesh_path_send_to_gates(struct mesh_path *mpath); 276int mesh_path_send_to_gates(struct mesh_path *mpath);
254int mesh_gate_num(struct ieee80211_sub_if_data *sdata); 277int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
255/* Mesh plinks */ 278/* Mesh plinks */
256void mesh_neighbour_update(u8 *hw_addr, u32 rates, 279void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
257 struct ieee80211_sub_if_data *sdata, 280 u8 *hw_addr,
258 struct ieee802_11_elems *ie); 281 struct ieee802_11_elems *ie);
259bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 282bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
260void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 283void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
261void mesh_plink_broken(struct sta_info *sta); 284void mesh_plink_broken(struct sta_info *sta);
@@ -281,7 +304,6 @@ void mesh_pathtbl_unregister(void);
281int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); 304int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
282void mesh_path_timer(unsigned long data); 305void mesh_path_timer(unsigned long data);
283void mesh_path_flush_by_nexthop(struct sta_info *sta); 306void mesh_path_flush_by_nexthop(struct sta_info *sta);
284void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
285void mesh_path_discard_frame(struct sk_buff *skb, 307void mesh_path_discard_frame(struct sk_buff *skb,
286 struct ieee80211_sub_if_data *sdata); 308 struct ieee80211_sub_if_data *sdata);
287void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 309void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
@@ -322,6 +344,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
322void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); 344void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
323void mesh_plink_quiesce(struct sta_info *sta); 345void mesh_plink_quiesce(struct sta_info *sta);
324void mesh_plink_restart(struct sta_info *sta); 346void mesh_plink_restart(struct sta_info *sta);
347void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
348void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
325#else 349#else
326#define mesh_allocated 0 350#define mesh_allocated 0
327static inline void 351static inline void
@@ -334,6 +358,8 @@ static inline void mesh_plink_quiesce(struct sta_info *sta) {}
334static inline void mesh_plink_restart(struct sta_info *sta) {} 358static inline void mesh_plink_restart(struct sta_info *sta) {}
335static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) 359static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
336{ return false; } 360{ return false; }
361static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
362{}
337#endif 363#endif
338 364
339#endif /* IEEE80211S_H */ 365#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 54df1b2bafd2..9b59658e8650 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,6 +8,8 @@
8 */ 8 */
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/etherdevice.h>
12#include <asm/unaligned.h>
11#include "wme.h" 13#include "wme.h"
12#include "mesh.h" 14#include "mesh.h"
13 15
@@ -84,8 +86,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
84#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) 86#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
85 87
86#define MSEC_TO_TU(x) (x*1000/1024) 88#define MSEC_TO_TU(x) (x*1000/1024)
87#define SN_GT(x, y) ((long) (y) - (long) (x) < 0) 89#define SN_GT(x, y) ((s32)(y - x) < 0)
88#define SN_LT(x, y) ((long) (x) - (long) (y) < 0) 90#define SN_LT(x, y) ((s32)(x - y) < 0)
89 91
90#define net_traversal_jiffies(s) \ 92#define net_traversal_jiffies(s) \
91 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) 93 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -322,6 +324,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
322 struct sta_info *sta) 324 struct sta_info *sta)
323{ 325{
324 struct ieee80211_supported_band *sband; 326 struct ieee80211_supported_band *sband;
327 struct rate_info rinfo;
325 /* This should be adjusted for each device */ 328 /* This should be adjusted for each device */
326 int device_constant = 1 << ARITH_SHIFT; 329 int device_constant = 1 << ARITH_SHIFT;
327 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; 330 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
@@ -335,7 +338,9 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
335 if (sta->fail_avg >= 100) 338 if (sta->fail_avg >= 100)
336 return MAX_METRIC; 339 return MAX_METRIC;
337 340
338 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS) 341 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
342 rate = cfg80211_calculate_bitrate(&rinfo);
343 if (WARN_ON(!rate))
339 return MAX_METRIC; 344 return MAX_METRIC;
340 345
341 err = (sta->fail_avg << ARITH_SHIFT) / 100; 346 err = (sta->fail_avg << ARITH_SHIFT) / 100;
@@ -343,7 +348,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
343 /* bitrate is in units of 100 Kbps, while we need rate in units of 348 /* bitrate is in units of 100 Kbps, while we need rate in units of
344 * 1Mbps. This will be corrected on tx_time computation. 349 * 1Mbps. This will be corrected on tx_time computation.
345 */ 350 */
346 rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
347 tx_time = (device_constant + 10 * test_frame_len / rate); 351 tx_time = (device_constant + 10 * test_frame_len / rate);
348 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); 352 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
349 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; 353 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -418,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
418 new_metric = MAX_METRIC; 422 new_metric = MAX_METRIC;
419 exp_time = TU_TO_EXP_TIME(orig_lifetime); 423 exp_time = TU_TO_EXP_TIME(orig_lifetime);
420 424
421 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) { 425 if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
422 /* This MP is the originator, we are not interested in this 426 /* This MP is the originator, we are not interested in this
423 * frame, except for updating transmitter's path info. 427 * frame, except for updating transmitter's path info.
424 */ 428 */
@@ -468,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
468 472
469 /* Update and check transmitter routing info */ 473 /* Update and check transmitter routing info */
470 ta = mgmt->sa; 474 ta = mgmt->sa;
471 if (memcmp(orig_addr, ta, ETH_ALEN) == 0) 475 if (ether_addr_equal(orig_addr, ta))
472 fresh_info = false; 476 fresh_info = false;
473 else { 477 else {
474 fresh_info = true; 478 fresh_info = true;
@@ -512,8 +516,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
512 u8 *preq_elem, u32 metric) 516 u8 *preq_elem, u32 metric)
513{ 517{
514 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 518 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
515 struct mesh_path *mpath; 519 struct mesh_path *mpath = NULL;
516 u8 *target_addr, *orig_addr; 520 u8 *target_addr, *orig_addr;
521 const u8 *da;
517 u8 target_flags, ttl; 522 u8 target_flags, ttl;
518 u32 orig_sn, target_sn, lifetime; 523 u32 orig_sn, target_sn, lifetime;
519 bool reply = false; 524 bool reply = false;
@@ -528,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
528 533
529 mhwmp_dbg("received PREQ from %pM", orig_addr); 534 mhwmp_dbg("received PREQ from %pM", orig_addr);
530 535
531 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) { 536 if (ether_addr_equal(target_addr, sdata->vif.addr)) {
532 mhwmp_dbg("PREQ is for us"); 537 mhwmp_dbg("PREQ is for us");
533 forward = false; 538 forward = false;
534 reply = true; 539 reply = true;
@@ -575,7 +580,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
575 ifmsh->mshstats.dropped_frames_ttl++; 580 ifmsh->mshstats.dropped_frames_ttl++;
576 } 581 }
577 582
578 if (forward) { 583 if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
579 u32 preq_id; 584 u32 preq_id;
580 u8 hopcount, flags; 585 u8 hopcount, flags;
581 586
@@ -590,13 +595,18 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
590 flags = PREQ_IE_FLAGS(preq_elem); 595 flags = PREQ_IE_FLAGS(preq_elem);
591 preq_id = PREQ_IE_PREQ_ID(preq_elem); 596 preq_id = PREQ_IE_PREQ_ID(preq_elem);
592 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 597 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
598 da = (mpath && mpath->is_root) ?
599 mpath->rann_snd_addr : broadcast_addr;
593 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 600 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
594 cpu_to_le32(orig_sn), target_flags, target_addr, 601 cpu_to_le32(orig_sn), target_flags, target_addr,
595 cpu_to_le32(target_sn), broadcast_addr, 602 cpu_to_le32(target_sn), da,
596 hopcount, ttl, cpu_to_le32(lifetime), 603 hopcount, ttl, cpu_to_le32(lifetime),
597 cpu_to_le32(metric), cpu_to_le32(preq_id), 604 cpu_to_le32(metric), cpu_to_le32(preq_id),
598 sdata); 605 sdata);
599 ifmsh->mshstats.fwded_mcast++; 606 if (!is_multicast_ether_addr(da))
607 ifmsh->mshstats.fwded_unicast++;
608 else
609 ifmsh->mshstats.fwded_mcast++;
600 ifmsh->mshstats.fwded_frames++; 610 ifmsh->mshstats.fwded_frames++;
601 } 611 }
602} 612}
@@ -614,6 +624,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
614 struct ieee80211_mgmt *mgmt, 624 struct ieee80211_mgmt *mgmt,
615 u8 *prep_elem, u32 metric) 625 u8 *prep_elem, u32 metric)
616{ 626{
627 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
617 struct mesh_path *mpath; 628 struct mesh_path *mpath;
618 u8 *target_addr, *orig_addr; 629 u8 *target_addr, *orig_addr;
619 u8 ttl, hopcount, flags; 630 u8 ttl, hopcount, flags;
@@ -623,10 +634,13 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
623 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); 634 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
624 635
625 orig_addr = PREP_IE_ORIG_ADDR(prep_elem); 636 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
626 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) 637 if (ether_addr_equal(orig_addr, sdata->vif.addr))
627 /* destination, no forwarding required */ 638 /* destination, no forwarding required */
628 return; 639 return;
629 640
641 if (!ifmsh->mshcfg.dot11MeshForwarding)
642 return;
643
630 ttl = PREP_IE_TTL(prep_elem); 644 ttl = PREP_IE_TTL(prep_elem);
631 if (ttl <= 1) { 645 if (ttl <= 1) {
632 sdata->u.mesh.mshstats.dropped_frames_ttl++; 646 sdata->u.mesh.mshstats.dropped_frames_ttl++;
@@ -693,21 +707,26 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
693 rcu_read_lock(); 707 rcu_read_lock();
694 mpath = mesh_path_lookup(target_addr, sdata); 708 mpath = mesh_path_lookup(target_addr, sdata);
695 if (mpath) { 709 if (mpath) {
710 struct sta_info *sta;
711
696 spin_lock_bh(&mpath->state_lock); 712 spin_lock_bh(&mpath->state_lock);
713 sta = next_hop_deref_protected(mpath);
697 if (mpath->flags & MESH_PATH_ACTIVE && 714 if (mpath->flags & MESH_PATH_ACTIVE &&
698 memcmp(ta, next_hop_deref_protected(mpath)->sta.addr, 715 ether_addr_equal(ta, sta->sta.addr) &&
699 ETH_ALEN) == 0 &&
700 (!(mpath->flags & MESH_PATH_SN_VALID) || 716 (!(mpath->flags & MESH_PATH_SN_VALID) ||
701 SN_GT(target_sn, mpath->sn))) { 717 SN_GT(target_sn, mpath->sn))) {
702 mpath->flags &= ~MESH_PATH_ACTIVE; 718 mpath->flags &= ~MESH_PATH_ACTIVE;
703 mpath->sn = target_sn; 719 mpath->sn = target_sn;
704 spin_unlock_bh(&mpath->state_lock); 720 spin_unlock_bh(&mpath->state_lock);
721 if (!ifmsh->mshcfg.dot11MeshForwarding)
722 goto endperr;
705 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn), 723 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
706 cpu_to_le16(target_rcode), 724 cpu_to_le16(target_rcode),
707 broadcast_addr, sdata); 725 broadcast_addr, sdata);
708 } else 726 } else
709 spin_unlock_bh(&mpath->state_lock); 727 spin_unlock_bh(&mpath->state_lock);
710 } 728 }
729endperr:
711 rcu_read_unlock(); 730 rcu_read_unlock();
712} 731}
713 732
@@ -716,11 +735,12 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
716 struct ieee80211_rann_ie *rann) 735 struct ieee80211_rann_ie *rann)
717{ 736{
718 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 737 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
738 struct ieee80211_local *local = sdata->local;
739 struct sta_info *sta;
719 struct mesh_path *mpath; 740 struct mesh_path *mpath;
720 u8 ttl, flags, hopcount; 741 u8 ttl, flags, hopcount;
721 u8 *orig_addr; 742 u8 *orig_addr;
722 u32 orig_sn, metric; 743 u32 orig_sn, metric, metric_txsta, interval;
723 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
724 bool root_is_gate; 744 bool root_is_gate;
725 745
726 ttl = rann->rann_ttl; 746 ttl = rann->rann_ttl;
@@ -732,19 +752,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
732 flags = rann->rann_flags; 752 flags = rann->rann_flags;
733 root_is_gate = !!(flags & RANN_FLAG_IS_GATE); 753 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
734 orig_addr = rann->rann_addr; 754 orig_addr = rann->rann_addr;
735 orig_sn = rann->rann_seq; 755 orig_sn = le32_to_cpu(rann->rann_seq);
756 interval = le32_to_cpu(rann->rann_interval);
736 hopcount = rann->rann_hopcount; 757 hopcount = rann->rann_hopcount;
737 hopcount++; 758 hopcount++;
738 metric = rann->rann_metric; 759 metric = le32_to_cpu(rann->rann_metric);
739 760
740 /* Ignore our own RANNs */ 761 /* Ignore our own RANNs */
741 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) 762 if (ether_addr_equal(orig_addr, sdata->vif.addr))
742 return; 763 return;
743 764
744 mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr, 765 mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
745 root_is_gate); 766 orig_addr, mgmt->sa, root_is_gate);
746 767
747 rcu_read_lock(); 768 rcu_read_lock();
769 sta = sta_info_get(sdata, mgmt->sa);
770 if (!sta) {
771 rcu_read_unlock();
772 return;
773 }
774
775 metric_txsta = airtime_link_metric_get(local, sta);
776
748 mpath = mesh_path_lookup(orig_addr, sdata); 777 mpath = mesh_path_lookup(orig_addr, sdata);
749 if (!mpath) { 778 if (!mpath) {
750 mesh_path_add(orig_addr, sdata); 779 mesh_path_add(orig_addr, sdata);
@@ -764,15 +793,23 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
764 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 793 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
765 } 794 }
766 795
767 if (mpath->sn < orig_sn) { 796 if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
797 metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
768 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 798 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
769 cpu_to_le32(orig_sn), 799 cpu_to_le32(orig_sn),
770 0, NULL, 0, broadcast_addr, 800 0, NULL, 0, broadcast_addr,
771 hopcount, ttl, cpu_to_le32(interval), 801 hopcount, ttl, cpu_to_le32(interval),
772 cpu_to_le32(metric + mpath->metric), 802 cpu_to_le32(metric + metric_txsta),
773 0, sdata); 803 0, sdata);
774 mpath->sn = orig_sn; 804 mpath->sn = orig_sn;
805 mpath->rann_metric = metric + metric_txsta;
806 /* Recording RANNs sender address to send individually
807 * addressed PREQs destined for root mesh STA */
808 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
775 } 809 }
810
811 mpath->is_root = true;
812
776 if (root_is_gate) 813 if (root_is_gate)
777 mesh_path_add_gate(mpath); 814 mesh_path_add_gate(mpath);
778 815
@@ -908,6 +945,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
908 struct mesh_preq_queue *preq_node; 945 struct mesh_preq_queue *preq_node;
909 struct mesh_path *mpath; 946 struct mesh_path *mpath;
910 u8 ttl, target_flags; 947 u8 ttl, target_flags;
948 const u8 *da;
911 u32 lifetime; 949 u32 lifetime;
912 950
913 spin_lock_bh(&ifmsh->mesh_preq_queue_lock); 951 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -970,9 +1008,10 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
970 target_flags = MP_F_RF; 1008 target_flags = MP_F_RF;
971 1009
972 spin_unlock_bh(&mpath->state_lock); 1010 spin_unlock_bh(&mpath->state_lock);
1011 da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
973 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, 1012 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
974 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 1013 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
975 cpu_to_le32(mpath->sn), broadcast_addr, 0, 1014 cpu_to_le32(mpath->sn), da, 0,
976 ttl, cpu_to_le32(lifetime), 0, 1015 ttl, cpu_to_le32(lifetime), 0,
977 cpu_to_le32(ifmsh->preq_id++), sdata); 1016 cpu_to_le32(ifmsh->preq_id++), sdata);
978 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1017 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
@@ -1063,7 +1102,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
1063 if (time_after(jiffies, 1102 if (time_after(jiffies,
1064 mpath->exp_time - 1103 mpath->exp_time -
1065 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 1104 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1066 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) && 1105 ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
1067 !(mpath->flags & MESH_PATH_RESOLVING) && 1106 !(mpath->flags & MESH_PATH_RESOLVING) &&
1068 !(mpath->flags & MESH_PATH_FIXED)) 1107 !(mpath->flags & MESH_PATH_FIXED))
1069 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 1108 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index edf167e3b8f3..b39224d8255c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -336,7 +336,7 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
336} 336}
337 337
338 338
339static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst, 339static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
340 struct ieee80211_sub_if_data *sdata) 340 struct ieee80211_sub_if_data *sdata)
341{ 341{
342 struct mesh_path *mpath; 342 struct mesh_path *mpath;
@@ -348,7 +348,7 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
348 hlist_for_each_entry_rcu(node, n, bucket, list) { 348 hlist_for_each_entry_rcu(node, n, bucket, list) {
349 mpath = node->mpath; 349 mpath = node->mpath;
350 if (mpath->sdata == sdata && 350 if (mpath->sdata == sdata &&
351 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 351 ether_addr_equal(dst, mpath->dst)) {
352 if (MPATH_EXPIRED(mpath)) { 352 if (MPATH_EXPIRED(mpath)) {
353 spin_lock_bh(&mpath->state_lock); 353 spin_lock_bh(&mpath->state_lock);
354 mpath->flags &= ~MESH_PATH_ACTIVE; 354 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -371,12 +371,12 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
371 */ 371 */
372struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 372struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
373{ 373{
374 return path_lookup(rcu_dereference(mesh_paths), dst, sdata); 374 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
375} 375}
376 376
377struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 377struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
378{ 378{
379 return path_lookup(rcu_dereference(mpp_paths), dst, sdata); 379 return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
380} 380}
381 381
382 382
@@ -413,12 +413,6 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
413 return NULL; 413 return NULL;
414} 414}
415 415
416static void mesh_gate_node_reclaim(struct rcu_head *rp)
417{
418 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
419 kfree(node);
420}
421
422/** 416/**
423 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table 417 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
424 * @mpath: gate path to add to table 418 * @mpath: gate path to add to table
@@ -479,7 +473,7 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
479 if (gate->mpath == mpath) { 473 if (gate->mpath == mpath) {
480 spin_lock_bh(&tbl->gates_lock); 474 spin_lock_bh(&tbl->gates_lock);
481 hlist_del_rcu(&gate->list); 475 hlist_del_rcu(&gate->list);
482 call_rcu(&gate->rcu, mesh_gate_node_reclaim); 476 kfree_rcu(gate, rcu);
483 spin_unlock_bh(&tbl->gates_lock); 477 spin_unlock_bh(&tbl->gates_lock);
484 mpath->sdata->u.mesh.num_gates--; 478 mpath->sdata->u.mesh.num_gates--;
485 mpath->is_gate = false; 479 mpath->is_gate = false;
@@ -523,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
523 int err = 0; 517 int err = 0;
524 u32 hash_idx; 518 u32 hash_idx;
525 519
526 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 520 if (ether_addr_equal(dst, sdata->vif.addr))
527 /* never add ourselves as neighbours */ 521 /* never add ourselves as neighbours */
528 return -ENOTSUPP; 522 return -ENOTSUPP;
529 523
@@ -544,6 +538,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
544 538
545 read_lock_bh(&pathtbl_resize_lock); 539 read_lock_bh(&pathtbl_resize_lock);
546 memcpy(new_mpath->dst, dst, ETH_ALEN); 540 memcpy(new_mpath->dst, dst, ETH_ALEN);
541 memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
542 new_mpath->is_root = false;
547 new_mpath->sdata = sdata; 543 new_mpath->sdata = sdata;
548 new_mpath->flags = 0; 544 new_mpath->flags = 0;
549 skb_queue_head_init(&new_mpath->frame_queue); 545 skb_queue_head_init(&new_mpath->frame_queue);
@@ -559,12 +555,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
559 hash_idx = mesh_table_hash(dst, sdata, tbl); 555 hash_idx = mesh_table_hash(dst, sdata, tbl);
560 bucket = &tbl->hash_buckets[hash_idx]; 556 bucket = &tbl->hash_buckets[hash_idx];
561 557
562 spin_lock_bh(&tbl->hashwlock[hash_idx]); 558 spin_lock(&tbl->hashwlock[hash_idx]);
563 559
564 err = -EEXIST; 560 err = -EEXIST;
565 hlist_for_each_entry(node, n, bucket, list) { 561 hlist_for_each_entry(node, n, bucket, list) {
566 mpath = node->mpath; 562 mpath = node->mpath;
567 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 563 if (mpath->sdata == sdata &&
564 ether_addr_equal(dst, mpath->dst))
568 goto err_exists; 565 goto err_exists;
569 } 566 }
570 567
@@ -575,7 +572,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
575 572
576 mesh_paths_generation++; 573 mesh_paths_generation++;
577 574
578 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 575 spin_unlock(&tbl->hashwlock[hash_idx]);
579 read_unlock_bh(&pathtbl_resize_lock); 576 read_unlock_bh(&pathtbl_resize_lock);
580 if (grow) { 577 if (grow) {
581 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 578 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -584,7 +581,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
584 return 0; 581 return 0;
585 582
586err_exists: 583err_exists:
587 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 584 spin_unlock(&tbl->hashwlock[hash_idx]);
588 read_unlock_bh(&pathtbl_resize_lock); 585 read_unlock_bh(&pathtbl_resize_lock);
589 kfree(new_node); 586 kfree(new_node);
590err_node_alloc: 587err_node_alloc:
@@ -655,7 +652,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
655 int err = 0; 652 int err = 0;
656 u32 hash_idx; 653 u32 hash_idx;
657 654
658 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 655 if (ether_addr_equal(dst, sdata->vif.addr))
659 /* never add ourselves as neighbours */ 656 /* never add ourselves as neighbours */
660 return -ENOTSUPP; 657 return -ENOTSUPP;
661 658
@@ -687,12 +684,13 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
687 hash_idx = mesh_table_hash(dst, sdata, tbl); 684 hash_idx = mesh_table_hash(dst, sdata, tbl);
688 bucket = &tbl->hash_buckets[hash_idx]; 685 bucket = &tbl->hash_buckets[hash_idx];
689 686
690 spin_lock_bh(&tbl->hashwlock[hash_idx]); 687 spin_lock(&tbl->hashwlock[hash_idx]);
691 688
692 err = -EEXIST; 689 err = -EEXIST;
693 hlist_for_each_entry(node, n, bucket, list) { 690 hlist_for_each_entry(node, n, bucket, list) {
694 mpath = node->mpath; 691 mpath = node->mpath;
695 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 692 if (mpath->sdata == sdata &&
693 ether_addr_equal(dst, mpath->dst))
696 goto err_exists; 694 goto err_exists;
697 } 695 }
698 696
@@ -701,7 +699,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
701 tbl->mean_chain_len * (tbl->hash_mask + 1)) 699 tbl->mean_chain_len * (tbl->hash_mask + 1))
702 grow = 1; 700 grow = 1;
703 701
704 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 702 spin_unlock(&tbl->hashwlock[hash_idx]);
705 read_unlock_bh(&pathtbl_resize_lock); 703 read_unlock_bh(&pathtbl_resize_lock);
706 if (grow) { 704 if (grow) {
707 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 705 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -710,7 +708,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
710 return 0; 708 return 0;
711 709
712err_exists: 710err_exists:
713 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 711 spin_unlock(&tbl->hashwlock[hash_idx]);
714 read_unlock_bh(&pathtbl_resize_lock); 712 read_unlock_bh(&pathtbl_resize_lock);
715 kfree(new_node); 713 kfree(new_node);
716err_node_alloc: 714err_node_alloc:
@@ -809,9 +807,9 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
809 for_each_mesh_entry(tbl, p, node, i) { 807 for_each_mesh_entry(tbl, p, node, i) {
810 mpath = node->mpath; 808 mpath = node->mpath;
811 if (rcu_dereference(mpath->next_hop) == sta) { 809 if (rcu_dereference(mpath->next_hop) == sta) {
812 spin_lock_bh(&tbl->hashwlock[i]); 810 spin_lock(&tbl->hashwlock[i]);
813 __mesh_path_del(tbl, node); 811 __mesh_path_del(tbl, node);
814 spin_unlock_bh(&tbl->hashwlock[i]); 812 spin_unlock(&tbl->hashwlock[i]);
815 } 813 }
816 } 814 }
817 read_unlock_bh(&pathtbl_resize_lock); 815 read_unlock_bh(&pathtbl_resize_lock);
@@ -882,11 +880,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
882 hash_idx = mesh_table_hash(addr, sdata, tbl); 880 hash_idx = mesh_table_hash(addr, sdata, tbl);
883 bucket = &tbl->hash_buckets[hash_idx]; 881 bucket = &tbl->hash_buckets[hash_idx];
884 882
885 spin_lock_bh(&tbl->hashwlock[hash_idx]); 883 spin_lock(&tbl->hashwlock[hash_idx]);
886 hlist_for_each_entry(node, n, bucket, list) { 884 hlist_for_each_entry(node, n, bucket, list) {
887 mpath = node->mpath; 885 mpath = node->mpath;
888 if (mpath->sdata == sdata && 886 if (mpath->sdata == sdata &&
889 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 887 ether_addr_equal(addr, mpath->dst)) {
890 __mesh_path_del(tbl, node); 888 __mesh_path_del(tbl, node);
891 goto enddel; 889 goto enddel;
892 } 890 }
@@ -895,7 +893,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
895 err = -ENXIO; 893 err = -ENXIO;
896enddel: 894enddel:
897 mesh_paths_generation++; 895 mesh_paths_generation++;
898 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 896 spin_unlock(&tbl->hashwlock[hash_idx]);
899 read_unlock_bh(&pathtbl_resize_lock); 897 read_unlock_bh(&pathtbl_resize_lock);
900 return err; 898 return err;
901} 899}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a17251730b9e..60ef235c9d9b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -31,6 +31,12 @@
31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) 31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
33 33
34/* We only need a valid sta if user configured a minimum rssi_threshold. */
35#define rssi_threshold_check(sta, sdata) \
36 (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\
37 (sta && (s8) -ewma_read(&sta->avg_signal) > \
38 sdata->u.mesh.mshcfg.rssi_threshold))
39
34enum plink_event { 40enum plink_event {
35 PLINK_UNDEFINED, 41 PLINK_UNDEFINED,
36 OPN_ACPT, 42 OPN_ACPT,
@@ -76,42 +82,91 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
76} 82}
77 83
78/* 84/*
79 * NOTE: This is just an alias for sta_info_alloc(), see notes 85 * Allocate mesh sta entry and insert into station table
80 * on it in the lifecycle management section!
81 */ 86 */
82static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, 87static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
83 u8 *hw_addr, u32 rates, 88 u8 *hw_addr)
84 struct ieee802_11_elems *elems)
85{ 89{
86 struct ieee80211_local *local = sdata->local;
87 struct ieee80211_supported_band *sband;
88 struct sta_info *sta; 90 struct sta_info *sta;
89 91
90 sband = local->hw.wiphy->bands[local->oper_channel->band]; 92 if (sdata->local->num_sta >= MESH_MAX_PLINKS)
91
92 if (local->num_sta >= MESH_MAX_PLINKS)
93 return NULL; 93 return NULL;
94 94
95 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); 95 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
96 if (!sta) 96 if (!sta)
97 return NULL; 97 return NULL;
98 98
99 sta_info_move_state(sta, IEEE80211_STA_AUTH); 99 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
100 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 100 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
101 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 101 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
102 102
103 set_sta_flag(sta, WLAN_STA_WME); 103 set_sta_flag(sta, WLAN_STA_WME);
104 104
105 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
106 if (elems->ht_cap_elem)
107 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
108 elems->ht_cap_elem,
109 &sta->sta.ht_cap);
110 rate_control_rate_init(sta);
111
112 return sta; 105 return sta;
113} 106}
114 107
108/*
109 * mesh_set_ht_prot_mode - set correct HT protection mode
110 *
111 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
112 * mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
113 * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
114 * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
115 * is selected if all peers in our 20/40MHz MBSS support HT and atleast one
116 * HT20 peer is present. Otherwise no-protection mode is selected.
117 */
118static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
119{
120 struct ieee80211_local *local = sdata->local;
121 struct sta_info *sta;
122 u32 changed = 0;
123 u16 ht_opmode;
124 bool non_ht_sta = false, ht20_sta = false;
125
126 if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
127 return 0;
128
129 rcu_read_lock();
130 list_for_each_entry_rcu(sta, &local->sta_list, list) {
131 if (sdata != sta->sdata ||
132 sta->plink_state != NL80211_PLINK_ESTAB)
133 continue;
134
135 switch (sta->ch_type) {
136 case NL80211_CHAN_NO_HT:
137 mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
138 sdata->vif.addr, sta->sta.addr);
139 non_ht_sta = true;
140 goto out;
141 case NL80211_CHAN_HT20:
142 mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
143 sdata->vif.addr, sta->sta.addr);
144 ht20_sta = true;
145 default:
146 break;
147 }
148 }
149out:
150 rcu_read_unlock();
151
152 if (non_ht_sta)
153 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
154 else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
155 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
156 else
157 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
158
159 if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
160 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
161 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
162 changed = BSS_CHANGED_HT;
163 mpl_dbg("mesh_plink %pM: protection mode changed to %d",
164 sdata->vif.addr, ht_opmode);
165 }
166
167 return changed;
168}
169
115/** 170/**
116 * __mesh_plink_deactivate - deactivate mesh peer link 171 * __mesh_plink_deactivate - deactivate mesh peer link
117 * 172 *
@@ -181,7 +236,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
181 2 + sdata->u.mesh.mesh_id_len + 236 2 + sdata->u.mesh.mesh_id_len +
182 2 + sizeof(struct ieee80211_meshconf_ie) + 237 2 + sizeof(struct ieee80211_meshconf_ie) +
183 2 + sizeof(struct ieee80211_ht_cap) + 238 2 + sizeof(struct ieee80211_ht_cap) +
184 2 + sizeof(struct ieee80211_ht_info) + 239 2 + sizeof(struct ieee80211_ht_operation) +
185 2 + 8 + /* peering IE */ 240 2 + 8 + /* peering IE */
186 sdata->u.mesh.ie_len); 241 sdata->u.mesh.ie_len);
187 if (!skb) 242 if (!skb)
@@ -206,8 +261,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
206 pos = skb_put(skb, 2); 261 pos = skb_put(skb, 2);
207 memcpy(pos + 2, &plid, 2); 262 memcpy(pos + 2, &plid, 2);
208 } 263 }
209 if (ieee80211_add_srates_ie(&sdata->vif, skb) || 264 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
210 ieee80211_add_ext_srates_ie(&sdata->vif, skb) || 265 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
211 mesh_add_rsn_ie(skb, sdata) || 266 mesh_add_rsn_ie(skb, sdata) ||
212 mesh_add_meshid_ie(skb, sdata) || 267 mesh_add_meshid_ie(skb, sdata) ||
213 mesh_add_meshconf_ie(skb, sdata)) 268 mesh_add_meshconf_ie(skb, sdata))
@@ -257,7 +312,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
257 312
258 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 313 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
259 if (mesh_add_ht_cap_ie(skb, sdata) || 314 if (mesh_add_ht_cap_ie(skb, sdata) ||
260 mesh_add_ht_info_ie(skb, sdata)) 315 mesh_add_ht_oper_ie(skb, sdata))
261 return -1; 316 return -1;
262 } 317 }
263 318
@@ -268,42 +323,93 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
268 return 0; 323 return 0;
269} 324}
270 325
271void mesh_neighbour_update(u8 *hw_addr, u32 rates, 326/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
272 struct ieee80211_sub_if_data *sdata, 327 *
273 struct ieee802_11_elems *elems) 328 * @sdata: local meshif
329 * @addr: peer's address
330 * @elems: IEs from beacon or mesh peering frame
331 *
332 * call under RCU
333 */
334static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
335 u8 *addr,
336 struct ieee802_11_elems *elems)
274{ 337{
275 struct ieee80211_local *local = sdata->local; 338 struct ieee80211_local *local = sdata->local;
339 enum ieee80211_band band = local->oper_channel->band;
340 struct ieee80211_supported_band *sband;
341 u32 rates, basic_rates = 0;
276 struct sta_info *sta; 342 struct sta_info *sta;
343 bool insert = false;
277 344
278 rcu_read_lock(); 345 sband = local->hw.wiphy->bands[band];
346 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
279 347
280 sta = sta_info_get(sdata, hw_addr); 348 sta = sta_info_get(sdata, addr);
281 if (!sta) { 349 if (!sta) {
282 rcu_read_unlock(); 350 /* Userspace handles peer allocation when security is enabled */
283 /* Userspace handles peer allocation when security is enabled 351 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
284 * */ 352 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
285 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) 353 elems->ie_start,
286 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr, 354 elems->total_len,
287 elems->ie_start, elems->total_len, 355 GFP_ATOMIC);
288 GFP_KERNEL); 356 return NULL;
289 else
290 sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
291 if (!sta)
292 return;
293 if (sta_info_insert_rcu(sta)) {
294 rcu_read_unlock();
295 return;
296 } 357 }
358
359 sta = mesh_plink_alloc(sdata, addr);
360 if (!sta)
361 return NULL;
362 insert = true;
297 } 363 }
298 364
365 spin_lock_bh(&sta->lock);
299 sta->last_rx = jiffies; 366 sta->last_rx = jiffies;
300 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 367 sta->sta.supp_rates[band] = rates;
368 if (elems->ht_cap_elem &&
369 sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
370 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
371 elems->ht_cap_elem,
372 &sta->sta.ht_cap);
373 else
374 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
375
376 if (elems->ht_operation) {
377 if (!(elems->ht_operation->ht_param &
378 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
379 sta->sta.ht_cap.cap &=
380 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
381 sta->ch_type =
382 ieee80211_ht_oper_to_channel_type(elems->ht_operation);
383 }
384
385 rate_control_rate_init(sta);
386 spin_unlock_bh(&sta->lock);
387
388 if (insert && sta_info_insert(sta))
389 return NULL;
390
391 return sta;
392}
393
394void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
395 u8 *hw_addr,
396 struct ieee802_11_elems *elems)
397{
398 struct sta_info *sta;
399
400 rcu_read_lock();
401 sta = mesh_peer_init(sdata, hw_addr, elems);
402 if (!sta)
403 goto out;
404
301 if (mesh_peer_accepts_plinks(elems) && 405 if (mesh_peer_accepts_plinks(elems) &&
302 sta->plink_state == NL80211_PLINK_LISTEN && 406 sta->plink_state == NL80211_PLINK_LISTEN &&
303 sdata->u.mesh.accepting_plinks && 407 sdata->u.mesh.accepting_plinks &&
304 sdata->u.mesh.mshcfg.auto_open_plinks) 408 sdata->u.mesh.mshcfg.auto_open_plinks &&
409 rssi_threshold_check(sta, sdata))
305 mesh_plink_open(sta); 410 mesh_plink_open(sta);
306 411
412out:
307 rcu_read_unlock(); 413 rcu_read_unlock();
308} 414}
309 415
@@ -449,15 +555,15 @@ void mesh_plink_block(struct sta_info *sta)
449void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, 555void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
450 size_t len, struct ieee80211_rx_status *rx_status) 556 size_t len, struct ieee80211_rx_status *rx_status)
451{ 557{
452 struct ieee80211_local *local = sdata->local;
453 struct ieee802_11_elems elems; 558 struct ieee802_11_elems elems;
454 struct sta_info *sta; 559 struct sta_info *sta;
455 enum plink_event event; 560 enum plink_event event;
456 enum ieee80211_self_protected_actioncode ftype; 561 enum ieee80211_self_protected_actioncode ftype;
457 size_t baselen; 562 size_t baselen;
458 bool deactivated, matches_local = true; 563 bool matches_local = true;
459 u8 ie_len; 564 u8 ie_len;
460 u8 *baseaddr; 565 u8 *baseaddr;
566 u32 changed = 0;
461 __le16 plid, llid, reason; 567 __le16 plid, llid, reason;
462#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 568#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
463 static const char *mplstates[] = { 569 static const char *mplstates[] = {
@@ -531,6 +637,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
531 return; 637 return;
532 } 638 }
533 639
640 if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
641 !rssi_threshold_check(sta, sdata)) {
642 mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n",
643 mgmt->sa);
644 rcu_read_unlock();
645 return;
646 }
647
534 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) { 648 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
535 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 649 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
536 rcu_read_unlock(); 650 rcu_read_unlock();
@@ -545,7 +659,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
545 /* Now we will figure out the appropriate event... */ 659 /* Now we will figure out the appropriate event... */
546 event = PLINK_UNDEFINED; 660 event = PLINK_UNDEFINED;
547 if (ftype != WLAN_SP_MESH_PEERING_CLOSE && 661 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
548 (!mesh_matches_local(&elems, sdata))) { 662 !mesh_matches_local(sdata, &elems)) {
549 matches_local = false; 663 matches_local = false;
550 switch (ftype) { 664 switch (ftype) {
551 case WLAN_SP_MESH_PEERING_OPEN: 665 case WLAN_SP_MESH_PEERING_OPEN:
@@ -568,29 +682,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
568 return; 682 return;
569 } else if (!sta) { 683 } else if (!sta) {
570 /* ftype == WLAN_SP_MESH_PEERING_OPEN */ 684 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
571 u32 rates;
572
573 rcu_read_unlock();
574
575 if (!mesh_plink_free_count(sdata)) { 685 if (!mesh_plink_free_count(sdata)) {
576 mpl_dbg("Mesh plink error: no more free plinks\n"); 686 mpl_dbg("Mesh plink error: no more free plinks\n");
577 return;
578 }
579
580 rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
581 sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
582 if (!sta) {
583 mpl_dbg("Mesh plink error: plink table full\n");
584 return;
585 }
586 if (sta_info_insert_rcu(sta)) {
587 rcu_read_unlock(); 687 rcu_read_unlock();
588 return; 688 return;
589 } 689 }
590 event = OPN_ACPT; 690 event = OPN_ACPT;
591 spin_lock_bh(&sta->lock);
592 } else if (matches_local) { 691 } else if (matches_local) {
593 spin_lock_bh(&sta->lock);
594 switch (ftype) { 692 switch (ftype) {
595 case WLAN_SP_MESH_PEERING_OPEN: 693 case WLAN_SP_MESH_PEERING_OPEN:
596 if (!mesh_plink_free_count(sdata) || 694 if (!mesh_plink_free_count(sdata) ||
@@ -627,12 +725,19 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
627 break; 725 break;
628 default: 726 default:
629 mpl_dbg("Mesh plink: unknown frame subtype\n"); 727 mpl_dbg("Mesh plink: unknown frame subtype\n");
630 spin_unlock_bh(&sta->lock);
631 rcu_read_unlock(); 728 rcu_read_unlock();
632 return; 729 return;
633 } 730 }
634 } else { 731 }
635 spin_lock_bh(&sta->lock); 732
733 if (event == OPN_ACPT) {
734 /* allocate sta entry if necessary and update info */
735 sta = mesh_peer_init(sdata, mgmt->sa, &elems);
736 if (!sta) {
737 mpl_dbg("Mesh plink: failed to init peer!\n");
738 rcu_read_unlock();
739 return;
740 }
636 } 741 }
637 742
638 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", 743 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
@@ -640,6 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
640 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), 745 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
641 event); 746 event);
642 reason = 0; 747 reason = 0;
748 spin_lock_bh(&sta->lock);
643 switch (sta->plink_state) { 749 switch (sta->plink_state) {
644 /* spin_unlock as soon as state is updated at each case */ 750 /* spin_unlock as soon as state is updated at each case */
645 case NL80211_PLINK_LISTEN: 751 case NL80211_PLINK_LISTEN:
@@ -743,7 +849,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
743 sta->plink_state = NL80211_PLINK_ESTAB; 849 sta->plink_state = NL80211_PLINK_ESTAB;
744 spin_unlock_bh(&sta->lock); 850 spin_unlock_bh(&sta->lock);
745 mesh_plink_inc_estab_count(sdata); 851 mesh_plink_inc_estab_count(sdata);
746 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 852 changed |= mesh_set_ht_prot_mode(sdata);
853 changed |= BSS_CHANGED_BEACON;
747 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 854 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
748 sta->sta.addr); 855 sta->sta.addr);
749 break; 856 break;
@@ -778,7 +885,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
778 sta->plink_state = NL80211_PLINK_ESTAB; 885 sta->plink_state = NL80211_PLINK_ESTAB;
779 spin_unlock_bh(&sta->lock); 886 spin_unlock_bh(&sta->lock);
780 mesh_plink_inc_estab_count(sdata); 887 mesh_plink_inc_estab_count(sdata);
781 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 888 changed |= mesh_set_ht_prot_mode(sdata);
889 changed |= BSS_CHANGED_BEACON;
782 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 890 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
783 sta->sta.addr); 891 sta->sta.addr);
784 mesh_plink_frame_tx(sdata, 892 mesh_plink_frame_tx(sdata,
@@ -796,13 +904,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
796 case CLS_ACPT: 904 case CLS_ACPT:
797 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE); 905 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
798 sta->reason = reason; 906 sta->reason = reason;
799 deactivated = __mesh_plink_deactivate(sta); 907 __mesh_plink_deactivate(sta);
800 sta->plink_state = NL80211_PLINK_HOLDING; 908 sta->plink_state = NL80211_PLINK_HOLDING;
801 llid = sta->llid; 909 llid = sta->llid;
802 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 910 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
803 spin_unlock_bh(&sta->lock); 911 spin_unlock_bh(&sta->lock);
804 if (deactivated) 912 changed |= mesh_set_ht_prot_mode(sdata);
805 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 913 changed |= BSS_CHANGED_BEACON;
806 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 914 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
807 sta->sta.addr, llid, plid, reason); 915 sta->sta.addr, llid, plid, reason);
808 break; 916 break;
@@ -849,4 +957,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
849 } 957 }
850 958
851 rcu_read_unlock(); 959 rcu_read_unlock();
960
961 if (changed)
962 ieee80211_bss_info_change_notify(sdata, changed);
852} 963}
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
new file mode 100644
index 000000000000..38d30e8ce6dc
--- /dev/null
+++ b/net/mac80211/mesh_sync.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com>
3 * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
4 * Copyright 2011-2012, cozybit Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include "ieee80211_i.h"
12#include "mesh.h"
13#include "driver-ops.h"
14
15#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
16#define msync_dbg(fmt, args...) \
17 printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
18#else
19#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22/* This is not in the standard. It represents a tolerable tbtt drift below
23 * which we do no TSF adjustment.
24 */
25#define TOFFSET_MINIMUM_ADJUSTMENT 10
26
27/* This is not in the standard. It is a margin added to the
28 * Toffset setpoint to mitigate TSF overcorrection
29 * introduced by TSF adjustment latency.
30 */
31#define TOFFSET_SET_MARGIN 20
32
33/* This is not in the standard. It represents the maximum Toffset jump above
34 * which we'll invalidate the Toffset setpoint and choose a new setpoint. This
35 * could be, for instance, in case a neighbor is restarted and its TSF counter
36 * reset.
37 */
38#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */
39
40struct sync_method {
41 u8 method;
42 struct ieee80211_mesh_sync_ops ops;
43};
44
45/**
46 * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
47 *
48 * @ie: information elements of a management frame from the mesh peer
49 */
50static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
51{
52 return (ie->mesh_config->meshconf_cap &
53 MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
54}
55
56void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
57{
58 struct ieee80211_local *local = sdata->local;
59 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
60 /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */
61 u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
62 u64 tsf;
63 u64 tsfdelta;
64
65 spin_lock_bh(&ifmsh->sync_offset_lock);
66
67 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
68 msync_dbg("TBTT : max clockdrift=%lld; adjusting",
69 (long long) ifmsh->sync_offset_clockdrift_max);
70 tsfdelta = -ifmsh->sync_offset_clockdrift_max;
71 ifmsh->sync_offset_clockdrift_max = 0;
72 } else {
73 msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
74 (long long) ifmsh->sync_offset_clockdrift_max,
75 (unsigned long long) beacon_int_fraction);
76 tsfdelta = -beacon_int_fraction;
77 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
78 }
79
80 tsf = drv_get_tsf(local, sdata);
81 if (tsf != -1ULL)
82 drv_set_tsf(local, sdata, tsf + tsfdelta);
83 spin_unlock_bh(&ifmsh->sync_offset_lock);
84}
85
86static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
87 u16 stype,
88 struct ieee80211_mgmt *mgmt,
89 struct ieee802_11_elems *elems,
90 struct ieee80211_rx_status *rx_status)
91{
92 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
93 struct ieee80211_local *local = sdata->local;
94 struct sta_info *sta;
95 u64 t_t, t_r;
96
97 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
98
99 /* standard mentions only beacons */
100 if (stype != IEEE80211_STYPE_BEACON)
101 return;
102
103 /* The current tsf is a first approximation for the timestamp
104 * for the received beacon. Further down we try to get a
105 * better value from the rx_status->mactime field if
106 * available. Also we have to call drv_get_tsf() before
107 * entering the rcu-read section.*/
108 t_r = drv_get_tsf(local, sdata);
109
110 rcu_read_lock();
111 sta = sta_info_get(sdata, mgmt->sa);
112 if (!sta)
113 goto no_sync;
114
115 /* check offset sync conditions (13.13.2.2.1)
116 *
117 * TODO also sync to
118 * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
119 */
120
121 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
122 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
123 msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
124 goto no_sync;
125 }
126
127 if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
128 /*
129 * The mactime is defined as the time the first data symbol
130 * of the frame hits the PHY, and the timestamp of the beacon
131 * is defined as "the time that the data symbol containing the
132 * first bit of the timestamp is transmitted to the PHY plus
133 * the transmitting STA's delays through its local PHY from the
134 * MAC-PHY interface to its interface with the WM" (802.11
135 * 11.1.2)
136 *
137 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
138 * time" but we unless we interpret that time to be the same
139 * time of the beacon timestamp, the offset calculation will be
140 * off. Below we adjust t_r to be "the time at which the first
141 * symbol of the timestamp element in the beacon is received".
142 * This correction depends on the rate.
143 *
144 * Based on similar code in ibss.c
145 */
146 int rate;
147
148 if (rx_status->flag & RX_FLAG_HT) {
149 /* TODO:
150 * In principle there could be HT-beacons (Dual Beacon
151 * HT Operation options), but for now ignore them and
152 * just use the primary (i.e. non-HT) beacons for
153 * synchronization.
154 * */
155 goto no_sync;
156 } else
157 rate = local->hw.wiphy->bands[rx_status->band]->
158 bitrates[rx_status->rate_idx].bitrate;
159
160 /* 24 bytes of header * 8 bits/byte *
161 * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/
162 t_r = rx_status->mactime + (24 * 8 * 10 / rate);
163 }
164
165 /* Timing offset calculation (see 13.13.2.2.2) */
166 t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
167 sta->t_offset = t_t - t_r;
168
169 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
170 s64 t_clockdrift = sta->t_offset_setpoint
171 - sta->t_offset;
172 msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
173 sta->sta.addr,
174 (long long) sta->t_offset,
175 (long long)
176 sta->t_offset_setpoint,
177 (long long) t_clockdrift);
178
179 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
180 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
181 msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
182 sta->sta.addr,
183 (long long) t_clockdrift);
184 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
185 goto no_sync;
186 }
187
188 rcu_read_unlock();
189
190 spin_lock_bh(&ifmsh->sync_offset_lock);
191 if (t_clockdrift >
192 ifmsh->sync_offset_clockdrift_max)
193 ifmsh->sync_offset_clockdrift_max
194 = t_clockdrift;
195 spin_unlock_bh(&ifmsh->sync_offset_lock);
196
197 } else {
198 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
199 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
200 msync_dbg("STA %pM : offset was invalid, "
201 " sta->t_offset=%lld",
202 sta->sta.addr,
203 (long long) sta->t_offset);
204 rcu_read_unlock();
205 }
206 return;
207
208no_sync:
209 rcu_read_unlock();
210}
211
212static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
213{
214 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
215
216 WARN_ON(ifmsh->mesh_sp_id
217 != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
218 BUG_ON(!rcu_read_lock_held());
219
220 spin_lock_bh(&ifmsh->sync_offset_lock);
221
222 if (ifmsh->sync_offset_clockdrift_max >
223 TOFFSET_MINIMUM_ADJUSTMENT) {
224 /* Since ajusting the tsf here would
225 * require a possibly blocking call
226 * to the driver tsf setter, we punt
227 * the tsf adjustment to the mesh tasklet
228 */
229 msync_dbg("TBTT : kicking off TBTT "
230 "adjustment with "
231 "clockdrift_max=%lld",
232 ifmsh->sync_offset_clockdrift_max);
233 set_bit(MESH_WORK_DRIFT_ADJUST,
234 &ifmsh->wrkq_flags);
235 } else {
236 msync_dbg("TBTT : max clockdrift=%lld; "
237 "too small to adjust",
238 (long long)
239 ifmsh->sync_offset_clockdrift_max);
240 ifmsh->sync_offset_clockdrift_max = 0;
241 }
242 spin_unlock_bh(&ifmsh->sync_offset_lock);
243}
244
245static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
246{
247 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
248 u8 offset;
249
250 if (!ifmsh->ie || !ifmsh->ie_len)
251 return NULL;
252
253 offset = ieee80211_ie_split_vendor(ifmsh->ie,
254 ifmsh->ie_len, 0);
255
256 if (!offset)
257 return NULL;
258
259 return ifmsh->ie + offset + 2;
260}
261
262static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
263 u16 stype,
264 struct ieee80211_mgmt *mgmt,
265 struct ieee802_11_elems *elems,
266 struct ieee80211_rx_status *rx_status)
267{
268 const u8 *oui;
269
270 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
271 msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
272 oui = mesh_get_vendor_oui(sdata);
273 /* here you would implement the vendor offset tracking for this oui */
274}
275
276static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
277{
278 const u8 *oui;
279
280 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
281 msync_dbg("called mesh_sync_vendor_adjust_tbtt");
282 oui = mesh_get_vendor_oui(sdata);
283 /* here you would implement the vendor tsf adjustment for this oui */
284}
285
286/* global variable */
287static struct sync_method sync_methods[] = {
288 {
289 .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
290 .ops = {
291 .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
292 .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
293 }
294 },
295 {
296 .method = IEEE80211_SYNC_METHOD_VENDOR,
297 .ops = {
298 .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
299 .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
300 }
301 },
302};
303
304struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
305{
306 struct ieee80211_mesh_sync_ops *ops = NULL;
307 u8 i;
308
309 for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
310 if (sync_methods[i].method == method) {
311 ops = &sync_methods[i].ops;
312 break;
313 }
314 }
315 return ops;
316}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 295be92f7c77..04c306308987 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,6 +30,12 @@
30#include "rate.h" 30#include "rate.h"
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_MAX_TRIES 3
35#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
36#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
37#define IEEE80211_ASSOC_MAX_TRIES 3
38
33static int max_nullfunc_tries = 2; 39static int max_nullfunc_tries = 2;
34module_param(max_nullfunc_tries, int, 0644); 40module_param(max_nullfunc_tries, int, 0644);
35MODULE_PARM_DESC(max_nullfunc_tries, 41MODULE_PARM_DESC(max_nullfunc_tries,
@@ -82,6 +88,8 @@ MODULE_PARM_DESC(probe_wait_ms,
82#define TMR_RUNNING_TIMER 0 88#define TMR_RUNNING_TIMER 0
83#define TMR_RUNNING_CHANSW 1 89#define TMR_RUNNING_CHANSW 1
84 90
91#define DEAUTH_DISASSOC_LEN (24 /* hdr */ + 2 /* reason */)
92
85/* 93/*
86 * All cfg80211 functions have to be called outside a locked 94 * All cfg80211 functions have to be called outside a locked
87 * section so that they can acquire a lock themselves... This 95 * section so that they can acquire a lock themselves... This
@@ -97,6 +105,15 @@ enum rx_mgmt_action {
97 105
98 /* caller must call cfg80211_send_disassoc() */ 106 /* caller must call cfg80211_send_disassoc() */
99 RX_MGMT_CFG80211_DISASSOC, 107 RX_MGMT_CFG80211_DISASSOC,
108
109 /* caller must call cfg80211_send_rx_auth() */
110 RX_MGMT_CFG80211_RX_AUTH,
111
112 /* caller must call cfg80211_send_rx_assoc() */
113 RX_MGMT_CFG80211_RX_ASSOC,
114
115 /* caller must call cfg80211_send_assoc_timeout() */
116 RX_MGMT_CFG80211_ASSOC_TIMEOUT,
100}; 117};
101 118
102/* utils */ 119/* utils */
@@ -115,8 +132,7 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
115 * has happened -- the work that runs from this timer will 132 * has happened -- the work that runs from this timer will
116 * do that. 133 * do that.
117 */ 134 */
118static void run_again(struct ieee80211_if_managed *ifmgd, 135static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout)
119 unsigned long timeout)
120{ 136{
121 ASSERT_MGD_MTX(ifmgd); 137 ASSERT_MGD_MTX(ifmgd);
122 138
@@ -127,7 +143,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
127 143
128void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) 144void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
129{ 145{
130 if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) 146 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
131 return; 147 return;
132 148
133 mod_timer(&sdata->u.mgd.bcn_mon_timer, 149 mod_timer(&sdata->u.mgd.bcn_mon_timer,
@@ -155,177 +171,426 @@ static int ecw2cw(int ecw)
155 return (1 << ecw) - 1; 171 return (1 << ecw) - 1;
156} 172}
157 173
158/* 174static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
159 * ieee80211_enable_ht should be called only after the operating band 175 struct ieee80211_ht_operation *ht_oper,
160 * has been determined as ht configuration depends on the hw's 176 const u8 *bssid, bool reconfig)
161 * HT abilities for a specific band.
162 */
163static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
164 struct ieee80211_ht_info *hti,
165 const u8 *bssid, u16 ap_ht_cap_flags,
166 bool beacon_htcap_ie)
167{ 177{
168 struct ieee80211_local *local = sdata->local; 178 struct ieee80211_local *local = sdata->local;
169 struct ieee80211_supported_band *sband; 179 struct ieee80211_supported_band *sband;
170 struct sta_info *sta; 180 struct sta_info *sta;
171 u32 changed = 0; 181 u32 changed = 0;
172 int hti_cfreq;
173 u16 ht_opmode; 182 u16 ht_opmode;
174 bool enable_ht = true; 183 bool disable_40 = false;
175 enum nl80211_channel_type prev_chantype;
176 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
177 184
178 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 185 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
179 186
180 prev_chantype = sdata->vif.bss_conf.channel_type; 187 switch (sdata->vif.bss_conf.channel_type) {
188 case NL80211_CHAN_HT40PLUS:
189 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
190 disable_40 = true;
191 break;
192 case NL80211_CHAN_HT40MINUS:
193 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
194 disable_40 = true;
195 break;
196 default:
197 break;
198 }
181 199
182 /* HT is not supported */ 200 /* This can change during the lifetime of the BSS */
183 if (!sband->ht_cap.ht_supported) 201 if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
184 enable_ht = false; 202 disable_40 = true;
185 203
186 if (enable_ht) { 204 mutex_lock(&local->sta_mtx);
187 hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, 205 sta = sta_info_get(sdata, bssid);
188 sband->band); 206
189 /* check that channel matches the right operating channel */ 207 WARN_ON_ONCE(!sta);
190 if (local->hw.conf.channel->center_freq != hti_cfreq) { 208
191 /* Some APs mess this up, evidently. 209 if (sta && !sta->supports_40mhz)
192 * Netgear WNDR3700 sometimes reports 4 higher than 210 disable_40 = true;
193 * the actual channel, for instance. 211
194 */ 212 if (sta && (!reconfig ||
195 printk(KERN_DEBUG 213 (disable_40 != !(sta->sta.ht_cap.cap &
196 "%s: Wrong control channel in association" 214 IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
197 " response: configured center-freq: %d" 215
198 " hti-cfreq: %d hti->control_chan: %d" 216 if (disable_40)
199 " band: %d. Disabling HT.\n", 217 sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
200 sdata->name, 218 else
201 local->hw.conf.channel->center_freq, 219 sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
202 hti_cfreq, hti->control_chan, 220
203 sband->band); 221 rate_control_rate_update(local, sband, sta,
204 enable_ht = false; 222 IEEE80211_RC_BW_CHANGED);
205 }
206 } 223 }
224 mutex_unlock(&local->sta_mtx);
207 225
208 if (enable_ht) { 226 ht_opmode = le16_to_cpu(ht_oper->operation_mode);
209 channel_type = NL80211_CHAN_HT20;
210 227
211 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && 228 /* if bss configuration changed store the new one */
212 !ieee80111_cfg_override_disables_ht40(sdata) && 229 if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) {
213 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && 230 changed |= BSS_CHANGED_HT;
214 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { 231 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
215 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 232 }
216 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 233
217 if (!(local->hw.conf.channel->flags & 234 return changed;
218 IEEE80211_CHAN_NO_HT40PLUS)) 235}
219 channel_type = NL80211_CHAN_HT40PLUS; 236
220 break; 237/* frame sending functions */
221 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 238
222 if (!(local->hw.conf.channel->flags & 239static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
223 IEEE80211_CHAN_NO_HT40MINUS)) 240 struct ieee80211_supported_band *sband,
224 channel_type = NL80211_CHAN_HT40MINUS; 241 u32 *rates)
242{
243 int i, j, count;
244 *rates = 0;
245 count = 0;
246 for (i = 0; i < supp_rates_len; i++) {
247 int rate = (supp_rates[i] & 0x7F) * 5;
248
249 for (j = 0; j < sband->n_bitrates; j++)
250 if (sband->bitrates[j].bitrate == rate) {
251 *rates |= BIT(j);
252 count++;
225 break; 253 break;
226 } 254 }
227 }
228 } 255 }
229 256
230 if (local->tmp_channel) 257 return count;
231 local->tmp_channel_type = channel_type; 258}
232 259
233 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
234 /* can only fail due to HT40+/- mismatch */ 261 struct sk_buff *skb, const u8 *ht_oper_ie,
235 channel_type = NL80211_CHAN_HT20; 262 struct ieee80211_supported_band *sband,
236 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 263 struct ieee80211_channel *channel,
237 } 264 enum ieee80211_smps_mode smps)
265{
266 struct ieee80211_ht_operation *ht_oper;
267 u8 *pos;
268 u32 flags = channel->flags;
269 u16 cap;
270 struct ieee80211_sta_ht_cap ht_cap;
238 271
239 if (beacon_htcap_ie && (prev_chantype != channel_type)) { 272 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
240 /*
241 * Whenever the AP announces the HT mode change that can be
242 * 40MHz intolerant or etc., it would be safer to stop tx
243 * queues before doing hw config to avoid buffer overflow.
244 */
245 ieee80211_stop_queues_by_reason(&sdata->local->hw,
246 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
247 273
248 /* flush out all packets */ 274 if (!ht_oper_ie)
249 synchronize_net(); 275 return;
250 276
251 drv_flush(local, false); 277 if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
252 } 278 return;
253 279
254 /* channel_type change automatically detected */ 280 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
255 ieee80211_hw_config(local, 0); 281 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
256 282
257 if (prev_chantype != channel_type) { 283 ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
258 rcu_read_lock();
259 sta = sta_info_get(sdata, bssid);
260 if (sta)
261 rate_control_rate_update(local, sband, sta,
262 IEEE80211_RC_HT_CHANGED,
263 channel_type);
264 rcu_read_unlock();
265 284
266 if (beacon_htcap_ie) 285 /* determine capability flags */
267 ieee80211_wake_queues_by_reason(&sdata->local->hw, 286 cap = ht_cap.cap;
268 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
269 }
270 287
271 ht_opmode = le16_to_cpu(hti->operation_mode); 288 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
289 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
290 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
291 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
292 cap &= ~IEEE80211_HT_CAP_SGI_40;
293 }
294 break;
295 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
296 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
297 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
298 cap &= ~IEEE80211_HT_CAP_SGI_40;
299 }
300 break;
301 }
272 302
273 /* if bss configuration changed store the new one */ 303 /*
274 if (sdata->ht_opmode_valid != enable_ht || 304 * If 40 MHz was disabled associate as though we weren't
275 sdata->vif.bss_conf.ht_operation_mode != ht_opmode || 305 * capable of 40 MHz -- some broken APs will never fall
276 prev_chantype != channel_type) { 306 * back to trying to transmit in 20 MHz.
277 changed |= BSS_CHANGED_HT; 307 */
278 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 308 if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) {
279 sdata->ht_opmode_valid = enable_ht; 309 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
310 cap &= ~IEEE80211_HT_CAP_SGI_40;
311 }
312
313 /* set SM PS mode properly */
314 cap &= ~IEEE80211_HT_CAP_SM_PS;
315 switch (smps) {
316 case IEEE80211_SMPS_AUTOMATIC:
317 case IEEE80211_SMPS_NUM_MODES:
318 WARN_ON(1);
319 case IEEE80211_SMPS_OFF:
320 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
321 IEEE80211_HT_CAP_SM_PS_SHIFT;
322 break;
323 case IEEE80211_SMPS_STATIC:
324 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
325 IEEE80211_HT_CAP_SM_PS_SHIFT;
326 break;
327 case IEEE80211_SMPS_DYNAMIC:
328 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
329 IEEE80211_HT_CAP_SM_PS_SHIFT;
330 break;
280 } 331 }
281 332
282 return changed; 333 /* reserve and fill IE */
334 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
335 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
283} 336}
284 337
285/* frame sending functions */ 338static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
286
287static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
288 const u8 *bssid, u16 stype, u16 reason,
289 void *cookie, bool send_frame)
290{ 339{
291 struct ieee80211_local *local = sdata->local; 340 struct ieee80211_local *local = sdata->local;
292 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 341 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
342 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
293 struct sk_buff *skb; 343 struct sk_buff *skb;
294 struct ieee80211_mgmt *mgmt; 344 struct ieee80211_mgmt *mgmt;
345 u8 *pos, qos_info;
346 size_t offset = 0, noffset;
347 int i, count, rates_len, supp_rates_len;
348 u16 capab;
349 struct ieee80211_supported_band *sband;
350 u32 rates = 0;
351
352 lockdep_assert_held(&ifmgd->mtx);
295 353
296 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 354 sband = local->hw.wiphy->bands[local->oper_channel->band];
355
356 if (assoc_data->supp_rates_len) {
357 /*
358 * Get all rates supported by the device and the AP as
359 * some APs don't like getting a superset of their rates
360 * in the association request (e.g. D-Link DAP 1353 in
361 * b-only mode)...
362 */
363 rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
364 assoc_data->supp_rates_len,
365 sband, &rates);
366 } else {
367 /*
368 * In case AP not provide any supported rates information
369 * before association, we send information element(s) with
370 * all rates that we support.
371 */
372 rates = ~0;
373 rates_len = sband->n_bitrates;
374 }
375
376 skb = alloc_skb(local->hw.extra_tx_headroom +
377 sizeof(*mgmt) + /* bit too much but doesn't matter */
378 2 + assoc_data->ssid_len + /* SSID */
379 4 + rates_len + /* (extended) rates */
380 4 + /* power capability */
381 2 + 2 * sband->n_channels + /* supported channels */
382 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
383 assoc_data->ie_len + /* extra IEs */
384 9, /* WMM */
385 GFP_KERNEL);
297 if (!skb) 386 if (!skb)
298 return; 387 return;
299 388
300 skb_reserve(skb, local->hw.extra_tx_headroom); 389 skb_reserve(skb, local->hw.extra_tx_headroom);
301 390
391 capab = WLAN_CAPABILITY_ESS;
392
393 if (sband->band == IEEE80211_BAND_2GHZ) {
394 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
395 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
396 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
397 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
398 }
399
400 if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY)
401 capab |= WLAN_CAPABILITY_PRIVACY;
402
403 if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
404 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
405 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
406
302 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 407 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
303 memset(mgmt, 0, 24); 408 memset(mgmt, 0, 24);
409 memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN);
410 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
411 memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN);
412
413 if (!is_zero_ether_addr(assoc_data->prev_bssid)) {
414 skb_put(skb, 10);
415 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
416 IEEE80211_STYPE_REASSOC_REQ);
417 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
418 mgmt->u.reassoc_req.listen_interval =
419 cpu_to_le16(local->hw.conf.listen_interval);
420 memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
421 ETH_ALEN);
422 } else {
423 skb_put(skb, 4);
424 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
425 IEEE80211_STYPE_ASSOC_REQ);
426 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
427 mgmt->u.assoc_req.listen_interval =
428 cpu_to_le16(local->hw.conf.listen_interval);
429 }
430
431 /* SSID */
432 pos = skb_put(skb, 2 + assoc_data->ssid_len);
433 *pos++ = WLAN_EID_SSID;
434 *pos++ = assoc_data->ssid_len;
435 memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
436
437 /* add all rates which were marked to be used above */
438 supp_rates_len = rates_len;
439 if (supp_rates_len > 8)
440 supp_rates_len = 8;
441
442 pos = skb_put(skb, supp_rates_len + 2);
443 *pos++ = WLAN_EID_SUPP_RATES;
444 *pos++ = supp_rates_len;
445
446 count = 0;
447 for (i = 0; i < sband->n_bitrates; i++) {
448 if (BIT(i) & rates) {
449 int rate = sband->bitrates[i].bitrate;
450 *pos++ = (u8) (rate / 5);
451 if (++count == 8)
452 break;
453 }
454 }
455
456 if (rates_len > count) {
457 pos = skb_put(skb, rates_len - count + 2);
458 *pos++ = WLAN_EID_EXT_SUPP_RATES;
459 *pos++ = rates_len - count;
460
461 for (i++; i < sband->n_bitrates; i++) {
462 if (BIT(i) & rates) {
463 int rate = sband->bitrates[i].bitrate;
464 *pos++ = (u8) (rate / 5);
465 }
466 }
467 }
468
469 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
470 /* 1. power capabilities */
471 pos = skb_put(skb, 4);
472 *pos++ = WLAN_EID_PWR_CAPABILITY;
473 *pos++ = 2;
474 *pos++ = 0; /* min tx power */
475 *pos++ = local->oper_channel->max_power; /* max tx power */
476
477 /* 2. supported channels */
478 /* TODO: get this in reg domain format */
479 pos = skb_put(skb, 2 * sband->n_channels + 2);
480 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
481 *pos++ = 2 * sband->n_channels;
482 for (i = 0; i < sband->n_channels; i++) {
483 *pos++ = ieee80211_frequency_to_channel(
484 sband->channels[i].center_freq);
485 *pos++ = 1; /* one channel in the subband*/
486 }
487 }
488
489 /* if present, add any custom IEs that go before HT */
490 if (assoc_data->ie_len && assoc_data->ie) {
491 static const u8 before_ht[] = {
492 WLAN_EID_SSID,
493 WLAN_EID_SUPP_RATES,
494 WLAN_EID_EXT_SUPP_RATES,
495 WLAN_EID_PWR_CAPABILITY,
496 WLAN_EID_SUPPORTED_CHANNELS,
497 WLAN_EID_RSN,
498 WLAN_EID_QOS_CAPA,
499 WLAN_EID_RRM_ENABLED_CAPABILITIES,
500 WLAN_EID_MOBILITY_DOMAIN,
501 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
502 };
503 noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
504 before_ht, ARRAY_SIZE(before_ht),
505 offset);
506 pos = skb_put(skb, noffset - offset);
507 memcpy(pos, assoc_data->ie + offset, noffset - offset);
508 offset = noffset;
509 }
510
511 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
512 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
513 sband, local->oper_channel, ifmgd->ap_smps);
514
515 /* if present, add any custom non-vendor IEs that go after HT */
516 if (assoc_data->ie_len && assoc_data->ie) {
517 noffset = ieee80211_ie_split_vendor(assoc_data->ie,
518 assoc_data->ie_len,
519 offset);
520 pos = skb_put(skb, noffset - offset);
521 memcpy(pos, assoc_data->ie + offset, noffset - offset);
522 offset = noffset;
523 }
524
525 if (assoc_data->wmm) {
526 if (assoc_data->uapsd) {
527 qos_info = ifmgd->uapsd_queues;
528 qos_info |= (ifmgd->uapsd_max_sp_len <<
529 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
530 } else {
531 qos_info = 0;
532 }
533
534 pos = skb_put(skb, 9);
535 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
536 *pos++ = 7; /* len */
537 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
538 *pos++ = 0x50;
539 *pos++ = 0xf2;
540 *pos++ = 2; /* WME */
541 *pos++ = 0; /* WME info */
542 *pos++ = 1; /* WME ver */
543 *pos++ = qos_info;
544 }
545
546 /* add any remaining custom (i.e. vendor specific here) IEs */
547 if (assoc_data->ie_len && assoc_data->ie) {
548 noffset = assoc_data->ie_len;
549 pos = skb_put(skb, noffset - offset);
550 memcpy(pos, assoc_data->ie + offset, noffset - offset);
551 }
552
553 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
554 ieee80211_tx_skb(sdata, skb);
555}
556
557static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
558 const u8 *bssid, u16 stype,
559 u16 reason, bool send_frame,
560 u8 *frame_buf)
561{
562 struct ieee80211_local *local = sdata->local;
563 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
564 struct sk_buff *skb;
565 struct ieee80211_mgmt *mgmt = (void *)frame_buf;
566
567 /* build frame */
568 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
569 mgmt->duration = 0; /* initialize only */
570 mgmt->seq_ctrl = 0; /* initialize only */
304 memcpy(mgmt->da, bssid, ETH_ALEN); 571 memcpy(mgmt->da, bssid, ETH_ALEN);
305 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 572 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
306 memcpy(mgmt->bssid, bssid, ETH_ALEN); 573 memcpy(mgmt->bssid, bssid, ETH_ALEN);
307 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
308 skb_put(skb, 2);
309 /* u.deauth.reason_code == u.disassoc.reason_code */ 574 /* u.deauth.reason_code == u.disassoc.reason_code */
310 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 575 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
311 576
312 if (stype == IEEE80211_STYPE_DEAUTH) 577 if (send_frame) {
313 if (cookie) 578 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
314 __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 579 DEAUTH_DISASSOC_LEN);
315 else 580 if (!skb)
316 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 581 return;
317 else 582
318 if (cookie) 583 skb_reserve(skb, local->hw.extra_tx_headroom);
319 __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); 584
320 else 585 /* copy in frame */
321 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); 586 memcpy(skb_put(skb, DEAUTH_DISASSOC_LEN),
322 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) 587 mgmt, DEAUTH_DISASSOC_LEN);
323 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
324 588
325 if (send_frame) 589 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
590 IEEE80211_SKB_CB(skb)->flags |=
591 IEEE80211_TX_INTFL_DONT_ENCRYPT;
326 ieee80211_tx_skb(sdata, skb); 592 ieee80211_tx_skb(sdata, skb);
327 else 593 }
328 kfree_skb(skb);
329} 594}
330 595
331void ieee80211_send_pspoll(struct ieee80211_local *local, 596void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -547,7 +812,7 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
547 if (pwr_constr_elem_len != 1) 812 if (pwr_constr_elem_len != 1)
548 return; 813 return;
549 814
550 if ((*pwr_constr_elem <= conf->channel->max_power) && 815 if ((*pwr_constr_elem <= conf->channel->max_reg_power) &&
551 (*pwr_constr_elem != sdata->local->power_constr_level)) { 816 (*pwr_constr_elem != sdata->local->power_constr_level)) {
552 sdata->local->power_constr_level = *pwr_constr_elem; 817 sdata->local->power_constr_level = *pwr_constr_elem;
553 ieee80211_hw_config(sdata->local, 0); 818 ieee80211_hw_config(sdata->local, 0);
@@ -869,7 +1134,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
869 if (!local->ops->conf_tx) 1134 if (!local->ops->conf_tx)
870 return; 1135 return;
871 1136
872 if (local->hw.queues < 4) 1137 if (local->hw.queues < IEEE80211_NUM_ACS)
873 return; 1138 return;
874 1139
875 if (!wmm_param) 1140 if (!wmm_param)
@@ -879,7 +1144,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
879 return; 1144 return;
880 1145
881 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 1146 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
882 uapsd_queues = local->uapsd_queues; 1147 uapsd_queues = ifmgd->uapsd_queues;
883 1148
884 count = wmm_param[6] & 0x0f; 1149 count = wmm_param[6] & 0x0f;
885 if (count == ifmgd->wmm_last_param_set) 1150 if (count == ifmgd->wmm_last_param_set)
@@ -953,7 +1218,6 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
953 1218
954 /* enable WMM or activate new settings */ 1219 /* enable WMM or activate new settings */
955 sdata->vif.bss_conf.qos = true; 1220 sdata->vif.bss_conf.qos = true;
956 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
957} 1221}
958 1222
959static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 1223static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -1006,7 +1270,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1006 bss_info_changed |= BSS_CHANGED_ASSOC; 1270 bss_info_changed |= BSS_CHANGED_ASSOC;
1007 /* set timing information */ 1271 /* set timing information */
1008 bss_conf->beacon_int = cbss->beacon_interval; 1272 bss_conf->beacon_int = cbss->beacon_interval;
1009 bss_conf->timestamp = cbss->tsf; 1273 bss_conf->last_tsf = cbss->tsf;
1010 1274
1011 bss_info_changed |= BSS_CHANGED_BEACON_INT; 1275 bss_info_changed |= BSS_CHANGED_BEACON_INT;
1012 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 1276 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -1032,18 +1296,9 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1032 bss_conf->dtim_period = 0; 1296 bss_conf->dtim_period = 0;
1033 1297
1034 bss_conf->assoc = 1; 1298 bss_conf->assoc = 1;
1035 /*
1036 * For now just always ask the driver to update the basic rateset
1037 * when we have associated, we aren't checking whether it actually
1038 * changed or not.
1039 */
1040 bss_info_changed |= BSS_CHANGED_BASIC_RATES;
1041
1042 /* And the BSSID changed - we're associated now */
1043 bss_info_changed |= BSS_CHANGED_BSSID;
1044 1299
1045 /* Tell the driver to monitor connection quality (if supported) */ 1300 /* Tell the driver to monitor connection quality (if supported) */
1046 if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && 1301 if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
1047 bss_conf->cqm_rssi_thold) 1302 bss_conf->cqm_rssi_thold)
1048 bss_info_changed |= BSS_CHANGED_CQM; 1303 bss_info_changed |= BSS_CHANGED_CQM;
1049 1304
@@ -1065,16 +1320,20 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1065} 1320}
1066 1321
1067static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, 1322static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1068 bool remove_sta, bool tx) 1323 u16 stype, u16 reason, bool tx,
1324 u8 *frame_buf)
1069{ 1325{
1070 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1326 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1071 struct ieee80211_local *local = sdata->local; 1327 struct ieee80211_local *local = sdata->local;
1072 struct sta_info *sta; 1328 struct sta_info *sta;
1073 u32 changed = 0, config_changed = 0; 1329 u32 changed = 0;
1074 u8 bssid[ETH_ALEN]; 1330 u8 bssid[ETH_ALEN];
1075 1331
1076 ASSERT_MGD_MTX(ifmgd); 1332 ASSERT_MGD_MTX(ifmgd);
1077 1333
1334 if (WARN_ON_ONCE(tx && !frame_buf))
1335 return;
1336
1078 if (WARN_ON(!ifmgd->associated)) 1337 if (WARN_ON(!ifmgd->associated))
1079 return; 1338 return;
1080 1339
@@ -1108,19 +1367,26 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1108 } 1367 }
1109 mutex_unlock(&local->sta_mtx); 1368 mutex_unlock(&local->sta_mtx);
1110 1369
1370 /* deauthenticate/disassociate now */
1371 if (tx || frame_buf)
1372 ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
1373 tx, frame_buf);
1374
1375 /* flush out frame */
1376 if (tx)
1377 drv_flush(local, false);
1378
1379 /* remove AP and TDLS peers */
1380 sta_info_flush(local, sdata);
1381
1382 /* finally reset all BSS / config parameters */
1111 changed |= ieee80211_reset_erp_info(sdata); 1383 changed |= ieee80211_reset_erp_info(sdata);
1112 1384
1113 ieee80211_led_assoc(local, 0); 1385 ieee80211_led_assoc(local, 0);
1114 changed |= BSS_CHANGED_ASSOC; 1386 changed |= BSS_CHANGED_ASSOC;
1115 sdata->vif.bss_conf.assoc = false; 1387 sdata->vif.bss_conf.assoc = false;
1116 1388
1117 ieee80211_set_wmm_default(sdata);
1118
1119 /* channel(_type) changes are handled by ieee80211_hw_config */
1120 WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
1121
1122 /* on the next assoc, re-program HT parameters */ 1389 /* on the next assoc, re-program HT parameters */
1123 sdata->ht_opmode_valid = false;
1124 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1390 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1125 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); 1391 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1126 1392
@@ -1131,25 +1397,29 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1131 1397
1132 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 1398 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1133 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 1399 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1134 config_changed |= IEEE80211_CONF_CHANGE_PS; 1400 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1135 } 1401 }
1136 local->ps_sdata = NULL; 1402 local->ps_sdata = NULL;
1137 1403
1138 ieee80211_hw_config(local, config_changed);
1139
1140 /* Disable ARP filtering */ 1404 /* Disable ARP filtering */
1141 if (sdata->vif.bss_conf.arp_filter_enabled) { 1405 if (sdata->vif.bss_conf.arp_filter_enabled) {
1142 sdata->vif.bss_conf.arp_filter_enabled = false; 1406 sdata->vif.bss_conf.arp_filter_enabled = false;
1143 changed |= BSS_CHANGED_ARP_FILTER; 1407 changed |= BSS_CHANGED_ARP_FILTER;
1144 } 1408 }
1145 1409
1410 sdata->vif.bss_conf.qos = false;
1411 changed |= BSS_CHANGED_QOS;
1412
1146 /* The BSSID (not really interesting) and HT changed */ 1413 /* The BSSID (not really interesting) and HT changed */
1147 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 1414 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
1148 ieee80211_bss_info_change_notify(sdata, changed); 1415 ieee80211_bss_info_change_notify(sdata, changed);
1149 1416
1150 /* remove AP and TDLS peers */ 1417 /* channel(_type) changes are handled by ieee80211_hw_config */
1151 if (remove_sta) 1418 WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
1152 sta_info_flush(local, sdata); 1419 ieee80211_hw_config(local, 0);
1420
1421 /* disassociated - set to defaults now */
1422 ieee80211_set_wmm_default(sdata, false);
1153 1423
1154 del_timer_sync(&sdata->u.mgd.conn_mon_timer); 1424 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1155 del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 1425 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1177,19 +1447,24 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1177static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) 1447static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1178{ 1448{
1179 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1449 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1450 struct ieee80211_local *local = sdata->local;
1180 1451
1452 mutex_lock(&local->mtx);
1181 if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1453 if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1182 IEEE80211_STA_CONNECTION_POLL))) 1454 IEEE80211_STA_CONNECTION_POLL))) {
1183 return; 1455 mutex_unlock(&local->mtx);
1456 return;
1457 }
1184 1458
1185 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1459 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
1186 IEEE80211_STA_BEACON_POLL); 1460 IEEE80211_STA_BEACON_POLL);
1187 mutex_lock(&sdata->local->iflist_mtx); 1461
1188 ieee80211_recalc_ps(sdata->local, -1); 1462 mutex_lock(&local->iflist_mtx);
1189 mutex_unlock(&sdata->local->iflist_mtx); 1463 ieee80211_recalc_ps(local, -1);
1464 mutex_unlock(&local->iflist_mtx);
1190 1465
1191 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 1466 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1192 return; 1467 goto out;
1193 1468
1194 /* 1469 /*
1195 * We've received a probe response, but are not sure whether 1470 * We've received a probe response, but are not sure whether
@@ -1201,6 +1476,9 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1201 mod_timer(&ifmgd->conn_mon_timer, 1476 mod_timer(&ifmgd->conn_mon_timer,
1202 round_jiffies_up(jiffies + 1477 round_jiffies_up(jiffies +
1203 IEEE80211_CONNECTION_IDLE_TIME)); 1478 IEEE80211_CONNECTION_IDLE_TIME));
1479out:
1480 ieee80211_run_deferred_scan(local);
1481 mutex_unlock(&local->mtx);
1204} 1482}
1205 1483
1206void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, 1484void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1244,18 +1522,28 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1244 * anymore. The timeout will be reset if the frame is ACKed by 1522 * anymore. The timeout will be reset if the frame is ACKed by
1245 * the AP. 1523 * the AP.
1246 */ 1524 */
1525 ifmgd->probe_send_count++;
1526
1247 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { 1527 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
1248 ifmgd->nullfunc_failed = false; 1528 ifmgd->nullfunc_failed = false;
1249 ieee80211_send_nullfunc(sdata->local, sdata, 0); 1529 ieee80211_send_nullfunc(sdata->local, sdata, 0);
1250 } else { 1530 } else {
1531 int ssid_len;
1532
1251 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1533 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1252 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, 1534 if (WARN_ON_ONCE(ssid == NULL))
1253 (u32) -1, true, false); 1535 ssid_len = 0;
1536 else
1537 ssid_len = ssid[1];
1538
1539 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1540 0, (u32) -1, true, false);
1254 } 1541 }
1255 1542
1256 ifmgd->probe_send_count++;
1257 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1543 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
1258 run_again(ifmgd, ifmgd->probe_timeout); 1544 run_again(ifmgd, ifmgd->probe_timeout);
1545 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
1546 drv_flush(sdata->local, false);
1259} 1547}
1260 1548
1261static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, 1549static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -1267,21 +1555,22 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1267 if (!ieee80211_sdata_running(sdata)) 1555 if (!ieee80211_sdata_running(sdata))
1268 return; 1556 return;
1269 1557
1270 if (sdata->local->scanning)
1271 return;
1272
1273 if (sdata->local->tmp_channel)
1274 return;
1275
1276 mutex_lock(&ifmgd->mtx); 1558 mutex_lock(&ifmgd->mtx);
1277 1559
1278 if (!ifmgd->associated) 1560 if (!ifmgd->associated)
1279 goto out; 1561 goto out;
1280 1562
1563 mutex_lock(&sdata->local->mtx);
1564
1565 if (sdata->local->tmp_channel || sdata->local->scanning) {
1566 mutex_unlock(&sdata->local->mtx);
1567 goto out;
1568 }
1569
1281#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1570#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1282 if (beacon && net_ratelimit()) 1571 if (beacon)
1283 printk(KERN_DEBUG "%s: detected beacon loss from AP " 1572 net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
1284 "- sending probe request\n", sdata->name); 1573 sdata->name);
1285#endif 1574#endif
1286 1575
1287 /* 1576 /*
@@ -1304,6 +1593,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1304 else 1593 else
1305 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; 1594 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
1306 1595
1596 mutex_unlock(&sdata->local->mtx);
1597
1307 if (already) 1598 if (already)
1308 goto out; 1599 goto out;
1309 1600
@@ -1324,6 +1615,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1324 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1615 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1325 struct sk_buff *skb; 1616 struct sk_buff *skb;
1326 const u8 *ssid; 1617 const u8 *ssid;
1618 int ssid_len;
1327 1619
1328 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 1620 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1329 return NULL; 1621 return NULL;
@@ -1334,8 +1626,13 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1334 return NULL; 1626 return NULL;
1335 1627
1336 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1628 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1629 if (WARN_ON_ONCE(ssid == NULL))
1630 ssid_len = 0;
1631 else
1632 ssid_len = ssid[1];
1633
1337 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, 1634 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
1338 (u32) -1, ssid + 2, ssid[1], 1635 (u32) -1, ssid + 2, ssid_len,
1339 NULL, 0, true); 1636 NULL, 0, true);
1340 1637
1341 return skb; 1638 return skb;
@@ -1347,6 +1644,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1347 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1644 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1348 struct ieee80211_local *local = sdata->local; 1645 struct ieee80211_local *local = sdata->local;
1349 u8 bssid[ETH_ALEN]; 1646 u8 bssid[ETH_ALEN];
1647 u8 frame_buf[DEAUTH_DISASSOC_LEN];
1350 1648
1351 mutex_lock(&ifmgd->mtx); 1649 mutex_lock(&ifmgd->mtx);
1352 if (!ifmgd->associated) { 1650 if (!ifmgd->associated) {
@@ -1359,17 +1657,16 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1359 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n", 1657 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
1360 sdata->name, bssid); 1658 sdata->name, bssid);
1361 1659
1362 ieee80211_set_disassoc(sdata, true, true); 1660 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1661 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1662 false, frame_buf);
1363 mutex_unlock(&ifmgd->mtx); 1663 mutex_unlock(&ifmgd->mtx);
1364 1664
1365 /* 1665 /*
1366 * must be outside lock due to cfg80211, 1666 * must be outside lock due to cfg80211,
1367 * but that's not a problem. 1667 * but that's not a problem.
1368 */ 1668 */
1369 ieee80211_send_deauth_disassoc(sdata, bssid, 1669 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
1370 IEEE80211_STYPE_DEAUTH,
1371 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1372 NULL, true);
1373 1670
1374 mutex_lock(&local->mtx); 1671 mutex_lock(&local->mtx);
1375 ieee80211_recalc_idle(local); 1672 ieee80211_recalc_idle(local);
@@ -1423,6 +1720,126 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
1423EXPORT_SYMBOL(ieee80211_connection_loss); 1720EXPORT_SYMBOL(ieee80211_connection_loss);
1424 1721
1425 1722
1723static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1724 bool assoc)
1725{
1726 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1727
1728 lockdep_assert_held(&sdata->u.mgd.mtx);
1729
1730 if (!assoc) {
1731 sta_info_destroy_addr(sdata, auth_data->bss->bssid);
1732
1733 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
1734 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
1735 }
1736
1737 cfg80211_put_bss(auth_data->bss);
1738 kfree(auth_data);
1739 sdata->u.mgd.auth_data = NULL;
1740}
1741
1742static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1743 struct ieee80211_mgmt *mgmt, size_t len)
1744{
1745 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1746 u8 *pos;
1747 struct ieee802_11_elems elems;
1748
1749 pos = mgmt->u.auth.variable;
1750 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1751 if (!elems.challenge)
1752 return;
1753 auth_data->expected_transaction = 4;
1754 ieee80211_send_auth(sdata, 3, auth_data->algorithm,
1755 elems.challenge - 2, elems.challenge_len + 2,
1756 auth_data->bss->bssid, auth_data->bss->bssid,
1757 auth_data->key, auth_data->key_len,
1758 auth_data->key_idx);
1759}
1760
1761static enum rx_mgmt_action __must_check
1762ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1763 struct ieee80211_mgmt *mgmt, size_t len)
1764{
1765 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1766 u8 bssid[ETH_ALEN];
1767 u16 auth_alg, auth_transaction, status_code;
1768 struct sta_info *sta;
1769
1770 lockdep_assert_held(&ifmgd->mtx);
1771
1772 if (len < 24 + 6)
1773 return RX_MGMT_NONE;
1774
1775 if (!ifmgd->auth_data || ifmgd->auth_data->done)
1776 return RX_MGMT_NONE;
1777
1778 memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
1779
1780 if (!ether_addr_equal(bssid, mgmt->bssid))
1781 return RX_MGMT_NONE;
1782
1783 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1784 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1785 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1786
1787 if (auth_alg != ifmgd->auth_data->algorithm ||
1788 auth_transaction != ifmgd->auth_data->expected_transaction)
1789 return RX_MGMT_NONE;
1790
1791 if (status_code != WLAN_STATUS_SUCCESS) {
1792 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
1793 sdata->name, mgmt->sa, status_code);
1794 goto out;
1795 }
1796
1797 switch (ifmgd->auth_data->algorithm) {
1798 case WLAN_AUTH_OPEN:
1799 case WLAN_AUTH_LEAP:
1800 case WLAN_AUTH_FT:
1801 break;
1802 case WLAN_AUTH_SHARED_KEY:
1803 if (ifmgd->auth_data->expected_transaction != 4) {
1804 ieee80211_auth_challenge(sdata, mgmt, len);
1805 /* need another frame */
1806 return RX_MGMT_NONE;
1807 }
1808 break;
1809 default:
1810 WARN_ONCE(1, "invalid auth alg %d",
1811 ifmgd->auth_data->algorithm);
1812 return RX_MGMT_NONE;
1813 }
1814
1815 printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
1816 out:
1817 ifmgd->auth_data->done = true;
1818 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
1819 run_again(ifmgd, ifmgd->auth_data->timeout);
1820
1821 /* move station state to auth */
1822 mutex_lock(&sdata->local->sta_mtx);
1823 sta = sta_info_get(sdata, bssid);
1824 if (!sta) {
1825 WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
1826 goto out_err;
1827 }
1828 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
1829 printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
1830 sdata->name, bssid);
1831 goto out_err;
1832 }
1833 mutex_unlock(&sdata->local->sta_mtx);
1834
1835 return RX_MGMT_CFG80211_RX_AUTH;
1836 out_err:
1837 mutex_unlock(&sdata->local->sta_mtx);
1838 /* ignore frame -- wait for timeout */
1839 return RX_MGMT_NONE;
1840}
1841
1842
1426static enum rx_mgmt_action __must_check 1843static enum rx_mgmt_action __must_check
1427ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 1844ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1428 struct ieee80211_mgmt *mgmt, size_t len) 1845 struct ieee80211_mgmt *mgmt, size_t len)
@@ -1431,10 +1848,14 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1431 const u8 *bssid = NULL; 1848 const u8 *bssid = NULL;
1432 u16 reason_code; 1849 u16 reason_code;
1433 1850
1851 lockdep_assert_held(&ifmgd->mtx);
1852
1434 if (len < 24 + 2) 1853 if (len < 24 + 2)
1435 return RX_MGMT_NONE; 1854 return RX_MGMT_NONE;
1436 1855
1437 ASSERT_MGD_MTX(ifmgd); 1856 if (!ifmgd->associated ||
1857 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
1858 return RX_MGMT_NONE;
1438 1859
1439 bssid = ifmgd->associated->bssid; 1860 bssid = ifmgd->associated->bssid;
1440 1861
@@ -1443,7 +1864,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1443 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 1864 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1444 sdata->name, bssid, reason_code); 1865 sdata->name, bssid, reason_code);
1445 1866
1446 ieee80211_set_disassoc(sdata, true, false); 1867 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1868
1447 mutex_lock(&sdata->local->mtx); 1869 mutex_lock(&sdata->local->mtx);
1448 ieee80211_recalc_idle(sdata->local); 1870 ieee80211_recalc_idle(sdata->local);
1449 mutex_unlock(&sdata->local->mtx); 1871 mutex_unlock(&sdata->local->mtx);
@@ -1459,15 +1881,13 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1459 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1881 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1460 u16 reason_code; 1882 u16 reason_code;
1461 1883
1462 if (len < 24 + 2) 1884 lockdep_assert_held(&ifmgd->mtx);
1463 return RX_MGMT_NONE;
1464
1465 ASSERT_MGD_MTX(ifmgd);
1466 1885
1467 if (WARN_ON(!ifmgd->associated)) 1886 if (len < 24 + 2)
1468 return RX_MGMT_NONE; 1887 return RX_MGMT_NONE;
1469 1888
1470 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN))) 1889 if (!ifmgd->associated ||
1890 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
1471 return RX_MGMT_NONE; 1891 return RX_MGMT_NONE;
1472 1892
1473 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1893 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -1475,10 +1895,12 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1475 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1895 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1476 sdata->name, mgmt->sa, reason_code); 1896 sdata->name, mgmt->sa, reason_code);
1477 1897
1478 ieee80211_set_disassoc(sdata, true, false); 1898 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1899
1479 mutex_lock(&sdata->local->mtx); 1900 mutex_lock(&sdata->local->mtx);
1480 ieee80211_recalc_idle(sdata->local); 1901 ieee80211_recalc_idle(sdata->local);
1481 mutex_unlock(&sdata->local->mtx); 1902 mutex_unlock(&sdata->local->mtx);
1903
1482 return RX_MGMT_CFG80211_DISASSOC; 1904 return RX_MGMT_CFG80211_DISASSOC;
1483} 1905}
1484 1906
@@ -1524,25 +1946,38 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
1524 } 1946 }
1525} 1947}
1526 1948
1527static bool ieee80211_assoc_success(struct ieee80211_work *wk, 1949static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
1950 bool assoc)
1951{
1952 struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
1953
1954 lockdep_assert_held(&sdata->u.mgd.mtx);
1955
1956 if (!assoc) {
1957 sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
1958
1959 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
1960 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
1961 }
1962
1963 kfree(assoc_data);
1964 sdata->u.mgd.assoc_data = NULL;
1965}
1966
1967static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
1968 struct cfg80211_bss *cbss,
1528 struct ieee80211_mgmt *mgmt, size_t len) 1969 struct ieee80211_mgmt *mgmt, size_t len)
1529{ 1970{
1530 struct ieee80211_sub_if_data *sdata = wk->sdata;
1531 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1971 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1532 struct ieee80211_local *local = sdata->local; 1972 struct ieee80211_local *local = sdata->local;
1533 struct ieee80211_supported_band *sband; 1973 struct ieee80211_supported_band *sband;
1534 struct sta_info *sta; 1974 struct sta_info *sta;
1535 struct cfg80211_bss *cbss = wk->assoc.bss;
1536 u8 *pos; 1975 u8 *pos;
1537 u32 rates, basic_rates;
1538 u16 capab_info, aid; 1976 u16 capab_info, aid;
1539 struct ieee802_11_elems elems; 1977 struct ieee802_11_elems elems;
1540 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1978 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1541 u32 changed = 0; 1979 u32 changed = 0;
1542 int err; 1980 int err;
1543 bool have_higher_than_11mbit = false;
1544 u16 ap_ht_cap_flags;
1545 int min_rate = INT_MAX, min_rate_index = -1;
1546 1981
1547 /* AssocResp and ReassocResp have identical structure */ 1982 /* AssocResp and ReassocResp have identical structure */
1548 1983
@@ -1581,55 +2016,20 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1581 * station info was already allocated and inserted before 2016 * station info was already allocated and inserted before
1582 * the association and should be available to us 2017 * the association and should be available to us
1583 */ 2018 */
1584 sta = sta_info_get_rx(sdata, cbss->bssid); 2019 sta = sta_info_get(sdata, cbss->bssid);
1585 if (WARN_ON(!sta)) { 2020 if (WARN_ON(!sta)) {
1586 mutex_unlock(&sdata->local->sta_mtx); 2021 mutex_unlock(&sdata->local->sta_mtx);
1587 return false; 2022 return false;
1588 } 2023 }
1589 2024
1590 sta_info_move_state(sta, IEEE80211_STA_AUTH); 2025 sband = local->hw.wiphy->bands[local->oper_channel->band];
1591 sta_info_move_state(sta, IEEE80211_STA_ASSOC);
1592 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1593 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
1594
1595 rates = 0;
1596 basic_rates = 0;
1597 sband = local->hw.wiphy->bands[wk->chan->band];
1598
1599 ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
1600 &rates, &basic_rates, &have_higher_than_11mbit,
1601 &min_rate, &min_rate_index);
1602
1603 ieee80211_get_rates(sband, elems.ext_supp_rates,
1604 elems.ext_supp_rates_len, &rates, &basic_rates,
1605 &have_higher_than_11mbit,
1606 &min_rate, &min_rate_index);
1607
1608 /*
1609 * some buggy APs don't advertise basic_rates. use the lowest
1610 * supported rate instead.
1611 */
1612 if (unlikely(!basic_rates) && min_rate_index >= 0) {
1613 printk(KERN_DEBUG "%s: No basic rates in AssocResp. "
1614 "Using min supported rate instead.\n", sdata->name);
1615 basic_rates = BIT(min_rate_index);
1616 }
1617
1618 sta->sta.supp_rates[wk->chan->band] = rates;
1619 sdata->vif.bss_conf.basic_rates = basic_rates;
1620
1621 /* cf. IEEE 802.11 9.2.12 */
1622 if (wk->chan->band == IEEE80211_BAND_2GHZ &&
1623 have_higher_than_11mbit)
1624 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
1625 else
1626 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
1627 2026
1628 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2027 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1629 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2028 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
1630 elems.ht_cap_elem, &sta->sta.ht_cap); 2029 elems.ht_cap_elem, &sta->sta.ht_cap);
1631 2030
1632 ap_ht_cap_flags = sta->sta.ht_cap.cap; 2031 sta->supports_40mhz =
2032 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1633 2033
1634 rate_control_rate_init(sta); 2034 rate_control_rate_init(sta);
1635 2035
@@ -1639,15 +2039,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1639 if (elems.wmm_param) 2039 if (elems.wmm_param)
1640 set_sta_flag(sta, WLAN_STA_WME); 2040 set_sta_flag(sta, WLAN_STA_WME);
1641 2041
1642 /* sta_info_reinsert will also unlock the mutex lock */ 2042 err = sta_info_move_state(sta, IEEE80211_STA_AUTH);
1643 err = sta_info_reinsert(sta); 2043 if (!err)
1644 sta = NULL; 2044 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2045 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2046 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
1645 if (err) { 2047 if (err) {
1646 printk(KERN_DEBUG "%s: failed to insert STA entry for" 2048 printk(KERN_DEBUG
1647 " the AP (error %d)\n", sdata->name, err); 2049 "%s: failed to move station %pM to desired state\n",
2050 sdata->name, sta->sta.addr);
2051 WARN_ON(__sta_info_destroy(sta));
2052 mutex_unlock(&sdata->local->sta_mtx);
1648 return false; 2053 return false;
1649 } 2054 }
1650 2055
2056 mutex_unlock(&sdata->local->sta_mtx);
2057
1651 /* 2058 /*
1652 * Always handle WMM once after association regardless 2059 * Always handle WMM once after association regardless
1653 * of the first value the AP uses. Setting -1 here has 2060 * of the first value the AP uses. Setting -1 here has
@@ -1660,16 +2067,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1660 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, 2067 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
1661 elems.wmm_param_len); 2068 elems.wmm_param_len);
1662 else 2069 else
1663 ieee80211_set_wmm_default(sdata); 2070 ieee80211_set_wmm_default(sdata, false);
1664 2071 changed |= BSS_CHANGED_QOS;
1665 local->oper_channel = wk->chan;
1666 2072
1667 if (elems.ht_info_elem && elems.wmm_param && 2073 if (elems.ht_operation && elems.wmm_param &&
1668 (sdata->local->hw.queues >= 4) &&
1669 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2074 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1670 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 2075 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
1671 cbss->bssid, ap_ht_cap_flags, 2076 cbss->bssid, false);
1672 false);
1673 2077
1674 /* set AID and assoc capability, 2078 /* set AID and assoc capability,
1675 * ieee80211_set_associated() will tell the driver */ 2079 * ieee80211_set_associated() will tell the driver */
@@ -1694,7 +2098,88 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1694 return true; 2098 return true;
1695} 2099}
1696 2100
2101static enum rx_mgmt_action __must_check
2102ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2103 struct ieee80211_mgmt *mgmt, size_t len,
2104 struct cfg80211_bss **bss)
2105{
2106 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2107 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
2108 u16 capab_info, status_code, aid;
2109 struct ieee802_11_elems elems;
2110 u8 *pos;
2111 bool reassoc;
2112
2113 lockdep_assert_held(&ifmgd->mtx);
2114
2115 if (!assoc_data)
2116 return RX_MGMT_NONE;
2117 if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
2118 return RX_MGMT_NONE;
2119
2120 /*
2121 * AssocResp and ReassocResp have identical structure, so process both
2122 * of them in this function.
2123 */
2124
2125 if (len < 24 + 6)
2126 return RX_MGMT_NONE;
2127
2128 reassoc = ieee80211_is_reassoc_req(mgmt->frame_control);
2129 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
2130 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2131 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
2132
2133 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
2134 "status=%d aid=%d)\n",
2135 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
2136 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2137
2138 pos = mgmt->u.assoc_resp.variable;
2139 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
2140
2141 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
2142 elems.timeout_int && elems.timeout_int_len == 5 &&
2143 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
2144 u32 tu, ms;
2145 tu = get_unaligned_le32(elems.timeout_int + 1);
2146 ms = tu * 1024 / 1000;
2147 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
2148 "comeback duration %u TU (%u ms)\n",
2149 sdata->name, mgmt->sa, tu, ms);
2150 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2151 if (ms > IEEE80211_ASSOC_TIMEOUT)
2152 run_again(ifmgd, assoc_data->timeout);
2153 return RX_MGMT_NONE;
2154 }
2155
2156 *bss = assoc_data->bss;
2157
2158 if (status_code != WLAN_STATUS_SUCCESS) {
2159 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
2160 sdata->name, mgmt->sa, status_code);
2161 ieee80211_destroy_assoc_data(sdata, false);
2162 } else {
2163 printk(KERN_DEBUG "%s: associated\n", sdata->name);
2164
2165 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2166 /* oops -- internal error -- send timeout for now */
2167 ieee80211_destroy_assoc_data(sdata, true);
2168 sta_info_destroy_addr(sdata, mgmt->bssid);
2169 cfg80211_put_bss(*bss);
2170 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2171 }
1697 2172
2173 /*
2174 * destroy assoc_data afterwards, as otherwise an idle
2175 * recalc after assoc_data is NULL but before associated
2176 * is set can cause the interface to go idle
2177 */
2178 ieee80211_destroy_assoc_data(sdata, true);
2179 }
2180
2181 return RX_MGMT_CFG80211_RX_ASSOC;
2182}
1698static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 2183static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1699 struct ieee80211_mgmt *mgmt, 2184 struct ieee80211_mgmt *mgmt,
1700 size_t len, 2185 size_t len,
@@ -1708,7 +2193,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1708 struct ieee80211_channel *channel; 2193 struct ieee80211_channel *channel;
1709 bool need_ps = false; 2194 bool need_ps = false;
1710 2195
1711 if (sdata->u.mgd.associated) { 2196 if (sdata->u.mgd.associated &&
2197 ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
1712 bss = (void *)sdata->u.mgd.associated->priv; 2198 bss = (void *)sdata->u.mgd.associated->priv;
1713 /* not previously set so we may need to recalc */ 2199 /* not previously set so we may need to recalc */
1714 need_ps = !bss->dtim_period; 2200 need_ps = !bss->dtim_period;
@@ -1763,7 +2249,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1763 2249
1764 ASSERT_MGD_MTX(ifmgd); 2250 ASSERT_MGD_MTX(ifmgd);
1765 2251
1766 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) 2252 if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
1767 return; /* ignore ProbeResp to foreign address */ 2253 return; /* ignore ProbeResp to foreign address */
1768 2254
1769 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 2255 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1776,8 +2262,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1776 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 2262 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1777 2263
1778 if (ifmgd->associated && 2264 if (ifmgd->associated &&
1779 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0) 2265 ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
1780 ieee80211_reset_ap_probe(sdata); 2266 ieee80211_reset_ap_probe(sdata);
2267
2268 if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
2269 ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
2270 /* got probe response, continue with auth */
2271 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
2272 ifmgd->auth_data->tries = 0;
2273 ifmgd->auth_data->timeout = jiffies;
2274 run_again(ifmgd, ifmgd->auth_data->timeout);
2275 }
1781} 2276}
1782 2277
1783/* 2278/*
@@ -1799,7 +2294,7 @@ static const u64 care_about_ies =
1799 (1ULL << WLAN_EID_CHANNEL_SWITCH) | 2294 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
1800 (1ULL << WLAN_EID_PWR_CONSTRAINT) | 2295 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
1801 (1ULL << WLAN_EID_HT_CAPABILITY) | 2296 (1ULL << WLAN_EID_HT_CAPABILITY) |
1802 (1ULL << WLAN_EID_HT_INFORMATION); 2297 (1ULL << WLAN_EID_HT_OPERATION);
1803 2298
1804static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 2299static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1805 struct ieee80211_mgmt *mgmt, 2300 struct ieee80211_mgmt *mgmt,
@@ -1817,7 +2312,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1817 u32 ncrc; 2312 u32 ncrc;
1818 u8 *bssid; 2313 u8 *bssid;
1819 2314
1820 ASSERT_MGD_MTX(ifmgd); 2315 lockdep_assert_held(&ifmgd->mtx);
1821 2316
1822 /* Process beacon from the current BSS */ 2317 /* Process beacon from the current BSS */
1823 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2318 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
@@ -1827,21 +2322,25 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1827 if (rx_status->freq != local->hw.conf.channel->center_freq) 2322 if (rx_status->freq != local->hw.conf.channel->center_freq)
1828 return; 2323 return;
1829 2324
1830 /* 2325 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
1831 * We might have received a number of frames, among them a 2326 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
1832 * disassoc frame and a beacon... 2327 ieee802_11_parse_elems(mgmt->u.beacon.variable,
1833 */ 2328 len - baselen, &elems);
1834 if (!ifmgd->associated)
1835 return;
1836 2329
1837 bssid = ifmgd->associated->bssid; 2330 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
2331 false);
2332 ifmgd->assoc_data->have_beacon = true;
2333 ifmgd->assoc_data->sent_assoc = false;
2334 /* continue assoc process */
2335 ifmgd->assoc_data->timeout = jiffies;
2336 run_again(ifmgd, ifmgd->assoc_data->timeout);
2337 return;
2338 }
1838 2339
1839 /* 2340 if (!ifmgd->associated ||
1840 * And in theory even frames from a different AP we were just 2341 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
1841 * associated to a split-second ago!
1842 */
1843 if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
1844 return; 2342 return;
2343 bssid = ifmgd->associated->bssid;
1845 2344
1846 /* Track average RSSI from the Beacon frames of the current AP */ 2345 /* Track average RSSI from the Beacon frames of the current AP */
1847 ifmgd->last_beacon_signal = rx_status->signal; 2346 ifmgd->last_beacon_signal = rx_status->signal;
@@ -1882,7 +2381,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1882 2381
1883 if (bss_conf->cqm_rssi_thold && 2382 if (bss_conf->cqm_rssi_thold &&
1884 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && 2383 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
1885 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { 2384 !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
1886 int sig = ifmgd->ave_beacon_signal / 16; 2385 int sig = ifmgd->ave_beacon_signal / 16;
1887 int last_event = ifmgd->last_cqm_event_signal; 2386 int last_event = ifmgd->last_cqm_event_signal;
1888 int thold = bss_conf->cqm_rssi_thold; 2387 int thold = bss_conf->cqm_rssi_thold;
@@ -1906,10 +2405,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1906 2405
1907 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { 2406 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
1908#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2407#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1909 if (net_ratelimit()) { 2408 net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
1910 printk(KERN_DEBUG "%s: cancelling probereq poll due " 2409 sdata->name);
1911 "to a received beacon\n", sdata->name);
1912 }
1913#endif 2410#endif
1914 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2411 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
1915 mutex_lock(&local->iflist_mtx); 2412 mutex_lock(&local->iflist_mtx);
@@ -1943,11 +2440,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1943 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { 2440 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
1944 if (directed_tim) { 2441 if (directed_tim) {
1945 if (local->hw.conf.dynamic_ps_timeout > 0) { 2442 if (local->hw.conf.dynamic_ps_timeout > 0) {
1946 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 2443 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1947 ieee80211_hw_config(local, 2444 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1948 IEEE80211_CONF_CHANGE_PS); 2445 ieee80211_hw_config(local,
2446 IEEE80211_CONF_CHANGE_PS);
2447 }
1949 ieee80211_send_nullfunc(local, sdata, 0); 2448 ieee80211_send_nullfunc(local, sdata, 0);
1950 } else { 2449 } else if (!local->pspolling && sdata->u.mgd.powersave) {
1951 local->pspolling = true; 2450 local->pspolling = true;
1952 2451
1953 /* 2452 /*
@@ -1979,31 +2478,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1979 erp_valid, erp_value); 2478 erp_valid, erp_value);
1980 2479
1981 2480
1982 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && 2481 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
1983 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { 2482 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
1984 struct sta_info *sta;
1985 struct ieee80211_supported_band *sband; 2483 struct ieee80211_supported_band *sband;
1986 u16 ap_ht_cap_flags;
1987
1988 rcu_read_lock();
1989
1990 sta = sta_info_get(sdata, bssid);
1991 if (WARN_ON(!sta)) {
1992 rcu_read_unlock();
1993 return;
1994 }
1995 2484
1996 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2485 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1997 2486
1998 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2487 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
1999 elems.ht_cap_elem, &sta->sta.ht_cap); 2488 bssid, true);
2000
2001 ap_ht_cap_flags = sta->sta.ht_cap.cap;
2002
2003 rcu_read_unlock();
2004
2005 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
2006 bssid, ap_ht_cap_flags, true);
2007 } 2489 }
2008 2490
2009 /* Note: country IE parsing is done for us by cfg80211 */ 2491 /* Note: country IE parsing is done for us by cfg80211 */
@@ -2025,6 +2507,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2025 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2507 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2026 struct ieee80211_rx_status *rx_status; 2508 struct ieee80211_rx_status *rx_status;
2027 struct ieee80211_mgmt *mgmt; 2509 struct ieee80211_mgmt *mgmt;
2510 struct cfg80211_bss *bss = NULL;
2028 enum rx_mgmt_action rma = RX_MGMT_NONE; 2511 enum rx_mgmt_action rma = RX_MGMT_NONE;
2029 u16 fc; 2512 u16 fc;
2030 2513
@@ -2034,92 +2517,59 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2034 2517
2035 mutex_lock(&ifmgd->mtx); 2518 mutex_lock(&ifmgd->mtx);
2036 2519
2037 if (ifmgd->associated && 2520 switch (fc & IEEE80211_FCTL_STYPE) {
2038 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) { 2521 case IEEE80211_STYPE_BEACON:
2039 switch (fc & IEEE80211_FCTL_STYPE) { 2522 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
2040 case IEEE80211_STYPE_BEACON: 2523 break;
2041 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 2524 case IEEE80211_STYPE_PROBE_RESP:
2042 rx_status); 2525 ieee80211_rx_mgmt_probe_resp(sdata, skb);
2043 break; 2526 break;
2044 case IEEE80211_STYPE_PROBE_RESP: 2527 case IEEE80211_STYPE_AUTH:
2045 ieee80211_rx_mgmt_probe_resp(sdata, skb); 2528 rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
2046 break; 2529 break;
2047 case IEEE80211_STYPE_DEAUTH: 2530 case IEEE80211_STYPE_DEAUTH:
2048 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); 2531 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
2049 break; 2532 break;
2050 case IEEE80211_STYPE_DISASSOC: 2533 case IEEE80211_STYPE_DISASSOC:
2051 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 2534 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
2052 break; 2535 break;
2053 case IEEE80211_STYPE_ACTION: 2536 case IEEE80211_STYPE_ASSOC_RESP:
2054 switch (mgmt->u.action.category) { 2537 case IEEE80211_STYPE_REASSOC_RESP:
2055 case WLAN_CATEGORY_SPECTRUM_MGMT: 2538 rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss);
2056 ieee80211_sta_process_chanswitch(sdata, 2539 break;
2057 &mgmt->u.action.u.chan_switch.sw_elem, 2540 case IEEE80211_STYPE_ACTION:
2058 (void *)ifmgd->associated->priv, 2541 switch (mgmt->u.action.category) {
2059 rx_status->mactime); 2542 case WLAN_CATEGORY_SPECTRUM_MGMT:
2060 break; 2543 ieee80211_sta_process_chanswitch(sdata,
2061 } 2544 &mgmt->u.action.u.chan_switch.sw_elem,
2062 } 2545 (void *)ifmgd->associated->priv,
2063 mutex_unlock(&ifmgd->mtx); 2546 rx_status->mactime);
2064
2065 switch (rma) {
2066 case RX_MGMT_NONE:
2067 /* no action */
2068 break;
2069 case RX_MGMT_CFG80211_DEAUTH:
2070 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2071 break;
2072 case RX_MGMT_CFG80211_DISASSOC:
2073 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
2074 break; 2547 break;
2075 default:
2076 WARN(1, "unexpected: %d", rma);
2077 } 2548 }
2078 return;
2079 } 2549 }
2080
2081 mutex_unlock(&ifmgd->mtx); 2550 mutex_unlock(&ifmgd->mtx);
2082 2551
2083 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && 2552 switch (rma) {
2084 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { 2553 case RX_MGMT_NONE:
2085 struct ieee80211_local *local = sdata->local; 2554 /* no action */
2086 struct ieee80211_work *wk; 2555 break;
2087 2556 case RX_MGMT_CFG80211_DEAUTH:
2088 mutex_lock(&local->mtx);
2089 list_for_each_entry(wk, &local->work_list, list) {
2090 if (wk->sdata != sdata)
2091 continue;
2092
2093 if (wk->type != IEEE80211_WORK_ASSOC &&
2094 wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
2095 continue;
2096
2097 if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
2098 continue;
2099 if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
2100 continue;
2101
2102 /*
2103 * Printing the message only here means we can't
2104 * spuriously print it, but it also means that it
2105 * won't be printed when the frame comes in before
2106 * we even tried to associate or in similar cases.
2107 *
2108 * Ultimately, I suspect cfg80211 should print the
2109 * messages instead.
2110 */
2111 printk(KERN_DEBUG
2112 "%s: deauthenticated from %pM (Reason: %u)\n",
2113 sdata->name, mgmt->bssid,
2114 le16_to_cpu(mgmt->u.deauth.reason_code));
2115
2116 list_del_rcu(&wk->list);
2117 free_work(wk);
2118 break;
2119 }
2120 mutex_unlock(&local->mtx);
2121
2122 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 2557 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2558 break;
2559 case RX_MGMT_CFG80211_DISASSOC:
2560 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
2561 break;
2562 case RX_MGMT_CFG80211_RX_AUTH:
2563 cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len);
2564 break;
2565 case RX_MGMT_CFG80211_RX_ASSOC:
2566 cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len);
2567 break;
2568 case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
2569 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
2570 break;
2571 default:
2572 WARN(1, "unexpected: %d", rma);
2123 } 2573 }
2124} 2574}
2125 2575
@@ -2143,19 +2593,20 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2143{ 2593{
2144 struct ieee80211_local *local = sdata->local; 2594 struct ieee80211_local *local = sdata->local;
2145 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2595 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2596 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2146 2597
2147 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 2598 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
2148 IEEE80211_STA_BEACON_POLL); 2599 IEEE80211_STA_BEACON_POLL);
2149 2600
2150 ieee80211_set_disassoc(sdata, true, true); 2601 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2602 false, frame_buf);
2151 mutex_unlock(&ifmgd->mtx); 2603 mutex_unlock(&ifmgd->mtx);
2604
2152 /* 2605 /*
2153 * must be outside lock due to cfg80211, 2606 * must be outside lock due to cfg80211,
2154 * but that's not a problem. 2607 * but that's not a problem.
2155 */ 2608 */
2156 ieee80211_send_deauth_disassoc(sdata, bssid, 2609 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2157 IEEE80211_STYPE_DEAUTH, reason,
2158 NULL, true);
2159 2610
2160 mutex_lock(&local->mtx); 2611 mutex_lock(&local->mtx);
2161 ieee80211_recalc_idle(local); 2612 ieee80211_recalc_idle(local);
@@ -2164,14 +2615,144 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2164 mutex_lock(&ifmgd->mtx); 2615 mutex_lock(&ifmgd->mtx);
2165} 2616}
2166 2617
2618static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2619{
2620 struct ieee80211_local *local = sdata->local;
2621 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2622 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
2623
2624 lockdep_assert_held(&ifmgd->mtx);
2625
2626 if (WARN_ON_ONCE(!auth_data))
2627 return -EINVAL;
2628
2629 auth_data->tries++;
2630
2631 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
2632 printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
2633 sdata->name, auth_data->bss->bssid);
2634
2635 /*
2636 * Most likely AP is not in the range so remove the
2637 * bss struct for that AP.
2638 */
2639 cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss);
2640
2641 return -ETIMEDOUT;
2642 }
2643
2644 if (auth_data->bss->proberesp_ies) {
2645 printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
2646 sdata->name, auth_data->bss->bssid, auth_data->tries,
2647 IEEE80211_AUTH_MAX_TRIES);
2648
2649 auth_data->expected_transaction = 2;
2650 ieee80211_send_auth(sdata, 1, auth_data->algorithm,
2651 auth_data->ie, auth_data->ie_len,
2652 auth_data->bss->bssid,
2653 auth_data->bss->bssid, NULL, 0, 0);
2654 } else {
2655 const u8 *ssidie;
2656
2657 printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
2658 sdata->name, auth_data->bss->bssid, auth_data->tries,
2659 IEEE80211_AUTH_MAX_TRIES);
2660
2661 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
2662 if (!ssidie)
2663 return -EINVAL;
2664 /*
2665 * Direct probe is sent to broadcast address as some APs
2666 * will not answer to direct packet in unassociated state.
2667 */
2668 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2669 NULL, 0, (u32) -1, true, false);
2670 }
2671
2672 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
2673 run_again(ifmgd, auth_data->timeout);
2674
2675 return 0;
2676}
2677
2678static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2679{
2680 struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
2681 struct ieee80211_local *local = sdata->local;
2682
2683 lockdep_assert_held(&sdata->u.mgd.mtx);
2684
2685 assoc_data->tries++;
2686 if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
2687 printk(KERN_DEBUG "%s: association with %pM timed out\n",
2688 sdata->name, assoc_data->bss->bssid);
2689
2690 /*
2691 * Most likely AP is not in the range so remove the
2692 * bss struct for that AP.
2693 */
2694 cfg80211_unlink_bss(local->hw.wiphy, assoc_data->bss);
2695
2696 return -ETIMEDOUT;
2697 }
2698
2699 printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
2700 sdata->name, assoc_data->bss->bssid, assoc_data->tries,
2701 IEEE80211_ASSOC_MAX_TRIES);
2702 ieee80211_send_assoc(sdata);
2703
2704 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
2705 run_again(&sdata->u.mgd, assoc_data->timeout);
2706
2707 return 0;
2708}
2709
2167void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) 2710void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2168{ 2711{
2169 struct ieee80211_local *local = sdata->local; 2712 struct ieee80211_local *local = sdata->local;
2170 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2713 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2171 2714
2172 /* then process the rest of the work */
2173 mutex_lock(&ifmgd->mtx); 2715 mutex_lock(&ifmgd->mtx);
2174 2716
2717 if (ifmgd->auth_data &&
2718 time_after(jiffies, ifmgd->auth_data->timeout)) {
2719 if (ifmgd->auth_data->done) {
2720 /*
2721 * ok ... we waited for assoc but userspace didn't,
2722 * so let's just kill the auth data
2723 */
2724 ieee80211_destroy_auth_data(sdata, false);
2725 } else if (ieee80211_probe_auth(sdata)) {
2726 u8 bssid[ETH_ALEN];
2727
2728 memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
2729
2730 ieee80211_destroy_auth_data(sdata, false);
2731
2732 mutex_unlock(&ifmgd->mtx);
2733 cfg80211_send_auth_timeout(sdata->dev, bssid);
2734 mutex_lock(&ifmgd->mtx);
2735 }
2736 } else if (ifmgd->auth_data)
2737 run_again(ifmgd, ifmgd->auth_data->timeout);
2738
2739 if (ifmgd->assoc_data &&
2740 time_after(jiffies, ifmgd->assoc_data->timeout)) {
2741 if (!ifmgd->assoc_data->have_beacon ||
2742 ieee80211_do_assoc(sdata)) {
2743 u8 bssid[ETH_ALEN];
2744
2745 memcpy(bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN);
2746
2747 ieee80211_destroy_assoc_data(sdata, false);
2748
2749 mutex_unlock(&ifmgd->mtx);
2750 cfg80211_send_assoc_timeout(sdata->dev, bssid);
2751 mutex_lock(&ifmgd->mtx);
2752 }
2753 } else if (ifmgd->assoc_data)
2754 run_again(ifmgd, ifmgd->assoc_data->timeout);
2755
2175 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 2756 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
2176 IEEE80211_STA_CONNECTION_POLL) && 2757 IEEE80211_STA_CONNECTION_POLL) &&
2177 ifmgd->associated) { 2758 ifmgd->associated) {
@@ -2247,6 +2828,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2247 } 2828 }
2248 2829
2249 mutex_unlock(&ifmgd->mtx); 2830 mutex_unlock(&ifmgd->mtx);
2831
2832 mutex_lock(&local->mtx);
2833 ieee80211_recalc_idle(local);
2834 mutex_unlock(&local->mtx);
2250} 2835}
2251 2836
2252static void ieee80211_sta_bcn_mon_timer(unsigned long data) 2837static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2286,13 +2871,17 @@ static void ieee80211_sta_monitor_work(struct work_struct *work)
2286 2871
2287static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 2872static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
2288{ 2873{
2874 u32 flags;
2875
2289 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2876 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2290 sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | 2877 sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
2291 IEEE80211_STA_CONNECTION_POLL); 2878 IEEE80211_STA_CONNECTION_POLL);
2292 2879
2293 /* let's probe the connection once */ 2880 /* let's probe the connection once */
2294 ieee80211_queue_work(&sdata->local->hw, 2881 flags = sdata->local->hw.flags;
2295 &sdata->u.mgd.monitor_work); 2882 if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
2883 ieee80211_queue_work(&sdata->local->hw,
2884 &sdata->u.mgd.monitor_work);
2296 /* and do all the other regular work too */ 2885 /* and do all the other regular work too */
2297 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 2886 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
2298 } 2887 }
@@ -2356,7 +2945,6 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2356 add_timer(&ifmgd->chswitch_timer); 2945 add_timer(&ifmgd->chswitch_timer);
2357 ieee80211_sta_reset_beacon_monitor(sdata); 2946 ieee80211_sta_reset_beacon_monitor(sdata);
2358 ieee80211_restart_sta_timer(sdata); 2947 ieee80211_restart_sta_timer(sdata);
2359 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work);
2360} 2948}
2361#endif 2949#endif
2362 2950
@@ -2382,6 +2970,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2382 2970
2383 ifmgd->flags = 0; 2971 ifmgd->flags = 0;
2384 ifmgd->powersave = sdata->wdev.ps; 2972 ifmgd->powersave = sdata->wdev.ps;
2973 ifmgd->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
2974 ifmgd->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
2385 2975
2386 mutex_init(&ifmgd->mtx); 2976 mutex_init(&ifmgd->mtx);
2387 2977
@@ -2418,54 +3008,183 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2418 return 0; 3008 return 0;
2419} 3009}
2420 3010
2421/* config hooks */ 3011static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
2422static enum work_done_result 3012 struct cfg80211_bss *cbss, bool assoc)
2423ieee80211_probe_auth_done(struct ieee80211_work *wk,
2424 struct sk_buff *skb)
2425{ 3013{
2426 struct ieee80211_local *local = wk->sdata->local; 3014 struct ieee80211_local *local = sdata->local;
3015 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3016 struct ieee80211_bss *bss = (void *)cbss->priv;
3017 struct sta_info *sta;
3018 bool have_sta = false;
3019 int err;
3020 int ht_cfreq;
3021 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
3022 const u8 *ht_oper_ie;
3023 const struct ieee80211_ht_operation *ht_oper = NULL;
3024 struct ieee80211_supported_band *sband;
3025
3026 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3027 return -EINVAL;
3028
3029 if (assoc) {
3030 rcu_read_lock();
3031 have_sta = sta_info_get(sdata, cbss->bssid);
3032 rcu_read_unlock();
3033 }
3034
3035 if (!have_sta) {
3036 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
3037 if (!sta)
3038 return -ENOMEM;
3039 }
3040
3041 mutex_lock(&local->mtx);
3042 ieee80211_recalc_idle(sdata->local);
3043 mutex_unlock(&local->mtx);
3044
3045 /* switch to the right channel */
3046 sband = local->hw.wiphy->bands[cbss->channel->band];
3047
3048 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
3049
3050 if (sband->ht_cap.ht_supported) {
3051 ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
3052 cbss->information_elements,
3053 cbss->len_information_elements);
3054 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
3055 ht_oper = (void *)(ht_oper_ie + 2);
3056 }
2427 3057
2428 if (!skb) { 3058 if (ht_oper) {
2429 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta); 3059 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
2430 goto destroy; 3060 cbss->channel->band);
3061 /* check that channel matches the right operating channel */
3062 if (cbss->channel->center_freq != ht_cfreq) {
3063 /*
3064 * It's possible that some APs are confused here;
3065 * Netgear WNDR3700 sometimes reports 4 higher than
3066 * the actual channel in association responses, but
3067 * since we look at probe response/beacon data here
3068 * it should be OK.
3069 */
3070 printk(KERN_DEBUG
3071 "%s: Wrong control channel: center-freq: %d"
3072 " ht-cfreq: %d ht->primary_chan: %d"
3073 " band: %d. Disabling HT.\n",
3074 sdata->name, cbss->channel->center_freq,
3075 ht_cfreq, ht_oper->primary_chan,
3076 cbss->channel->band);
3077 ht_oper = NULL;
3078 }
3079 }
3080
3081 if (ht_oper) {
3082 channel_type = NL80211_CHAN_HT20;
3083
3084 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
3085 switch (ht_oper->ht_param &
3086 IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
3087 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3088 channel_type = NL80211_CHAN_HT40PLUS;
3089 break;
3090 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3091 channel_type = NL80211_CHAN_HT40MINUS;
3092 break;
3093 }
3094 }
2431 } 3095 }
2432 3096
2433 if (wk->type == IEEE80211_WORK_AUTH) { 3097 if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
2434 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len); 3098 /* can only fail due to HT40+/- mismatch */
2435 goto destroy; 3099 channel_type = NL80211_CHAN_HT20;
3100 printk(KERN_DEBUG
3101 "%s: disabling 40 MHz due to multi-vif mismatch\n",
3102 sdata->name);
3103 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3104 WARN_ON(!ieee80211_set_channel_type(local, sdata,
3105 channel_type));
2436 } 3106 }
2437 3107
2438 mutex_lock(&wk->sdata->u.mgd.mtx); 3108 local->oper_channel = cbss->channel;
2439 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb); 3109 ieee80211_hw_config(local, 0);
2440 mutex_unlock(&wk->sdata->u.mgd.mtx); 3110
3111 if (!have_sta) {
3112 u32 rates = 0, basic_rates = 0;
3113 bool have_higher_than_11mbit;
3114 int min_rate = INT_MAX, min_rate_index = -1;
3115
3116 ieee80211_get_rates(sband, bss->supp_rates,
3117 bss->supp_rates_len,
3118 &rates, &basic_rates,
3119 &have_higher_than_11mbit,
3120 &min_rate, &min_rate_index);
3121
3122 /*
3123 * This used to be a workaround for basic rates missing
3124 * in the association response frame. Now that we no
3125 * longer use the basic rates from there, it probably
3126 * doesn't happen any more, but keep the workaround so
3127 * in case some *other* APs are buggy in different ways
3128 * we can connect -- with a warning.
3129 */
3130 if (!basic_rates && min_rate_index >= 0) {
3131 printk(KERN_DEBUG
3132 "%s: No basic rates, using min rate instead.\n",
3133 sdata->name);
3134 basic_rates = BIT(min_rate_index);
3135 }
3136
3137 sta->sta.supp_rates[cbss->channel->band] = rates;
3138 sdata->vif.bss_conf.basic_rates = basic_rates;
2441 3139
2442 wk->type = IEEE80211_WORK_AUTH; 3140 /* cf. IEEE 802.11 9.2.12 */
2443 wk->probe_auth.tries = 0; 3141 if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
2444 return WORK_DONE_REQUEUE; 3142 have_higher_than_11mbit)
2445 destroy: 3143 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
2446 if (wk->probe_auth.synced) 3144 else
2447 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta, 3145 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2448 IEEE80211_TX_SYNC_AUTH); 3146
3147 memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
3148
3149 /* tell driver about BSSID and basic rates */
3150 ieee80211_bss_info_change_notify(sdata,
3151 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES);
3152
3153 if (assoc)
3154 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
3155
3156 err = sta_info_insert(sta);
3157 sta = NULL;
3158 if (err) {
3159 printk(KERN_DEBUG
3160 "%s: failed to insert STA entry for the AP (error %d)\n",
3161 sdata->name, err);
3162 return err;
3163 }
3164 } else
3165 WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
2449 3166
2450 return WORK_DONE_DESTROY; 3167 return 0;
2451} 3168}
2452 3169
3170/* config hooks */
2453int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 3171int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2454 struct cfg80211_auth_request *req) 3172 struct cfg80211_auth_request *req)
2455{ 3173{
2456 const u8 *ssid; 3174 struct ieee80211_local *local = sdata->local;
2457 struct ieee80211_work *wk; 3175 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3176 struct ieee80211_mgd_auth_data *auth_data;
2458 u16 auth_alg; 3177 u16 auth_alg;
3178 int err;
2459 3179
2460 if (req->local_state_change) 3180 /* prepare auth data structure */
2461 return 0; /* no need to update mac80211 state */
2462 3181
2463 switch (req->auth_type) { 3182 switch (req->auth_type) {
2464 case NL80211_AUTHTYPE_OPEN_SYSTEM: 3183 case NL80211_AUTHTYPE_OPEN_SYSTEM:
2465 auth_alg = WLAN_AUTH_OPEN; 3184 auth_alg = WLAN_AUTH_OPEN;
2466 break; 3185 break;
2467 case NL80211_AUTHTYPE_SHARED_KEY: 3186 case NL80211_AUTHTYPE_SHARED_KEY:
2468 if (IS_ERR(sdata->local->wep_tx_tfm)) 3187 if (IS_ERR(local->wep_tx_tfm))
2469 return -EOPNOTSUPP; 3188 return -EOPNOTSUPP;
2470 auth_alg = WLAN_AUTH_SHARED_KEY; 3189 auth_alg = WLAN_AUTH_SHARED_KEY;
2471 break; 3190 break;
@@ -2479,201 +3198,154 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2479 return -EOPNOTSUPP; 3198 return -EOPNOTSUPP;
2480 } 3199 }
2481 3200
2482 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); 3201 auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL);
2483 if (!wk) 3202 if (!auth_data)
2484 return -ENOMEM; 3203 return -ENOMEM;
2485 3204
2486 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); 3205 auth_data->bss = req->bss;
2487 3206
2488 if (req->ie && req->ie_len) { 3207 if (req->ie && req->ie_len) {
2489 memcpy(wk->ie, req->ie, req->ie_len); 3208 memcpy(auth_data->ie, req->ie, req->ie_len);
2490 wk->ie_len = req->ie_len; 3209 auth_data->ie_len = req->ie_len;
2491 } 3210 }
2492 3211
2493 if (req->key && req->key_len) { 3212 if (req->key && req->key_len) {
2494 wk->probe_auth.key_len = req->key_len; 3213 auth_data->key_len = req->key_len;
2495 wk->probe_auth.key_idx = req->key_idx; 3214 auth_data->key_idx = req->key_idx;
2496 memcpy(wk->probe_auth.key, req->key, req->key_len); 3215 memcpy(auth_data->key, req->key, req->key_len);
2497 } 3216 }
2498 3217
2499 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 3218 auth_data->algorithm = auth_alg;
2500 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2501 wk->probe_auth.ssid_len = ssid[1];
2502
2503 wk->probe_auth.algorithm = auth_alg;
2504 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2505
2506 /* if we already have a probe, don't probe again */
2507 if (req->bss->proberesp_ies)
2508 wk->type = IEEE80211_WORK_AUTH;
2509 else
2510 wk->type = IEEE80211_WORK_DIRECT_PROBE;
2511 wk->chan = req->bss->channel;
2512 wk->chan_type = NL80211_CHAN_NO_HT;
2513 wk->sdata = sdata;
2514 wk->done = ieee80211_probe_auth_done;
2515
2516 ieee80211_add_work(wk);
2517 return 0;
2518}
2519
2520/* create and insert a dummy station entry */
2521static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
2522 u8 *bssid) {
2523 struct sta_info *sta;
2524 int err;
2525
2526 sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
2527 if (!sta)
2528 return -ENOMEM;
2529
2530 sta->dummy = true;
2531 3219
2532 err = sta_info_insert(sta); 3220 /* try to authenticate/probe */
2533 sta = NULL;
2534 if (err) {
2535 printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
2536 " the AP (error %d)\n", sdata->name, err);
2537 return err;
2538 }
2539
2540 return 0;
2541}
2542 3221
2543static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, 3222 mutex_lock(&ifmgd->mtx);
2544 struct sk_buff *skb)
2545{
2546 struct ieee80211_local *local = wk->sdata->local;
2547 struct ieee80211_mgmt *mgmt;
2548 struct ieee80211_rx_status *rx_status;
2549 struct ieee802_11_elems elems;
2550 struct cfg80211_bss *cbss = wk->assoc.bss;
2551 u16 status;
2552 3223
2553 if (!skb) { 3224 if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
2554 sta_info_destroy_addr(wk->sdata, cbss->bssid); 3225 ifmgd->assoc_data) {
2555 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); 3226 err = -EBUSY;
2556 goto destroy; 3227 goto err_free;
2557 } 3228 }
2558 3229
2559 if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) { 3230 if (ifmgd->auth_data)
2560 mutex_lock(&wk->sdata->u.mgd.mtx); 3231 ieee80211_destroy_auth_data(sdata, false);
2561 rx_status = (void *) skb->cb;
2562 ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems);
2563 ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status,
2564 &elems, true);
2565 mutex_unlock(&wk->sdata->u.mgd.mtx);
2566 3232
2567 wk->type = IEEE80211_WORK_ASSOC; 3233 /* prep auth_data so we don't go into idle on disassoc */
2568 /* not really done yet */ 3234 ifmgd->auth_data = auth_data;
2569 return WORK_DONE_REQUEUE;
2570 }
2571 3235
2572 mgmt = (void *)skb->data; 3236 if (ifmgd->associated)
2573 status = le16_to_cpu(mgmt->u.assoc_resp.status_code); 3237 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2574 3238
2575 if (status == WLAN_STATUS_SUCCESS) { 3239 printk(KERN_DEBUG "%s: authenticate with %pM\n",
2576 if (wk->assoc.synced) 3240 sdata->name, req->bss->bssid);
2577 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
2578 IEEE80211_TX_SYNC_ASSOC);
2579 3241
2580 mutex_lock(&wk->sdata->u.mgd.mtx); 3242 err = ieee80211_prep_connection(sdata, req->bss, false);
2581 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { 3243 if (err)
2582 mutex_unlock(&wk->sdata->u.mgd.mtx); 3244 goto err_clear;
2583 /* oops -- internal error -- send timeout for now */
2584 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2585 cfg80211_send_assoc_timeout(wk->sdata->dev,
2586 wk->filter_ta);
2587 return WORK_DONE_DESTROY;
2588 }
2589 3245
2590 mutex_unlock(&wk->sdata->u.mgd.mtx); 3246 err = ieee80211_probe_auth(sdata);
2591 } else { 3247 if (err) {
2592 /* assoc failed - destroy the dummy station entry */ 3248 sta_info_destroy_addr(sdata, req->bss->bssid);
2593 sta_info_destroy_addr(wk->sdata, cbss->bssid); 3249 goto err_clear;
2594 } 3250 }
2595 3251
2596 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); 3252 /* hold our own reference */
2597 destroy: 3253 cfg80211_ref_bss(auth_data->bss);
2598 if (wk->assoc.synced) 3254 err = 0;
2599 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta, 3255 goto out_unlock;
2600 IEEE80211_TX_SYNC_ASSOC); 3256
3257 err_clear:
3258 ifmgd->auth_data = NULL;
3259 err_free:
3260 kfree(auth_data);
3261 out_unlock:
3262 mutex_unlock(&ifmgd->mtx);
2601 3263
2602 return WORK_DONE_DESTROY; 3264 return err;
2603} 3265}
2604 3266
2605int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 3267int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2606 struct cfg80211_assoc_request *req) 3268 struct cfg80211_assoc_request *req)
2607{ 3269{
3270 struct ieee80211_local *local = sdata->local;
2608 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3271 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2609 struct ieee80211_bss *bss = (void *)req->bss->priv; 3272 struct ieee80211_bss *bss = (void *)req->bss->priv;
2610 struct ieee80211_work *wk; 3273 struct ieee80211_mgd_assoc_data *assoc_data;
2611 const u8 *ssid; 3274 struct ieee80211_supported_band *sband;
3275 const u8 *ssidie;
2612 int i, err; 3276 int i, err;
2613 3277
3278 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
3279 if (!ssidie)
3280 return -EINVAL;
3281
3282 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
3283 if (!assoc_data)
3284 return -ENOMEM;
3285
2614 mutex_lock(&ifmgd->mtx); 3286 mutex_lock(&ifmgd->mtx);
2615 if (ifmgd->associated) {
2616 if (!req->prev_bssid ||
2617 memcmp(req->prev_bssid, ifmgd->associated->bssid,
2618 ETH_ALEN)) {
2619 /*
2620 * We are already associated and the request was not a
2621 * reassociation request from the current BSS, so
2622 * reject it.
2623 */
2624 mutex_unlock(&ifmgd->mtx);
2625 return -EALREADY;
2626 }
2627 3287
2628 /* Trying to reassociate - clear previous association state */ 3288 if (ifmgd->associated)
2629 ieee80211_set_disassoc(sdata, true, false); 3289 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
3290
3291 if (ifmgd->auth_data && !ifmgd->auth_data->done) {
3292 err = -EBUSY;
3293 goto err_free;
2630 } 3294 }
2631 mutex_unlock(&ifmgd->mtx);
2632 3295
2633 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); 3296 if (ifmgd->assoc_data) {
2634 if (!wk) 3297 err = -EBUSY;
2635 return -ENOMEM; 3298 goto err_free;
3299 }
2636 3300
2637 /* 3301 if (ifmgd->auth_data) {
2638 * create a dummy station info entry in order 3302 bool match;
2639 * to start accepting incoming EAPOL packets from the station 3303
2640 */ 3304 /* keep sta info, bssid if matching */
2641 err = ieee80211_pre_assoc(sdata, req->bss->bssid); 3305 match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
2642 if (err) { 3306 ieee80211_destroy_auth_data(sdata, match);
2643 kfree(wk);
2644 return err;
2645 } 3307 }
2646 3308
3309 /* prepare assoc data */
3310
2647 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 3311 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
2648 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 3312 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2649 3313
2650 ifmgd->beacon_crc_valid = false; 3314 ifmgd->beacon_crc_valid = false;
2651 3315
3316 /*
3317 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
3318 * We still associate in non-HT mode (11a/b/g) if any one of these
3319 * ciphers is configured as pairwise.
3320 * We can set this to true for non-11n hardware, that'll be checked
3321 * separately along with the peer capabilities.
3322 */
2652 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 3323 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
2653 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 3324 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
2654 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3325 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
2655 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 3326 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2656 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3327 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2657 3328
2658
2659 if (req->flags & ASSOC_REQ_DISABLE_HT) 3329 if (req->flags & ASSOC_REQ_DISABLE_HT)
2660 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3330 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2661 3331
3332 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3333 sband = local->hw.wiphy->bands[req->bss->channel->band];
3334 if (!sband->ht_cap.ht_supported ||
3335 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
3336 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3337
2662 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3338 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
2663 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, 3339 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
2664 sizeof(ifmgd->ht_capa_mask)); 3340 sizeof(ifmgd->ht_capa_mask));
2665 3341
2666 if (req->ie && req->ie_len) { 3342 if (req->ie && req->ie_len) {
2667 memcpy(wk->ie, req->ie, req->ie_len); 3343 memcpy(assoc_data->ie, req->ie, req->ie_len);
2668 wk->ie_len = req->ie_len; 3344 assoc_data->ie_len = req->ie_len;
2669 } else 3345 }
2670 wk->ie_len = 0;
2671
2672 wk->assoc.bss = req->bss;
2673 3346
2674 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); 3347 assoc_data->bss = req->bss;
2675 3348
2676 /* new association always uses requested smps mode */
2677 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { 3349 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
2678 if (ifmgd->powersave) 3350 if (ifmgd->powersave)
2679 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; 3351 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
@@ -2682,47 +3354,28 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2682 } else 3354 } else
2683 ifmgd->ap_smps = ifmgd->req_smps; 3355 ifmgd->ap_smps = ifmgd->req_smps;
2684 3356
2685 wk->assoc.smps = ifmgd->ap_smps; 3357 assoc_data->capability = req->bss->capability;
2686 /* 3358 assoc_data->wmm = bss->wmm_used &&
2687 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. 3359 (local->hw.queues >= IEEE80211_NUM_ACS);
2688 * We still associate in non-HT mode (11a/b/g) if any one of these 3360 assoc_data->supp_rates = bss->supp_rates;
2689 * ciphers is configured as pairwise. 3361 assoc_data->supp_rates_len = bss->supp_rates_len;
2690 * We can set this to true for non-11n hardware, that'll be checked 3362 assoc_data->ht_operation_ie =
2691 * separately along with the peer capabilities. 3363 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
2692 */
2693 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
2694 wk->assoc.capability = req->bss->capability;
2695 wk->assoc.wmm_used = bss->wmm_used;
2696 wk->assoc.supp_rates = bss->supp_rates;
2697 wk->assoc.supp_rates_len = bss->supp_rates_len;
2698 wk->assoc.ht_information_ie =
2699 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
2700 3364
2701 if (bss->wmm_used && bss->uapsd_supported && 3365 if (bss->wmm_used && bss->uapsd_supported &&
2702 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { 3366 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
2703 wk->assoc.uapsd_used = true; 3367 assoc_data->uapsd = true;
2704 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; 3368 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
2705 } else { 3369 } else {
2706 wk->assoc.uapsd_used = false; 3370 assoc_data->uapsd = false;
2707 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; 3371 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
2708 } 3372 }
2709 3373
2710 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 3374 memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
2711 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]); 3375 assoc_data->ssid_len = ssidie[1];
2712 wk->assoc.ssid_len = ssid[1];
2713 3376
2714 if (req->prev_bssid) 3377 if (req->prev_bssid)
2715 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); 3378 memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN);
2716
2717 wk->chan = req->bss->channel;
2718 wk->chan_type = NL80211_CHAN_NO_HT;
2719 wk->sdata = sdata;
2720 wk->done = ieee80211_assoc_done;
2721 if (!bss->dtim_period &&
2722 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
2723 wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT;
2724 else
2725 wk->type = IEEE80211_WORK_ASSOC;
2726 3379
2727 if (req->use_mfp) { 3380 if (req->use_mfp) {
2728 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 3381 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2740,91 +3393,86 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2740 sdata->control_port_protocol = req->crypto.control_port_ethertype; 3393 sdata->control_port_protocol = req->crypto.control_port_ethertype;
2741 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; 3394 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
2742 3395
2743 ieee80211_add_work(wk); 3396 /* kick off associate process */
2744 return 0; 3397
3398 ifmgd->assoc_data = assoc_data;
3399
3400 err = ieee80211_prep_connection(sdata, req->bss, true);
3401 if (err)
3402 goto err_clear;
3403
3404 if (!bss->dtim_period &&
3405 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
3406 /*
3407 * Wait up to one beacon interval ...
3408 * should this be more if we miss one?
3409 */
3410 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
3411 sdata->name, ifmgd->bssid);
3412 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3413 } else {
3414 assoc_data->have_beacon = true;
3415 assoc_data->sent_assoc = false;
3416 assoc_data->timeout = jiffies;
3417 }
3418 run_again(ifmgd, assoc_data->timeout);
3419
3420 if (bss->corrupt_data) {
3421 char *corrupt_type = "data";
3422 if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) {
3423 if (bss->corrupt_data &
3424 IEEE80211_BSS_CORRUPT_PROBE_RESP)
3425 corrupt_type = "beacon and probe response";
3426 else
3427 corrupt_type = "beacon";
3428 } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
3429 corrupt_type = "probe response";
3430 printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
3431 sdata->name, corrupt_type);
3432 }
3433
3434 err = 0;
3435 goto out;
3436 err_clear:
3437 ifmgd->assoc_data = NULL;
3438 err_free:
3439 kfree(assoc_data);
3440 out:
3441 mutex_unlock(&ifmgd->mtx);
3442
3443 return err;
2745} 3444}
2746 3445
2747int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 3446int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2748 struct cfg80211_deauth_request *req, 3447 struct cfg80211_deauth_request *req)
2749 void *cookie)
2750{ 3448{
2751 struct ieee80211_local *local = sdata->local;
2752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3449 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2753 u8 bssid[ETH_ALEN]; 3450 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2754 bool assoc_bss = false;
2755 3451
2756 mutex_lock(&ifmgd->mtx); 3452 mutex_lock(&ifmgd->mtx);
2757 3453
2758 memcpy(bssid, req->bss->bssid, ETH_ALEN); 3454 if (ifmgd->auth_data) {
2759 if (ifmgd->associated == req->bss) { 3455 ieee80211_destroy_auth_data(sdata, false);
2760 ieee80211_set_disassoc(sdata, false, true);
2761 mutex_unlock(&ifmgd->mtx); 3456 mutex_unlock(&ifmgd->mtx);
2762 assoc_bss = true; 3457 return 0;
2763 } else {
2764 bool not_auth_yet = false;
2765 struct ieee80211_work *tmp, *wk = NULL;
2766
2767 mutex_unlock(&ifmgd->mtx);
2768
2769 mutex_lock(&local->mtx);
2770 list_for_each_entry(tmp, &local->work_list, list) {
2771 if (tmp->sdata != sdata)
2772 continue;
2773
2774 if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
2775 tmp->type != IEEE80211_WORK_AUTH &&
2776 tmp->type != IEEE80211_WORK_ASSOC &&
2777 tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
2778 continue;
2779
2780 if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
2781 continue;
2782
2783 not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
2784 list_del_rcu(&tmp->list);
2785 synchronize_rcu();
2786 wk = tmp;
2787 break;
2788 }
2789 mutex_unlock(&local->mtx);
2790
2791 if (wk && wk->type == IEEE80211_WORK_ASSOC) {
2792 /* clean up dummy sta & TX sync */
2793 sta_info_destroy_addr(wk->sdata, wk->filter_ta);
2794 if (wk->assoc.synced)
2795 drv_finish_tx_sync(local, wk->sdata,
2796 wk->filter_ta,
2797 IEEE80211_TX_SYNC_ASSOC);
2798 } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
2799 if (wk->probe_auth.synced)
2800 drv_finish_tx_sync(local, wk->sdata,
2801 wk->filter_ta,
2802 IEEE80211_TX_SYNC_AUTH);
2803 }
2804 kfree(wk);
2805
2806 /*
2807 * If somebody requests authentication and we haven't
2808 * sent out an auth frame yet there's no need to send
2809 * out a deauth frame either. If the state was PROBE,
2810 * then this is the case. If it's AUTH we have sent a
2811 * frame, and if it's IDLE we have completed the auth
2812 * process already.
2813 */
2814 if (not_auth_yet) {
2815 __cfg80211_auth_canceled(sdata->dev, bssid);
2816 return 0;
2817 }
2818 } 3458 }
2819 3459
2820 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 3460 printk(KERN_DEBUG
2821 sdata->name, bssid, req->reason_code); 3461 "%s: deauthenticating from %pM by local choice (reason=%d)\n",
3462 sdata->name, req->bssid, req->reason_code);
2822 3463
2823 ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, 3464 if (ifmgd->associated &&
2824 req->reason_code, cookie, 3465 ether_addr_equal(ifmgd->associated->bssid, req->bssid))
2825 !req->local_state_change); 3466 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
2826 if (assoc_bss) 3467 req->reason_code, true, frame_buf);
2827 sta_info_flush(sdata->local, sdata); 3468 else
3469 ieee80211_send_deauth_disassoc(sdata, req->bssid,
3470 IEEE80211_STYPE_DEAUTH,
3471 req->reason_code, true,
3472 frame_buf);
3473 mutex_unlock(&ifmgd->mtx);
3474
3475 __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2828 3476
2829 mutex_lock(&sdata->local->mtx); 3477 mutex_lock(&sdata->local->mtx);
2830 ieee80211_recalc_idle(sdata->local); 3478 ieee80211_recalc_idle(sdata->local);
@@ -2834,11 +3482,11 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2834} 3482}
2835 3483
2836int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 3484int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2837 struct cfg80211_disassoc_request *req, 3485 struct cfg80211_disassoc_request *req)
2838 void *cookie)
2839{ 3486{
2840 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3487 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2841 u8 bssid[ETH_ALEN]; 3488 u8 bssid[ETH_ALEN];
3489 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2842 3490
2843 mutex_lock(&ifmgd->mtx); 3491 mutex_lock(&ifmgd->mtx);
2844 3492
@@ -2857,14 +3505,12 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2857 sdata->name, req->bss->bssid, req->reason_code); 3505 sdata->name, req->bss->bssid, req->reason_code);
2858 3506
2859 memcpy(bssid, req->bss->bssid, ETH_ALEN); 3507 memcpy(bssid, req->bss->bssid, ETH_ALEN);
2860 ieee80211_set_disassoc(sdata, false, true); 3508 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
2861 3509 req->reason_code, !req->local_state_change,
3510 frame_buf);
2862 mutex_unlock(&ifmgd->mtx); 3511 mutex_unlock(&ifmgd->mtx);
2863 3512
2864 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 3513 __cfg80211_send_disassoc(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2865 IEEE80211_STYPE_DISASSOC, req->reason_code,
2866 cookie, !req->local_state_change);
2867 sta_info_flush(sdata->local, sdata);
2868 3514
2869 mutex_lock(&sdata->local->mtx); 3515 mutex_lock(&sdata->local->mtx);
2870 ieee80211_recalc_idle(sdata->local); 3516 ieee80211_recalc_idle(sdata->local);
@@ -2873,6 +3519,19 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2873 return 0; 3519 return 0;
2874} 3520}
2875 3521
3522void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
3523{
3524 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3525
3526 mutex_lock(&ifmgd->mtx);
3527 if (ifmgd->assoc_data)
3528 ieee80211_destroy_assoc_data(sdata, false);
3529 if (ifmgd->auth_data)
3530 ieee80211_destroy_auth_data(sdata, false);
3531 del_timer_sync(&ifmgd->timer);
3532 mutex_unlock(&ifmgd->mtx);
3533}
3534
2876void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, 3535void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2877 enum nl80211_cqm_rssi_threshold_event rssi_event, 3536 enum nl80211_cqm_rssi_threshold_event rssi_event,
2878 gfp_t gfp) 3537 gfp_t gfp)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 596efaf50e09..af1c4e26e965 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -98,13 +98,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98 mutex_lock(&local->sta_mtx); 98 mutex_lock(&local->sta_mtx);
99 list_for_each_entry(sta, &local->sta_list, list) { 99 list_for_each_entry(sta, &local->sta_list, list) {
100 if (sta->uploaded) { 100 if (sta->uploaded) {
101 sdata = sta->sdata; 101 enum ieee80211_sta_state state;
102 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
103 sdata = container_of(sdata->bss,
104 struct ieee80211_sub_if_data,
105 u.ap);
106 102
107 drv_sta_remove(local, sdata, &sta->sta); 103 state = sta->sta_state;
104 for (; state > IEEE80211_STA_NOTEXIST; state--)
105 WARN_ON(drv_sta_state(local, sta->sdata, sta,
106 state, state - 1));
108 } 107 }
109 108
110 mesh_plink_quiesce(sta); 109 mesh_plink_quiesce(sta);
@@ -128,6 +127,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
128 drv_remove_interface(local, sdata); 127 drv_remove_interface(local, sdata);
129 } 128 }
130 129
130 sdata = rtnl_dereference(local->monitor_sdata);
131 if (sdata)
132 drv_remove_interface(local, sdata);
133
131 /* stop hardware - this must stop RX */ 134 /* stop hardware - this must stop RX */
132 if (local->open_count) 135 if (local->open_count)
133 ieee80211_stop_device(local); 136 ieee80211_stop_device(local);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index f9b8e819ca63..3313c117b322 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf,
145 145
146static const struct file_operations rcname_ops = { 146static const struct file_operations rcname_ops = {
147 .read = rcname_read, 147 .read = rcname_read,
148 .open = mac80211_open_file_generic, 148 .open = simple_open,
149 .llseek = default_llseek, 149 .llseek = default_llseek,
150}; 150};
151#endif 151#endif
@@ -159,7 +159,6 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
159 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); 159 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
160 if (!ref) 160 if (!ref)
161 goto fail_ref; 161 goto fail_ref;
162 kref_init(&ref->kref);
163 ref->local = local; 162 ref->local = local;
164 ref->ops = ieee80211_rate_control_ops_get(name); 163 ref->ops = ieee80211_rate_control_ops_get(name);
165 if (!ref->ops) 164 if (!ref->ops)
@@ -184,11 +183,8 @@ fail_ref:
184 return NULL; 183 return NULL;
185} 184}
186 185
187static void rate_control_release(struct kref *kref) 186static void rate_control_free(struct rate_control_ref *ctrl_ref)
188{ 187{
189 struct rate_control_ref *ctrl_ref;
190
191 ctrl_ref = container_of(kref, struct rate_control_ref, kref);
192 ctrl_ref->ops->free(ctrl_ref->priv); 188 ctrl_ref->ops->free(ctrl_ref->priv);
193 189
194#ifdef CONFIG_MAC80211_DEBUGFS 190#ifdef CONFIG_MAC80211_DEBUGFS
@@ -293,8 +289,8 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
293} 289}
294EXPORT_SYMBOL(rate_control_send_low); 290EXPORT_SYMBOL(rate_control_send_low);
295 291
296static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, 292static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
297 int n_bitrates, u32 mask) 293 int n_bitrates, u32 mask)
298{ 294{
299 int j; 295 int j;
300 296
@@ -303,7 +299,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
303 if (mask & (1 << j)) { 299 if (mask & (1 << j)) {
304 /* Okay, found a suitable rate. Use it. */ 300 /* Okay, found a suitable rate. Use it. */
305 rate->idx = j; 301 rate->idx = j;
306 return; 302 return true;
307 } 303 }
308 } 304 }
309 305
@@ -312,6 +308,112 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
312 if (mask & (1 << j)) { 308 if (mask & (1 << j)) {
313 /* Okay, found a suitable rate. Use it. */ 309 /* Okay, found a suitable rate. Use it. */
314 rate->idx = j; 310 rate->idx = j;
311 return true;
312 }
313 }
314 return false;
315}
316
317static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
318 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
319{
320 int i, j;
321 int ridx, rbit;
322
323 ridx = rate->idx / 8;
324 rbit = rate->idx % 8;
325
326 /* sanity check */
327 if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
328 return false;
329
330 /* See whether the selected rate or anything below it is allowed. */
331 for (i = ridx; i >= 0; i--) {
332 for (j = rbit; j >= 0; j--)
333 if (mcs_mask[i] & BIT(j)) {
334 rate->idx = i * 8 + j;
335 return true;
336 }
337 rbit = 7;
338 }
339
340 /* Try to find a higher rate that would be allowed */
341 ridx = (rate->idx + 1) / 8;
342 rbit = (rate->idx + 1) % 8;
343
344 for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
345 for (j = rbit; j < 8; j++)
346 if (mcs_mask[i] & BIT(j)) {
347 rate->idx = i * 8 + j;
348 return true;
349 }
350 rbit = 0;
351 }
352 return false;
353}
354
355
356
357static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
358 struct ieee80211_tx_rate_control *txrc,
359 u32 mask,
360 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
361{
362 struct ieee80211_tx_rate alt_rate;
363
364 /* handle HT rates */
365 if (rate->flags & IEEE80211_TX_RC_MCS) {
366 if (rate_idx_match_mcs_mask(rate, mcs_mask))
367 return;
368
369 /* also try the legacy rates. */
370 alt_rate.idx = 0;
371 /* keep protection flags */
372 alt_rate.flags = rate->flags &
373 (IEEE80211_TX_RC_USE_RTS_CTS |
374 IEEE80211_TX_RC_USE_CTS_PROTECT |
375 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
376 alt_rate.count = rate->count;
377 if (rate_idx_match_legacy_mask(&alt_rate,
378 txrc->sband->n_bitrates,
379 mask)) {
380 *rate = alt_rate;
381 return;
382 }
383 } else {
384 struct sk_buff *skb = txrc->skb;
385 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
386 __le16 fc;
387
388 /* handle legacy rates */
389 if (rate_idx_match_legacy_mask(rate, txrc->sband->n_bitrates,
390 mask))
391 return;
392
393 /* if HT BSS, and we handle a data frame, also try HT rates */
394 if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT)
395 return;
396
397 fc = hdr->frame_control;
398 if (!ieee80211_is_data(fc))
399 return;
400
401 alt_rate.idx = 0;
402 /* keep protection flags */
403 alt_rate.flags = rate->flags &
404 (IEEE80211_TX_RC_USE_RTS_CTS |
405 IEEE80211_TX_RC_USE_CTS_PROTECT |
406 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
407 alt_rate.count = rate->count;
408
409 alt_rate.flags |= IEEE80211_TX_RC_MCS;
410
411 if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) ||
412 (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
413 alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
414
415 if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
416 *rate = alt_rate;
315 return; 417 return;
316 } 418 }
317 } 419 }
@@ -335,6 +437,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
335 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 437 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
336 int i; 438 int i;
337 u32 mask; 439 u32 mask;
440 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
338 441
339 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) { 442 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
340 ista = &sta->sta; 443 ista = &sta->sta;
@@ -358,10 +461,14 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
358 * the common case. 461 * the common case.
359 */ 462 */
360 mask = sdata->rc_rateidx_mask[info->band]; 463 mask = sdata->rc_rateidx_mask[info->band];
464 memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
465 sizeof(mcs_mask));
361 if (mask != (1 << txrc->sband->n_bitrates) - 1) { 466 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
362 if (sta) { 467 if (sta) {
363 /* Filter out rates that the STA does not support */ 468 /* Filter out rates that the STA does not support */
364 mask &= sta->sta.supp_rates[info->band]; 469 mask &= sta->sta.supp_rates[info->band];
470 for (i = 0; i < sizeof(mcs_mask); i++)
471 mcs_mask[i] &= sta->sta.ht_cap.mcs.rx_mask[i];
365 } 472 }
366 /* 473 /*
367 * Make sure the rate index selected for each TX rate is 474 * Make sure the rate index selected for each TX rate is
@@ -372,32 +479,18 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
372 /* Skip invalid rates */ 479 /* Skip invalid rates */
373 if (info->control.rates[i].idx < 0) 480 if (info->control.rates[i].idx < 0)
374 break; 481 break;
375 /* Rate masking supports only legacy rates for now */ 482 rate_idx_match_mask(&info->control.rates[i], txrc,
376 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 483 mask, mcs_mask);
377 continue;
378 rate_idx_match_mask(&info->control.rates[i],
379 txrc->sband->n_bitrates, mask);
380 } 484 }
381 } 485 }
382 486
383 BUG_ON(info->control.rates[0].idx < 0); 487 BUG_ON(info->control.rates[0].idx < 0);
384} 488}
385 489
386struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
387{
388 kref_get(&ref->kref);
389 return ref;
390}
391
392void rate_control_put(struct rate_control_ref *ref)
393{
394 kref_put(&ref->kref, rate_control_release);
395}
396
397int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 490int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
398 const char *name) 491 const char *name)
399{ 492{
400 struct rate_control_ref *ref, *old; 493 struct rate_control_ref *ref;
401 494
402 ASSERT_RTNL(); 495 ASSERT_RTNL();
403 496
@@ -417,12 +510,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
417 return -ENOENT; 510 return -ENOENT;
418 } 511 }
419 512
420 old = local->rate_ctrl; 513 WARN_ON(local->rate_ctrl);
421 local->rate_ctrl = ref; 514 local->rate_ctrl = ref;
422 if (old) {
423 rate_control_put(old);
424 sta_info_flush(local, NULL);
425 }
426 515
427 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n", 516 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
428 ref->ops->name); 517 ref->ops->name);
@@ -440,6 +529,6 @@ void rate_control_deinitialize(struct ieee80211_local *local)
440 return; 529 return;
441 530
442 local->rate_ctrl = NULL; 531 local->rate_ctrl = NULL;
443 rate_control_put(ref); 532 rate_control_free(ref);
444} 533}
445 534
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 80cfc006dd74..6e4fd32c6617 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -14,23 +14,20 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/kref.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
19#include "ieee80211_i.h" 18#include "ieee80211_i.h"
20#include "sta_info.h" 19#include "sta_info.h"
20#include "driver-ops.h"
21 21
22struct rate_control_ref { 22struct rate_control_ref {
23 struct ieee80211_local *local; 23 struct ieee80211_local *local;
24 struct rate_control_ops *ops; 24 struct rate_control_ops *ops;
25 void *priv; 25 void *priv;
26 struct kref kref;
27}; 26};
28 27
29void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 28void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
30 struct sta_info *sta, 29 struct sta_info *sta,
31 struct ieee80211_tx_rate_control *txrc); 30 struct ieee80211_tx_rate_control *txrc);
32struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
33void rate_control_put(struct rate_control_ref *ref);
34 31
35static inline void rate_control_tx_status(struct ieee80211_local *local, 32static inline void rate_control_tx_status(struct ieee80211_local *local,
36 struct ieee80211_supported_band *sband, 33 struct ieee80211_supported_band *sband,
@@ -67,8 +64,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
67 64
68static inline void rate_control_rate_update(struct ieee80211_local *local, 65static inline void rate_control_rate_update(struct ieee80211_local *local,
69 struct ieee80211_supported_band *sband, 66 struct ieee80211_supported_band *sband,
70 struct sta_info *sta, u32 changed, 67 struct sta_info *sta, u32 changed)
71 enum nl80211_channel_type oper_chan_type)
72{ 68{
73 struct rate_control_ref *ref = local->rate_ctrl; 69 struct rate_control_ref *ref = local->rate_ctrl;
74 struct ieee80211_sta *ista = &sta->sta; 70 struct ieee80211_sta *ista = &sta->sta;
@@ -76,7 +72,8 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
76 72
77 if (ref && ref->ops->rate_update) 73 if (ref && ref->ops->rate_update)
78 ref->ops->rate_update(ref->priv, sband, ista, 74 ref->ops->rate_update(ref->priv, sband, ista,
79 priv_sta, changed, oper_chan_type); 75 priv_sta, changed);
76 drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
80} 77}
81 78
82static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, 79static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index b39dda523f39..79633ae06fd6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,14 +334,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
334 334
335 335
336static void 336static void
337calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, 337calc_rate_durations(enum ieee80211_band band,
338 struct minstrel_rate *d,
338 struct ieee80211_rate *rate) 339 struct ieee80211_rate *rate)
339{ 340{
340 int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); 341 int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
341 342
342 d->perfect_tx_time = ieee80211_frame_duration(local, 1200, 343 d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
343 rate->bitrate, erp, 1); 344 rate->bitrate, erp, 1);
344 d->ack_time = ieee80211_frame_duration(local, 10, 345 d->ack_time = ieee80211_frame_duration(band, 10,
345 rate->bitrate, erp, 1); 346 rate->bitrate, erp, 1);
346} 347}
347 348
@@ -379,14 +380,14 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
379{ 380{
380 struct minstrel_sta_info *mi = priv_sta; 381 struct minstrel_sta_info *mi = priv_sta;
381 struct minstrel_priv *mp = priv; 382 struct minstrel_priv *mp = priv;
382 struct ieee80211_local *local = hw_to_local(mp->hw);
383 struct ieee80211_rate *ctl_rate; 383 struct ieee80211_rate *ctl_rate;
384 unsigned int i, n = 0; 384 unsigned int i, n = 0;
385 unsigned int t_slot = 9; /* FIXME: get real slot time */ 385 unsigned int t_slot = 9; /* FIXME: get real slot time */
386 386
387 mi->lowest_rix = rate_lowest_index(sband, sta); 387 mi->lowest_rix = rate_lowest_index(sband, sta);
388 ctl_rate = &sband->bitrates[mi->lowest_rix]; 388 ctl_rate = &sband->bitrates[mi->lowest_rix];
389 mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, 389 mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
390 ctl_rate->bitrate,
390 !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); 391 !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
391 392
392 for (i = 0; i < sband->n_bitrates; i++) { 393 for (i = 0; i < sband->n_bitrates; i++) {
@@ -402,7 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
402 403
403 mr->rix = i; 404 mr->rix = i;
404 mr->bitrate = sband->bitrates[i].bitrate / 5; 405 mr->bitrate = sband->bitrates[i].bitrate / 5;
405 calc_rate_durations(local, mr, &sband->bitrates[i]); 406 calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
406 407
407 /* calculate maximum number of retransmissions before 408 /* calculate maximum number of retransmissions before
408 * fallback (based on maximum segment size) */ 409 * fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index ff5f7b84e825..2d1acc6c5445 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -568,6 +568,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
568 minstrel_next_sample_idx(mi); 568 minstrel_next_sample_idx(mi);
569 569
570 /* 570 /*
571 * Sampling might add some overhead (RTS, no aggregation)
572 * to the frame. Hence, don't use sampling for the currently
573 * used max TP rate.
574 */
575 if (sample_idx == mi->max_tp_rate)
576 return -1;
577 /*
571 * When not using MRR, do not sample if the probability is already 578 * When not using MRR, do not sample if the probability is already
572 * higher than 95% to avoid wasting airtime 579 * higher than 95% to avoid wasting airtime
573 */ 580 */
@@ -679,19 +686,18 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
679 686
680static void 687static void
681minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, 688minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
682 struct ieee80211_sta *sta, void *priv_sta, 689 struct ieee80211_sta *sta, void *priv_sta)
683 enum nl80211_channel_type oper_chan_type)
684{ 690{
685 struct minstrel_priv *mp = priv; 691 struct minstrel_priv *mp = priv;
686 struct minstrel_ht_sta_priv *msp = priv_sta; 692 struct minstrel_ht_sta_priv *msp = priv_sta;
687 struct minstrel_ht_sta *mi = &msp->ht; 693 struct minstrel_ht_sta *mi = &msp->ht;
688 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; 694 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
689 struct ieee80211_local *local = hw_to_local(mp->hw);
690 u16 sta_cap = sta->ht_cap.cap; 695 u16 sta_cap = sta->ht_cap.cap;
691 int n_supported = 0; 696 int n_supported = 0;
692 int ack_dur; 697 int ack_dur;
693 int stbc; 698 int stbc;
694 int i; 699 int i;
700 unsigned int smps;
695 701
696 /* fall back to the old minstrel for legacy stations */ 702 /* fall back to the old minstrel for legacy stations */
697 if (!sta->ht_cap.ht_supported) 703 if (!sta->ht_cap.ht_supported)
@@ -704,8 +710,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
704 memset(mi, 0, sizeof(*mi)); 710 memset(mi, 0, sizeof(*mi));
705 mi->stats_update = jiffies; 711 mi->stats_update = jiffies;
706 712
707 ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); 713 ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
708 mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; 714 mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
709 mi->overhead_rtscts = mi->overhead + 2 * ack_dur; 715 mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
710 716
711 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); 717 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -727,9 +733,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
727 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) 733 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
728 mi->tx_flags |= IEEE80211_TX_CTL_LDPC; 734 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
729 735
730 if (oper_chan_type != NL80211_CHAN_HT40MINUS && 736 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
731 oper_chan_type != NL80211_CHAN_HT40PLUS) 737 IEEE80211_HT_CAP_SM_PS_SHIFT;
732 sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
733 738
734 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { 739 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
735 u16 req = 0; 740 u16 req = 0;
@@ -748,6 +753,11 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
748 if ((sta_cap & req) != req) 753 if ((sta_cap & req) != req)
749 continue; 754 continue;
750 755
756 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
757 if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
758 minstrel_mcs_groups[i].streams > 1)
759 continue;
760
751 mi->groups[i].supported = 761 mi->groups[i].supported =
752 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; 762 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
753 763
@@ -772,17 +782,15 @@ static void
772minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, 782minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
773 struct ieee80211_sta *sta, void *priv_sta) 783 struct ieee80211_sta *sta, void *priv_sta)
774{ 784{
775 struct minstrel_priv *mp = priv; 785 minstrel_ht_update_caps(priv, sband, sta, priv_sta);
776
777 minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
778} 786}
779 787
780static void 788static void
781minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, 789minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
782 struct ieee80211_sta *sta, void *priv_sta, 790 struct ieee80211_sta *sta, void *priv_sta,
783 u32 changed, enum nl80211_channel_type oper_chan_type) 791 u32 changed)
784{ 792{
785 minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); 793 minstrel_ht_update_caps(priv, sband, sta, priv_sta);
786} 794}
787 795
788static void * 796static void *
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5a5e504a8ffb..7bcecf73aafb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -19,6 +19,7 @@
19#include <linux/export.h> 19#include <linux/export.h>
20#include <net/mac80211.h> 20#include <net/mac80211.h>
21#include <net/ieee80211_radiotap.h> 21#include <net/ieee80211_radiotap.h>
22#include <asm/unaligned.h>
22 23
23#include "ieee80211_i.h" 24#include "ieee80211_i.h"
24#include "driver-ops.h" 25#include "driver-ops.h"
@@ -102,7 +103,7 @@ static void
102ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 103ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
103 struct sk_buff *skb, 104 struct sk_buff *skb,
104 struct ieee80211_rate *rate, 105 struct ieee80211_rate *rate,
105 int rtap_len) 106 int rtap_len, bool has_fcs)
106{ 107{
107 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
108 struct ieee80211_radiotap_header *rthdr; 109 struct ieee80211_radiotap_header *rthdr;
@@ -133,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
133 } 134 }
134 135
135 /* IEEE80211_RADIOTAP_FLAGS */ 136 /* IEEE80211_RADIOTAP_FLAGS */
136 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 137 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
137 *pos |= IEEE80211_RADIOTAP_F_FCS; 138 *pos |= IEEE80211_RADIOTAP_F_FCS;
138 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
139 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -176,7 +177,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
176 pos += 2; 177 pos += 2;
177 178
178 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 179 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
179 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { 180 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
181 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
180 *pos = status->signal; 182 *pos = status->signal;
181 rthdr->it_present |= 183 rthdr->it_present |=
182 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
@@ -202,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
202 204
203 if (status->flag & RX_FLAG_HT) { 205 if (status->flag & RX_FLAG_HT) {
204 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
205 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 207 *pos++ = local->hw.radiotap_mcs_details;
206 IEEE80211_RADIOTAP_MCS_HAVE_GI |
207 IEEE80211_RADIOTAP_MCS_HAVE_BW;
208 *pos = 0; 208 *pos = 0;
209 if (status->flag & RX_FLAG_SHORT_GI) 209 if (status->flag & RX_FLAG_SHORT_GI)
210 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 210 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
211 if (status->flag & RX_FLAG_40MHZ) 211 if (status->flag & RX_FLAG_40MHZ)
212 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 212 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
213 if (status->flag & RX_FLAG_HT_GF)
214 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
213 pos++; 215 pos++;
214 *pos++ = status->rate_idx; 216 *pos++ = status->rate_idx;
215 } 217 }
@@ -226,7 +228,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
226{ 228{
227 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
228 struct ieee80211_sub_if_data *sdata; 230 struct ieee80211_sub_if_data *sdata;
229 int needed_headroom = 0; 231 int needed_headroom;
230 struct sk_buff *skb, *skb2; 232 struct sk_buff *skb, *skb2;
231 struct net_device *prev_dev = NULL; 233 struct net_device *prev_dev = NULL;
232 int present_fcs_len = 0; 234 int present_fcs_len = 0;
@@ -292,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
292 } 294 }
293 295
294 /* prepend radiotap information */ 296 /* prepend radiotap information */
295 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
298 true);
296 299
297 skb_reset_mac_header(skb); 300 skb_reset_mac_header(skb);
298 skb->ip_summed = CHECKSUM_UNNECESSARY; 301 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -423,6 +426,7 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
423 426
424 if (test_bit(SCAN_HW_SCANNING, &local->scanning) || 427 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
425 test_bit(SCAN_SW_SCANNING, &local->scanning) || 428 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
429 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
426 local->sched_scanning) 430 local->sched_scanning)
427 return ieee80211_scan_rx(rx->sdata, skb); 431 return ieee80211_scan_rx(rx->sdata, skb);
428 432
@@ -488,12 +492,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
488 if (ieee80211_has_tods(hdr->frame_control) || 492 if (ieee80211_has_tods(hdr->frame_control) ||
489 !ieee80211_has_fromds(hdr->frame_control)) 493 !ieee80211_has_fromds(hdr->frame_control))
490 return RX_DROP_MONITOR; 494 return RX_DROP_MONITOR;
491 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0) 495 if (ether_addr_equal(hdr->addr3, dev_addr))
492 return RX_DROP_MONITOR; 496 return RX_DROP_MONITOR;
493 } else { 497 } else {
494 if (!ieee80211_has_a4(hdr->frame_control)) 498 if (!ieee80211_has_a4(hdr->frame_control))
495 return RX_DROP_MONITOR; 499 return RX_DROP_MONITOR;
496 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0) 500 if (ether_addr_equal(hdr->addr4, dev_addr))
497 return RX_DROP_MONITOR; 501 return RX_DROP_MONITOR;
498 } 502 }
499 } 503 }
@@ -791,8 +795,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
791 795
792 /* reset session timer */ 796 /* reset session timer */
793 if (tid_agg_rx->timeout) 797 if (tid_agg_rx->timeout)
794 mod_timer(&tid_agg_rx->session_timer, 798 tid_agg_rx->last_rx = jiffies;
795 TU_TO_EXP_TIME(tid_agg_rx->timeout));
796 799
797 /* if this mpdu is fragmented - terminate rx aggregation session */ 800 /* if this mpdu is fragmented - terminate rx aggregation session */
798 sc = le16_to_cpu(hdr->seq_ctrl); 801 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -859,7 +862,12 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
859 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 862 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
860 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 863 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
861 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { 864 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
862 if (rx->sta && rx->sta->dummy && 865 /*
866 * accept port control frames from the AP even when it's not
867 * yet marked ASSOC to prevent a race where we don't set the
868 * assoc bit quickly enough before it sends the first frame
869 */
870 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
863 ieee80211_is_data_present(hdr->frame_control)) { 871 ieee80211_is_data_present(hdr->frame_control)) {
864 u16 ethertype; 872 u16 ethertype;
865 u8 *payload; 873 u8 *payload;
@@ -1056,20 +1064,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1056 return RX_DROP_MONITOR; 1064 return RX_DROP_MONITOR;
1057 } 1065 }
1058 1066
1059 if (skb_linearize(rx->skb))
1060 return RX_DROP_UNUSABLE;
1061 /* the hdr variable is invalid now! */
1062
1063 switch (rx->key->conf.cipher) { 1067 switch (rx->key->conf.cipher) {
1064 case WLAN_CIPHER_SUITE_WEP40: 1068 case WLAN_CIPHER_SUITE_WEP40:
1065 case WLAN_CIPHER_SUITE_WEP104: 1069 case WLAN_CIPHER_SUITE_WEP104:
1066 /* Check for weak IVs if possible */
1067 if (rx->sta && ieee80211_is_data(fc) &&
1068 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1069 !(status->flag & RX_FLAG_DECRYPTED)) &&
1070 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1071 rx->sta->wep_weak_iv_count++;
1072
1073 result = ieee80211_crypto_wep_decrypt(rx); 1070 result = ieee80211_crypto_wep_decrypt(rx);
1074 break; 1071 break;
1075 case WLAN_CIPHER_SUITE_TKIP: 1072 case WLAN_CIPHER_SUITE_TKIP:
@@ -1089,6 +1086,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1089 return RX_DROP_UNUSABLE; 1086 return RX_DROP_UNUSABLE;
1090 } 1087 }
1091 1088
1089 /* the hdr variable is invalid after the decrypt handlers */
1090
1092 /* either the frame has been decrypted or will be dropped */ 1091 /* either the frame has been decrypted or will be dropped */
1093 status->flag |= RX_FLAG_DECRYPTED; 1092 status->flag |= RX_FLAG_DECRYPTED;
1094 1093
@@ -1145,19 +1144,15 @@ static void ap_sta_ps_start(struct sta_info *sta)
1145 1144
1146static void ap_sta_ps_end(struct sta_info *sta) 1145static void ap_sta_ps_end(struct sta_info *sta)
1147{ 1146{
1148 struct ieee80211_sub_if_data *sdata = sta->sdata;
1149
1150 atomic_dec(&sdata->bss->num_sta_ps);
1151
1152#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1147#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1153 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1148 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1154 sdata->name, sta->sta.addr, sta->sta.aid); 1149 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1155#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1150#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1156 1151
1157 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1152 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1158#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1153#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1159 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1154 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1160 sdata->name, sta->sta.addr, sta->sta.aid); 1155 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1161#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1156#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1162 return; 1157 return;
1163 } 1158 }
@@ -1280,7 +1275,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1280 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1275 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1281 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1276 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1282 NL80211_IFTYPE_ADHOC); 1277 NL80211_IFTYPE_ADHOC);
1283 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) { 1278 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
1284 sta->last_rx = jiffies; 1279 sta->last_rx = jiffies;
1285 if (ieee80211_is_data(hdr->frame_control)) { 1280 if (ieee80211_is_data(hdr->frame_control)) {
1286 sta->last_rx_rate_idx = status->rate_idx; 1281 sta->last_rx_rate_idx = status->rate_idx;
@@ -1307,8 +1302,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1307 1302
1308 sta->rx_fragments++; 1303 sta->rx_fragments++;
1309 sta->rx_bytes += rx->skb->len; 1304 sta->rx_bytes += rx->skb->len;
1310 sta->last_signal = status->signal; 1305 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1311 ewma_add(&sta->avg_signal, -status->signal); 1306 sta->last_signal = status->signal;
1307 ewma_add(&sta->avg_signal, -status->signal);
1308 }
1312 1309
1313 /* 1310 /*
1314 * Change STA power saving mode only at the end of a frame 1311 * Change STA power saving mode only at the end of a frame
@@ -1441,8 +1438,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1441 */ 1438 */
1442 if (((hdr->frame_control ^ f_hdr->frame_control) & 1439 if (((hdr->frame_control ^ f_hdr->frame_control) &
1443 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 1440 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1444 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 1441 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1445 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 1442 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1446 continue; 1443 continue;
1447 1444
1448 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 1445 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
@@ -1717,8 +1714,8 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1717 * of whether the frame was encrypted or not. 1714 * of whether the frame was encrypted or not.
1718 */ 1715 */
1719 if (ehdr->h_proto == rx->sdata->control_port_protocol && 1716 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1720 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1717 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
1721 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1718 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
1722 return true; 1719 return true;
1723 1720
1724 if (ieee80211_802_1x_port_control(rx) || 1721 if (ieee80211_802_1x_port_control(rx) ||
@@ -1755,9 +1752,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1755 * local net stack and back to the wireless medium 1752 * local net stack and back to the wireless medium
1756 */ 1753 */
1757 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1754 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1758 if (!xmit_skb && net_ratelimit()) 1755 if (!xmit_skb)
1759 printk(KERN_DEBUG "%s: failed to clone " 1756 net_dbg_ratelimited("%s: failed to clone multicast frame\n",
1760 "multicast frame\n", dev->name); 1757 dev->name);
1761 } else { 1758 } else {
1762 dsta = sta_info_get(sdata, skb->data); 1759 dsta = sta_info_get(sdata, skb->data);
1763 if (dsta) { 1760 if (dsta) {
@@ -1928,7 +1925,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1928 mpp_path_add(proxied_addr, mpp_addr, sdata); 1925 mpp_path_add(proxied_addr, mpp_addr, sdata);
1929 } else { 1926 } else {
1930 spin_lock_bh(&mppath->state_lock); 1927 spin_lock_bh(&mppath->state_lock);
1931 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) 1928 if (!ether_addr_equal(mppath->mpp, mpp_addr))
1932 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1929 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1933 spin_unlock_bh(&mppath->state_lock); 1930 spin_unlock_bh(&mppath->state_lock);
1934 } 1931 }
@@ -1937,7 +1934,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1937 1934
1938 /* Frame has reached destination. Don't forward */ 1935 /* Frame has reached destination. Don't forward */
1939 if (!is_multicast_ether_addr(hdr->addr1) && 1936 if (!is_multicast_ether_addr(hdr->addr1) &&
1940 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) 1937 ether_addr_equal(sdata->vif.addr, hdr->addr3))
1941 return RX_CONTINUE; 1938 return RX_CONTINUE;
1942 1939
1943 q = ieee80211_select_queue_80211(local, skb, hdr); 1940 q = ieee80211_select_queue_80211(local, skb, hdr);
@@ -1955,11 +1952,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1955 return RX_DROP_MONITOR; 1952 return RX_DROP_MONITOR;
1956 } 1953 }
1957 1954
1955 if (!ifmsh->mshcfg.dot11MeshForwarding)
1956 goto out;
1957
1958 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1958 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1959 if (!fwd_skb) { 1959 if (!fwd_skb) {
1960 if (net_ratelimit()) 1960 net_dbg_ratelimited("%s: failed to clone mesh frame\n",
1961 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1961 sdata->name);
1962 sdata->name);
1963 goto out; 1962 goto out;
1964 } 1963 }
1965 1964
@@ -2122,13 +2121,13 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2122 struct sk_buff *skb; 2121 struct sk_buff *skb;
2123 struct ieee80211_mgmt *resp; 2122 struct ieee80211_mgmt *resp;
2124 2123
2125 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { 2124 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2126 /* Not to own unicast address */ 2125 /* Not to own unicast address */
2127 return; 2126 return;
2128 } 2127 }
2129 2128
2130 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || 2129 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2131 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { 2130 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2132 /* Not from the current AP or not associated yet. */ 2131 /* Not from the current AP or not associated yet. */
2133 return; 2132 return;
2134 } 2133 }
@@ -2180,12 +2179,14 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2180 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2179 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2181 ieee80211_is_beacon(mgmt->frame_control) && 2180 ieee80211_is_beacon(mgmt->frame_control) &&
2182 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2181 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2183 struct ieee80211_rx_status *status; 2182 int sig = 0;
2183
2184 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2185 sig = status->signal;
2184 2186
2185 status = IEEE80211_SKB_RXCB(rx->skb);
2186 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2187 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2187 rx->skb->data, rx->skb->len, 2188 rx->skb->data, rx->skb->len,
2188 status->freq, GFP_ATOMIC); 2189 status->freq, sig, GFP_ATOMIC);
2189 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2190 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2190 } 2191 }
2191 2192
@@ -2269,8 +2270,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2269 sband = rx->local->hw.wiphy->bands[status->band]; 2270 sband = rx->local->hw.wiphy->bands[status->band];
2270 2271
2271 rate_control_rate_update(local, sband, rx->sta, 2272 rate_control_rate_update(local, sband, rx->sta,
2272 IEEE80211_RC_SMPS_CHANGED, 2273 IEEE80211_RC_SMPS_CHANGED);
2273 local->_oper_channel_type);
2274 goto handled; 2274 goto handled;
2275 } 2275 }
2276 default: 2276 default:
@@ -2337,7 +2337,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2337 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2337 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2338 break; 2338 break;
2339 2339
2340 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2340 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2341 break; 2341 break;
2342 2342
2343 goto queue; 2343 goto queue;
@@ -2409,6 +2409,7 @@ static ieee80211_rx_result debug_noinline
2409ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2409ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2410{ 2410{
2411 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2411 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2412 int sig = 0;
2412 2413
2413 /* skip known-bad action frames and return them in the next handler */ 2414 /* skip known-bad action frames and return them in the next handler */
2414 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2415 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
@@ -2421,7 +2422,10 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2421 * it transmitted were processed or returned. 2422 * it transmitted were processed or returned.
2422 */ 2423 */
2423 2424
2424 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, 2425 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2426 sig = status->signal;
2427
2428 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
2425 rx->skb->data, rx->skb->len, 2429 rx->skb->data, rx->skb->len,
2426 GFP_ATOMIC)) { 2430 GFP_ATOMIC)) {
2427 if (rx->sta) 2431 if (rx->sta)
@@ -2486,14 +2490,9 @@ static ieee80211_rx_result debug_noinline
2486ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2490ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2487{ 2491{
2488 struct ieee80211_sub_if_data *sdata = rx->sdata; 2492 struct ieee80211_sub_if_data *sdata = rx->sdata;
2489 ieee80211_rx_result rxs;
2490 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2493 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2491 __le16 stype; 2494 __le16 stype;
2492 2495
2493 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2494 if (rxs != RX_CONTINUE)
2495 return rxs;
2496
2497 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2496 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2498 2497
2499 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2498 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
@@ -2502,10 +2501,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2502 return RX_DROP_MONITOR; 2501 return RX_DROP_MONITOR;
2503 2502
2504 switch (stype) { 2503 switch (stype) {
2504 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2505 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2505 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2506 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2506 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2507 /* process for all: mesh, mlme, ibss */ 2507 /* process for all: mesh, mlme, ibss */
2508 break; 2508 break;
2509 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2510 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2509 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2511 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2510 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2512 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2511 if (is_multicast_ether_addr(mgmt->da) && 2513 if (is_multicast_ether_addr(mgmt->da) &&
@@ -2517,7 +2519,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2517 return RX_DROP_MONITOR; 2519 return RX_DROP_MONITOR;
2518 break; 2520 break;
2519 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2521 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2520 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2521 /* process only for ibss */ 2522 /* process only for ibss */
2522 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2523 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2523 return RX_DROP_MONITOR; 2524 return RX_DROP_MONITOR;
@@ -2542,16 +2543,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2542{ 2543{
2543 struct ieee80211_sub_if_data *sdata; 2544 struct ieee80211_sub_if_data *sdata;
2544 struct ieee80211_local *local = rx->local; 2545 struct ieee80211_local *local = rx->local;
2545 struct ieee80211_rtap_hdr {
2546 struct ieee80211_radiotap_header hdr;
2547 u8 flags;
2548 u8 rate_or_pad;
2549 __le16 chan_freq;
2550 __le16 chan_flags;
2551 } __packed *rthdr;
2552 struct sk_buff *skb = rx->skb, *skb2; 2546 struct sk_buff *skb = rx->skb, *skb2;
2553 struct net_device *prev_dev = NULL; 2547 struct net_device *prev_dev = NULL;
2554 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2548 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2549 int needed_headroom;
2555 2550
2556 /* 2551 /*
2557 * If cooked monitor has been processed already, then 2552 * If cooked monitor has been processed already, then
@@ -2565,30 +2560,16 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2565 if (!local->cooked_mntrs) 2560 if (!local->cooked_mntrs)
2566 goto out_free_skb; 2561 goto out_free_skb;
2567 2562
2568 if (skb_headroom(skb) < sizeof(*rthdr) && 2563 /* room for the radiotap header based on driver features */
2569 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2564 needed_headroom = ieee80211_rx_radiotap_len(local, status);
2570 goto out_free_skb;
2571
2572 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2573 memset(rthdr, 0, sizeof(*rthdr));
2574 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2575 rthdr->hdr.it_present =
2576 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2577 (1 << IEEE80211_RADIOTAP_CHANNEL));
2578 2565
2579 if (rate) { 2566 if (skb_headroom(skb) < needed_headroom &&
2580 rthdr->rate_or_pad = rate->bitrate / 5; 2567 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
2581 rthdr->hdr.it_present |= 2568 goto out_free_skb;
2582 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2583 }
2584 rthdr->chan_freq = cpu_to_le16(status->freq);
2585 2569
2586 if (status->band == IEEE80211_BAND_5GHZ) 2570 /* prepend radiotap information */
2587 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | 2571 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2588 IEEE80211_CHAN_5GHZ); 2572 false);
2589 else
2590 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2591 IEEE80211_CHAN_2GHZ);
2592 2573
2593 skb_set_mac_header(skb, 0); 2574 skb_set_mac_header(skb, 0);
2594 skb->ip_summed = CHECKSUM_UNNECESSARY; 2575 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2790,7 +2771,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2790 if (!bssid && !sdata->u.mgd.use_4addr) 2771 if (!bssid && !sdata->u.mgd.use_4addr)
2791 return 0; 2772 return 0;
2792 if (!multicast && 2773 if (!multicast &&
2793 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { 2774 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2794 if (!(sdata->dev->flags & IFF_PROMISC) || 2775 if (!(sdata->dev->flags & IFF_PROMISC) ||
2795 sdata->u.mgd.use_4addr) 2776 sdata->u.mgd.use_4addr)
2796 return 0; 2777 return 0;
@@ -2808,8 +2789,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2808 return 0; 2789 return 0;
2809 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2790 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2810 } else if (!multicast && 2791 } else if (!multicast &&
2811 compare_ether_addr(sdata->vif.addr, 2792 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2812 hdr->addr1) != 0) {
2813 if (!(sdata->dev->flags & IFF_PROMISC)) 2793 if (!(sdata->dev->flags & IFF_PROMISC))
2814 return 0; 2794 return 0;
2815 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2795 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2825,8 +2805,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2825 break; 2805 break;
2826 case NL80211_IFTYPE_MESH_POINT: 2806 case NL80211_IFTYPE_MESH_POINT:
2827 if (!multicast && 2807 if (!multicast &&
2828 compare_ether_addr(sdata->vif.addr, 2808 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2829 hdr->addr1) != 0) {
2830 if (!(sdata->dev->flags & IFF_PROMISC)) 2809 if (!(sdata->dev->flags & IFF_PROMISC))
2831 return 0; 2810 return 0;
2832 2811
@@ -2836,8 +2815,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2836 case NL80211_IFTYPE_AP_VLAN: 2815 case NL80211_IFTYPE_AP_VLAN:
2837 case NL80211_IFTYPE_AP: 2816 case NL80211_IFTYPE_AP:
2838 if (!bssid) { 2817 if (!bssid) {
2839 if (compare_ether_addr(sdata->vif.addr, 2818 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2840 hdr->addr1))
2841 return 0; 2819 return 0;
2842 } else if (!ieee80211_bssid_match(bssid, 2820 } else if (!ieee80211_bssid_match(bssid,
2843 sdata->vif.addr)) { 2821 sdata->vif.addr)) {
@@ -2859,7 +2837,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2859 case NL80211_IFTYPE_WDS: 2837 case NL80211_IFTYPE_WDS:
2860 if (bssid || !ieee80211_is_data(hdr->frame_control)) 2838 if (bssid || !ieee80211_is_data(hdr->frame_control))
2861 return 0; 2839 return 0;
2862 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 2840 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2863 return 0; 2841 return 0;
2864 break; 2842 break;
2865 default: 2843 default:
@@ -2936,6 +2914,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2936 local->dot11ReceivedFragmentCount++; 2914 local->dot11ReceivedFragmentCount++;
2937 2915
2938 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2916 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2917 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
2939 test_bit(SCAN_SW_SCANNING, &local->scanning))) 2918 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2940 status->rx_flags |= IEEE80211_RX_IN_SCAN; 2919 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2941 2920
@@ -2956,7 +2935,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2956 if (ieee80211_is_data(fc)) { 2935 if (ieee80211_is_data(fc)) {
2957 prev_sta = NULL; 2936 prev_sta = NULL;
2958 2937
2959 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) { 2938 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2960 if (!prev_sta) { 2939 if (!prev_sta) {
2961 prev_sta = sta; 2940 prev_sta = sta;
2962 continue; 2941 continue;
@@ -3000,7 +2979,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
3000 continue; 2979 continue;
3001 } 2980 }
3002 2981
3003 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2); 2982 rx.sta = sta_info_get_bss(prev, hdr->addr2);
3004 rx.sdata = prev; 2983 rx.sdata = prev;
3005 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2984 ieee80211_prepare_and_rx_handle(&rx, skb, false);
3006 2985
@@ -3008,7 +2987,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
3008 } 2987 }
3009 2988
3010 if (prev) { 2989 if (prev) {
3011 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2); 2990 rx.sta = sta_info_get_bss(prev, hdr->addr2);
3012 rx.sdata = prev; 2991 rx.sdata = prev;
3013 2992
3014 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2993 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 9270771702fe..169da0742c81 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
17#include <linux/pm_qos.h> 18#include <linux/pm_qos.h>
18#include <net/sch_generic.h> 19#include <net/sch_generic.h>
@@ -28,20 +29,6 @@
28#define IEEE80211_CHANNEL_TIME (HZ / 33) 29#define IEEE80211_CHANNEL_TIME (HZ / 33)
29#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) 30#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
30 31
31struct ieee80211_bss *
32ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
33 u8 *ssid, u8 ssid_len)
34{
35 struct cfg80211_bss *cbss;
36
37 cbss = cfg80211_get_bss(local->hw.wiphy,
38 ieee80211_get_channel(local->hw.wiphy, freq),
39 bssid, ssid, ssid_len, 0, 0);
40 if (!cbss)
41 return NULL;
42 return (void *)cbss->priv;
43}
44
45static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 32static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
46{ 33{
47 struct ieee80211_bss *bss = (void *)cbss->priv; 34 struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -103,16 +90,35 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
103 cbss->free_priv = ieee80211_rx_bss_free; 90 cbss->free_priv = ieee80211_rx_bss_free;
104 bss = (void *)cbss->priv; 91 bss = (void *)cbss->priv;
105 92
93 if (elems->parse_error) {
94 if (beacon)
95 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
96 else
97 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
98 } else {
99 if (beacon)
100 bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
101 else
102 bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
103 }
104
106 /* save the ERP value so that it is available at association time */ 105 /* save the ERP value so that it is available at association time */
107 if (elems->erp_info && elems->erp_info_len >= 1) { 106 if (elems->erp_info && elems->erp_info_len >= 1 &&
107 (!elems->parse_error ||
108 !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) {
108 bss->erp_value = elems->erp_info[0]; 109 bss->erp_value = elems->erp_info[0];
109 bss->has_erp_value = true; 110 bss->has_erp_value = true;
111 if (!elems->parse_error)
112 bss->valid_data |= IEEE80211_BSS_VALID_ERP;
110 } 113 }
111 114
112 if (elems->tim) { 115 if (elems->tim && (!elems->parse_error ||
116 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
113 struct ieee80211_tim_ie *tim_ie = 117 struct ieee80211_tim_ie *tim_ie =
114 (struct ieee80211_tim_ie *)elems->tim; 118 (struct ieee80211_tim_ie *)elems->tim;
115 bss->dtim_period = tim_ie->dtim_period; 119 bss->dtim_period = tim_ie->dtim_period;
120 if (!elems->parse_error)
121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
116 } 122 }
117 123
118 /* If the beacon had no TIM IE, or it was invalid, use 1 */ 124 /* If the beacon had no TIM IE, or it was invalid, use 1 */
@@ -120,26 +126,38 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
120 bss->dtim_period = 1; 126 bss->dtim_period = 1;
121 127
122 /* replace old supported rates if we get new values */ 128 /* replace old supported rates if we get new values */
123 srlen = 0; 129 if (!elems->parse_error ||
124 if (elems->supp_rates) { 130 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
125 clen = IEEE80211_MAX_SUPP_RATES; 131 srlen = 0;
126 if (clen > elems->supp_rates_len) 132 if (elems->supp_rates) {
127 clen = elems->supp_rates_len; 133 clen = IEEE80211_MAX_SUPP_RATES;
128 memcpy(bss->supp_rates, elems->supp_rates, clen); 134 if (clen > elems->supp_rates_len)
129 srlen += clen; 135 clen = elems->supp_rates_len;
130 } 136 memcpy(bss->supp_rates, elems->supp_rates, clen);
131 if (elems->ext_supp_rates) { 137 srlen += clen;
132 clen = IEEE80211_MAX_SUPP_RATES - srlen; 138 }
133 if (clen > elems->ext_supp_rates_len) 139 if (elems->ext_supp_rates) {
134 clen = elems->ext_supp_rates_len; 140 clen = IEEE80211_MAX_SUPP_RATES - srlen;
135 memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen); 141 if (clen > elems->ext_supp_rates_len)
136 srlen += clen; 142 clen = elems->ext_supp_rates_len;
143 memcpy(bss->supp_rates + srlen, elems->ext_supp_rates,
144 clen);
145 srlen += clen;
146 }
147 if (srlen) {
148 bss->supp_rates_len = srlen;
149 if (!elems->parse_error)
150 bss->valid_data |= IEEE80211_BSS_VALID_RATES;
151 }
137 } 152 }
138 if (srlen)
139 bss->supp_rates_len = srlen;
140 153
141 bss->wmm_used = elems->wmm_param || elems->wmm_info; 154 if (!elems->parse_error ||
142 bss->uapsd_supported = is_uapsd_supported(elems); 155 !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) {
156 bss->wmm_used = elems->wmm_param || elems->wmm_info;
157 bss->uapsd_supported = is_uapsd_supported(elems);
158 if (!elems->parse_error)
159 bss->valid_data |= IEEE80211_BSS_VALID_WMM;
160 }
143 161
144 if (!beacon) 162 if (!beacon)
145 bss->last_probe_resp = jiffies; 163 bss->last_probe_resp = jiffies;
@@ -176,7 +194,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
176 presp = ieee80211_is_probe_resp(fc); 194 presp = ieee80211_is_probe_resp(fc);
177 if (presp) { 195 if (presp) {
178 /* ignore ProbeResp to foreign address */ 196 /* ignore ProbeResp to foreign address */
179 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) 197 if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
180 return RX_DROP_MONITOR; 198 return RX_DROP_MONITOR;
181 199
182 presp = true; 200 presp = true;
@@ -338,7 +356,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
338 */ 356 */
339 drv_sw_scan_start(local); 357 drv_sw_scan_start(local);
340 358
341 local->leave_oper_channel_time = 0; 359 local->leave_oper_channel_time = jiffies;
342 local->next_scan_state = SCAN_DECISION; 360 local->next_scan_state = SCAN_DECISION;
343 local->scan_channel_idx = 0; 361 local->scan_channel_idx = 0;
344 362
@@ -355,6 +373,57 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
355 return 0; 373 return 0;
356} 374}
357 375
376static bool ieee80211_can_scan(struct ieee80211_local *local,
377 struct ieee80211_sub_if_data *sdata)
378{
379 if (!list_empty(&local->work_list))
380 return false;
381
382 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
383 sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
384 IEEE80211_STA_CONNECTION_POLL))
385 return false;
386
387 return true;
388}
389
390void ieee80211_run_deferred_scan(struct ieee80211_local *local)
391{
392 lockdep_assert_held(&local->mtx);
393
394 if (!local->scan_req || local->scanning)
395 return;
396
397 if (!ieee80211_can_scan(local, local->scan_sdata))
398 return;
399
400 ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
401 round_jiffies_relative(0));
402}
403
404static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
405 unsigned long *next_delay)
406{
407 int i;
408 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
409 enum ieee80211_band band = local->hw.conf.channel->band;
410
411 for (i = 0; i < local->scan_req->n_ssids; i++)
412 ieee80211_send_probe_req(
413 sdata, NULL,
414 local->scan_req->ssids[i].ssid,
415 local->scan_req->ssids[i].ssid_len,
416 local->scan_req->ie, local->scan_req->ie_len,
417 local->scan_req->rates[band], false,
418 local->scan_req->no_cck);
419
420 /*
421 * After sending probe requests, wait for probe responses
422 * on the channel.
423 */
424 *next_delay = IEEE80211_CHANNEL_TIME;
425 local->next_scan_state = SCAN_DECISION;
426}
358 427
359static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, 428static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
360 struct cfg80211_scan_request *req) 429 struct cfg80211_scan_request *req)
@@ -367,7 +436,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
367 if (local->scan_req) 436 if (local->scan_req)
368 return -EBUSY; 437 return -EBUSY;
369 438
370 if (!list_empty(&local->work_list)) { 439 if (!ieee80211_can_scan(local, sdata)) {
371 /* wait for the work to finish/time out */ 440 /* wait for the work to finish/time out */
372 local->scan_req = req; 441 local->scan_req = req;
373 local->scan_sdata = sdata; 442 local->scan_sdata = sdata;
@@ -406,10 +475,47 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
406 local->scan_req = req; 475 local->scan_req = req;
407 local->scan_sdata = sdata; 476 local->scan_sdata = sdata;
408 477
409 if (local->ops->hw_scan) 478 if (local->ops->hw_scan) {
410 __set_bit(SCAN_HW_SCANNING, &local->scanning); 479 __set_bit(SCAN_HW_SCANNING, &local->scanning);
411 else 480 } else if ((req->n_channels == 1) &&
481 (req->channels[0]->center_freq ==
482 local->hw.conf.channel->center_freq)) {
483
484 /* If we are scanning only on the current channel, then
485 * we do not need to stop normal activities
486 */
487 unsigned long next_delay;
488
489 __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
490
491 ieee80211_recalc_idle(local);
492
493 /* Notify driver scan is starting, keep order of operations
494 * same as normal software scan, in case that matters. */
495 drv_sw_scan_start(local);
496
497 ieee80211_configure_filter(local); /* accept probe-responses */
498
499 /* We need to ensure power level is at max for scanning. */
500 ieee80211_hw_config(local, 0);
501
502 if ((req->channels[0]->flags &
503 IEEE80211_CHAN_PASSIVE_SCAN) ||
504 !local->scan_req->n_ssids) {
505 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
506 } else {
507 ieee80211_scan_state_send_probe(local, &next_delay);
508 next_delay = IEEE80211_CHANNEL_TIME;
509 }
510
511 /* Now, just wait a bit and we are all done! */
512 ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
513 next_delay);
514 return 0;
515 } else {
516 /* Do normal software scan */
412 __set_bit(SCAN_SW_SCANNING, &local->scanning); 517 __set_bit(SCAN_SW_SCANNING, &local->scanning);
518 }
413 519
414 ieee80211_recalc_idle(local); 520 ieee80211_recalc_idle(local);
415 521
@@ -566,30 +672,6 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
566 local->next_scan_state = SCAN_SEND_PROBE; 672 local->next_scan_state = SCAN_SEND_PROBE;
567} 673}
568 674
569static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
570 unsigned long *next_delay)
571{
572 int i;
573 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
574 enum ieee80211_band band = local->hw.conf.channel->band;
575
576 for (i = 0; i < local->scan_req->n_ssids; i++)
577 ieee80211_send_probe_req(
578 sdata, NULL,
579 local->scan_req->ssids[i].ssid,
580 local->scan_req->ssids[i].ssid_len,
581 local->scan_req->ie, local->scan_req->ie_len,
582 local->scan_req->rates[band], false,
583 local->scan_req->no_cck);
584
585 /*
586 * After sending probe requests, wait for probe responses
587 * on the channel.
588 */
589 *next_delay = IEEE80211_CHANNEL_TIME;
590 local->next_scan_state = SCAN_DECISION;
591}
592
593static void ieee80211_scan_state_suspend(struct ieee80211_local *local, 675static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
594 unsigned long *next_delay) 676 unsigned long *next_delay)
595{ 677{
@@ -640,6 +722,12 @@ void ieee80211_scan_work(struct work_struct *work)
640 722
641 sdata = local->scan_sdata; 723 sdata = local->scan_sdata;
642 724
725 /* When scanning on-channel, the first-callback means completed. */
726 if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
727 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
728 goto out_complete;
729 }
730
643 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { 731 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
644 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); 732 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
645 goto out_complete; 733 goto out_complete;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ff11f6bf8266..f5b1638fbf80 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/etherdevice.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -100,27 +101,8 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
100 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 101 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
101 lockdep_is_held(&local->sta_mtx)); 102 lockdep_is_held(&local->sta_mtx));
102 while (sta) { 103 while (sta) {
103 if (sta->sdata == sdata && !sta->dummy &&
104 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
105 break;
106 sta = rcu_dereference_check(sta->hnext,
107 lockdep_is_held(&local->sta_mtx));
108 }
109 return sta;
110}
111
112/* get a station info entry even if it is a dummy station*/
113struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
114 const u8 *addr)
115{
116 struct ieee80211_local *local = sdata->local;
117 struct sta_info *sta;
118
119 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
120 lockdep_is_held(&local->sta_mtx));
121 while (sta) {
122 if (sta->sdata == sdata && 104 if (sta->sdata == sdata &&
123 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 105 ether_addr_equal(sta->sta.addr, addr))
124 break; 106 break;
125 sta = rcu_dereference_check(sta->hnext, 107 sta = rcu_dereference_check(sta->hnext,
126 lockdep_is_held(&local->sta_mtx)); 108 lockdep_is_held(&local->sta_mtx));
@@ -143,31 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
143 while (sta) { 125 while (sta) {
144 if ((sta->sdata == sdata || 126 if ((sta->sdata == sdata ||
145 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) && 127 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
146 !sta->dummy && 128 ether_addr_equal(sta->sta.addr, addr))
147 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
148 break;
149 sta = rcu_dereference_check(sta->hnext,
150 lockdep_is_held(&local->sta_mtx));
151 }
152 return sta;
153}
154
155/*
156 * Get sta info either from the specified interface
157 * or from one of its vlans (including dummy stations)
158 */
159struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
160 const u8 *addr)
161{
162 struct ieee80211_local *local = sdata->local;
163 struct sta_info *sta;
164
165 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
166 lockdep_is_held(&local->sta_mtx));
167 while (sta) {
168 if ((sta->sdata == sdata ||
169 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
170 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
171 break; 129 break;
172 sta = rcu_dereference_check(sta->hnext, 130 sta = rcu_dereference_check(sta->hnext,
173 lockdep_is_held(&local->sta_mtx)); 131 lockdep_is_held(&local->sta_mtx));
@@ -208,10 +166,8 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
208 */ 166 */
209void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 167void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
210{ 168{
211 if (sta->rate_ctrl) { 169 if (sta->rate_ctrl)
212 rate_control_free_sta(sta); 170 rate_control_free_sta(sta);
213 rate_control_put(sta->rate_ctrl);
214 }
215 171
216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 172#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
217 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr); 173 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
@@ -264,13 +220,11 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
264 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 220 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
265 return 0; 221 return 0;
266 222
267 sta->rate_ctrl = rate_control_get(local->rate_ctrl); 223 sta->rate_ctrl = local->rate_ctrl;
268 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 224 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
269 &sta->sta, gfp); 225 &sta->sta, gfp);
270 if (!sta->rate_ctrl_priv) { 226 if (!sta->rate_ctrl_priv)
271 rate_control_put(sta->rate_ctrl);
272 return -ENOMEM; 227 return -ENOMEM;
273 }
274 228
275 return 0; 229 return 0;
276} 230}
@@ -297,6 +251,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
297 sta->sdata = sdata; 251 sta->sdata = sdata;
298 sta->last_rx = jiffies; 252 sta->last_rx = jiffies;
299 253
254 sta->sta_state = IEEE80211_STA_NONE;
255
300 do_posix_clock_monotonic_gettime(&uptime); 256 do_posix_clock_monotonic_gettime(&uptime);
301 sta->last_connected = uptime.tv_sec; 257 sta->last_connected = uptime.tv_sec;
302 ewma_init(&sta->avg_signal, 1024, 8); 258 ewma_init(&sta->avg_signal, 1024, 8);
@@ -346,13 +302,50 @@ static int sta_info_insert_check(struct sta_info *sta)
346 if (unlikely(!ieee80211_sdata_running(sdata))) 302 if (unlikely(!ieee80211_sdata_running(sdata)))
347 return -ENETDOWN; 303 return -ENETDOWN;
348 304
349 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 || 305 if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
350 is_multicast_ether_addr(sta->sta.addr))) 306 is_multicast_ether_addr(sta->sta.addr)))
351 return -EINVAL; 307 return -EINVAL;
352 308
353 return 0; 309 return 0;
354} 310}
355 311
312static int sta_info_insert_drv_state(struct ieee80211_local *local,
313 struct ieee80211_sub_if_data *sdata,
314 struct sta_info *sta)
315{
316 enum ieee80211_sta_state state;
317 int err = 0;
318
319 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
320 err = drv_sta_state(local, sdata, sta, state, state + 1);
321 if (err)
322 break;
323 }
324
325 if (!err) {
326 /*
327 * Drivers using legacy sta_add/sta_remove callbacks only
328 * get uploaded set to true after sta_add is called.
329 */
330 if (!local->ops->sta_add)
331 sta->uploaded = true;
332 return 0;
333 }
334
335 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
336 printk(KERN_DEBUG
337 "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n",
338 sdata->name, sta->sta.addr, state + 1, err);
339 err = 0;
340 }
341
342 /* unwind on error */
343 for (; state > IEEE80211_STA_NOTEXIST; state--)
344 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));
345
346 return err;
347}
348
356/* 349/*
357 * should be called with sta_mtx locked 350 * should be called with sta_mtx locked
358 * this function replaces the mutex lock 351 * this function replaces the mutex lock
@@ -362,70 +355,43 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
362{ 355{
363 struct ieee80211_local *local = sta->local; 356 struct ieee80211_local *local = sta->local;
364 struct ieee80211_sub_if_data *sdata = sta->sdata; 357 struct ieee80211_sub_if_data *sdata = sta->sdata;
365 struct sta_info *exist_sta; 358 struct station_info sinfo;
366 bool dummy_reinsert = false;
367 int err = 0; 359 int err = 0;
368 360
369 lockdep_assert_held(&local->sta_mtx); 361 lockdep_assert_held(&local->sta_mtx);
370 362
371 /* 363 /* check if STA exists already */
372 * check if STA exists already. 364 if (sta_info_get_bss(sdata, sta->sta.addr)) {
373 * only accept a scenario of a second call to sta_info_insert_finish 365 err = -EEXIST;
374 * with a dummy station entry that was inserted earlier 366 goto out_err;
375 * in that case - assume that the dummy station flag should
376 * be removed.
377 */
378 exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
379 if (exist_sta) {
380 if (exist_sta == sta && sta->dummy) {
381 dummy_reinsert = true;
382 } else {
383 err = -EEXIST;
384 goto out_err;
385 }
386 } 367 }
387 368
388 if (!sta->dummy || dummy_reinsert) { 369 /* notify driver */
389 /* notify driver */ 370 err = sta_info_insert_drv_state(local, sdata, sta);
390 err = drv_sta_add(local, sdata, &sta->sta); 371 if (err)
391 if (err) { 372 goto out_err;
392 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
393 goto out_err;
394 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
395 "driver (%d) - keeping it anyway.\n",
396 sdata->name, sta->sta.addr, err);
397 } else
398 sta->uploaded = true;
399 }
400 373
401 if (!dummy_reinsert) { 374 local->num_sta++;
402 local->num_sta++; 375 local->sta_generation++;
403 local->sta_generation++; 376 smp_mb();
404 smp_mb();
405 377
406 /* make the station visible */ 378 /* make the station visible */
407 sta_info_hash_add(local, sta); 379 sta_info_hash_add(local, sta);
408 380
409 list_add(&sta->list, &local->sta_list); 381 list_add(&sta->list, &local->sta_list);
410 } else {
411 sta->dummy = false;
412 }
413 382
414 if (!sta->dummy) { 383 set_sta_flag(sta, WLAN_STA_INSERTED);
415 struct station_info sinfo;
416 384
417 ieee80211_sta_debugfs_add(sta); 385 ieee80211_sta_debugfs_add(sta);
418 rate_control_add_sta_debugfs(sta); 386 rate_control_add_sta_debugfs(sta);
419 387
420 memset(&sinfo, 0, sizeof(sinfo)); 388 memset(&sinfo, 0, sizeof(sinfo));
421 sinfo.filled = 0; 389 sinfo.filled = 0;
422 sinfo.generation = local->sta_generation; 390 sinfo.generation = local->sta_generation;
423 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 391 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
424 }
425 392
426#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 393#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
427 wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n", 394 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
428 sta->dummy ? "dummy " : "", sta->sta.addr);
429#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 395#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
430 396
431 /* move reference to rcu-protected */ 397 /* move reference to rcu-protected */
@@ -477,25 +443,6 @@ int sta_info_insert(struct sta_info *sta)
477 return err; 443 return err;
478} 444}
479 445
480/* Caller must hold sta->local->sta_mtx */
481int sta_info_reinsert(struct sta_info *sta)
482{
483 struct ieee80211_local *local = sta->local;
484 int err = 0;
485
486 err = sta_info_insert_check(sta);
487 if (err) {
488 mutex_unlock(&local->sta_mtx);
489 return err;
490 }
491
492 might_sleep();
493
494 err = sta_info_insert_finish(sta);
495 rcu_read_unlock();
496 return err;
497}
498
499static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 446static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
500{ 447{
501 /* 448 /*
@@ -711,7 +658,7 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
711 return have_buffered; 658 return have_buffered;
712} 659}
713 660
714static int __must_check __sta_info_destroy(struct sta_info *sta) 661int __must_check __sta_info_destroy(struct sta_info *sta)
715{ 662{
716 struct ieee80211_local *local; 663 struct ieee80211_local *local;
717 struct ieee80211_sub_if_data *sdata; 664 struct ieee80211_sub_if_data *sdata;
@@ -726,6 +673,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
726 local = sta->local; 673 local = sta->local;
727 sdata = sta->sdata; 674 sdata = sta->sdata;
728 675
676 lockdep_assert_held(&local->sta_mtx);
677
729 /* 678 /*
730 * Before removing the station from the driver and 679 * Before removing the station from the driver and
731 * rate control, it might still start new aggregation 680 * rate control, it might still start new aggregation
@@ -750,33 +699,24 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
750 699
751 sta->dead = true; 700 sta->dead = true;
752 701
753 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
754 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
755 BUG_ON(!sdata->bss);
756
757 clear_sta_flag(sta, WLAN_STA_PS_STA);
758 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
759
760 atomic_dec(&sdata->bss->num_sta_ps);
761 sta_info_recalc_tim(sta);
762 }
763
764 local->num_sta--; 702 local->num_sta--;
765 local->sta_generation++; 703 local->sta_generation++;
766 704
767 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 705 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
768 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 706 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
769 707
770 while (sta->sta_state > IEEE80211_STA_NONE) 708 while (sta->sta_state > IEEE80211_STA_NONE) {
771 sta_info_move_state(sta, sta->sta_state - 1); 709 ret = sta_info_move_state(sta, sta->sta_state - 1);
710 if (ret) {
711 WARN_ON_ONCE(1);
712 break;
713 }
714 }
772 715
773 if (sta->uploaded) { 716 if (sta->uploaded) {
774 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 717 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
775 sdata = container_of(sdata->bss, 718 IEEE80211_STA_NOTEXIST);
776 struct ieee80211_sub_if_data, 719 WARN_ON_ONCE(ret != 0);
777 u.ap);
778 drv_sta_remove(local, sdata, &sta->sta);
779 sdata = sta->sdata;
780 } 720 }
781 721
782 /* 722 /*
@@ -787,6 +727,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
787 */ 727 */
788 synchronize_rcu(); 728 synchronize_rcu();
789 729
730 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
731 BUG_ON(!sdata->bss);
732
733 clear_sta_flag(sta, WLAN_STA_PS_STA);
734
735 atomic_dec(&sdata->bss->num_sta_ps);
736 sta_info_recalc_tim(sta);
737 }
738
790 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 739 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
791 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 740 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
792 __skb_queue_purge(&sta->ps_tx_buf[ac]); 741 __skb_queue_purge(&sta->ps_tx_buf[ac]);
@@ -815,35 +764,20 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
815 } 764 }
816#endif 765#endif
817 766
818 /* There could be some memory leaks because of ampdu tx pending queue 767 /*
819 * not being freed before destroying the station info. 768 * Destroy aggregation state here. It would be nice to wait for the
820 * 769 * driver to finish aggregation stop and then clean up, but for now
821 * Make sure that such queues are purged before freeing the station 770 * drivers have to handle aggregation stop being requested, followed
822 * info. 771 * directly by station destruction.
823 * TODO: We have to somehow postpone the full destruction
824 * until the aggregation stop completes. Refer
825 * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
826 */ 772 */
827
828 mutex_lock(&sta->ampdu_mlme.mtx);
829
830 for (i = 0; i < STA_TID_NUM; i++) { 773 for (i = 0; i < STA_TID_NUM; i++) {
831 tid_tx = rcu_dereference_protected_tid_tx(sta, i); 774 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
832 if (!tid_tx) 775 if (!tid_tx)
833 continue; 776 continue;
834 if (skb_queue_len(&tid_tx->pending)) { 777 __skb_queue_purge(&tid_tx->pending);
835#ifdef CONFIG_MAC80211_HT_DEBUG 778 kfree(tid_tx);
836 wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
837 "packets for tid=%d\n",
838 skb_queue_len(&tid_tx->pending), i);
839#endif /* CONFIG_MAC80211_HT_DEBUG */
840 __skb_queue_purge(&tid_tx->pending);
841 }
842 kfree_rcu(tid_tx, rcu_head);
843 } 779 }
844 780
845 mutex_unlock(&sta->ampdu_mlme.mtx);
846
847 sta_info_free(local, sta); 781 sta_info_free(local, sta);
848 782
849 return 0; 783 return 0;
@@ -855,7 +789,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
855 int ret; 789 int ret;
856 790
857 mutex_lock(&sdata->local->sta_mtx); 791 mutex_lock(&sdata->local->sta_mtx);
858 sta = sta_info_get_rx(sdata, addr); 792 sta = sta_info_get(sdata, addr);
859 ret = __sta_info_destroy(sta); 793 ret = __sta_info_destroy(sta);
860 mutex_unlock(&sdata->local->sta_mtx); 794 mutex_unlock(&sdata->local->sta_mtx);
861 795
@@ -869,7 +803,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
869 int ret; 803 int ret;
870 804
871 mutex_lock(&sdata->local->sta_mtx); 805 mutex_lock(&sdata->local->sta_mtx);
872 sta = sta_info_get_bss_rx(sdata, addr); 806 sta = sta_info_get_bss(sdata, addr);
873 ret = __sta_info_destroy(sta); 807 ret = __sta_info_destroy(sta);
874 mutex_unlock(&sdata->local->sta_mtx); 808 mutex_unlock(&sdata->local->sta_mtx);
875 809
@@ -932,8 +866,10 @@ int sta_info_flush(struct ieee80211_local *local,
932 866
933 mutex_lock(&local->sta_mtx); 867 mutex_lock(&local->sta_mtx);
934 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 868 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
935 if (!sdata || sdata == sta->sdata) 869 if (!sdata || sdata == sta->sdata) {
936 WARN_ON(__sta_info_destroy(sta)); 870 WARN_ON(__sta_info_destroy(sta));
871 ret++;
872 }
937 } 873 }
938 mutex_unlock(&local->sta_mtx); 874 mutex_unlock(&local->sta_mtx);
939 875
@@ -976,7 +912,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
976 */ 912 */
977 for_each_sta_info(hw_to_local(hw), addr, sta, nxt) { 913 for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
978 if (localaddr && 914 if (localaddr &&
979 compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0) 915 !ether_addr_equal(sta->sdata->vif.addr, localaddr))
980 continue; 916 continue;
981 if (!sta->uploaded) 917 if (!sta->uploaded)
982 return NULL; 918 return NULL;
@@ -1009,9 +945,11 @@ EXPORT_SYMBOL(ieee80211_find_sta);
1009static void clear_sta_ps_flags(void *_sta) 945static void clear_sta_ps_flags(void *_sta)
1010{ 946{
1011 struct sta_info *sta = _sta; 947 struct sta_info *sta = _sta;
948 struct ieee80211_sub_if_data *sdata = sta->sdata;
1012 949
1013 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 950 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1014 clear_sta_flag(sta, WLAN_STA_PS_STA); 951 if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
952 atomic_dec(&sdata->bss->num_sta_ps);
1015} 953}
1016 954
1017/* powersave support code */ 955/* powersave support code */
@@ -1113,7 +1051,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1113 * exchange. Also set EOSP to indicate this packet 1051 * exchange. Also set EOSP to indicate this packet
1114 * ends the poll/service period. 1052 * ends the poll/service period.
1115 */ 1053 */
1116 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE | 1054 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
1117 IEEE80211_TX_STATUS_EOSP | 1055 IEEE80211_TX_STATUS_EOSP |
1118 IEEE80211_TX_CTL_REQ_TX_STATUS; 1056 IEEE80211_TX_CTL_REQ_TX_STATUS;
1119 1057
@@ -1240,7 +1178,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1240 * STA may still remain is PS mode after this frame 1178 * STA may still remain is PS mode after this frame
1241 * exchange. 1179 * exchange.
1242 */ 1180 */
1243 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE; 1181 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1244 1182
1245 /* 1183 /*
1246 * Use MoreData flag to indicate whether there are 1184 * Use MoreData flag to indicate whether there are
@@ -1257,13 +1195,15 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1257 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1195 ieee80211_is_qos_nullfunc(hdr->frame_control))
1258 qoshdr = ieee80211_get_qos_ctl(hdr); 1196 qoshdr = ieee80211_get_qos_ctl(hdr);
1259 1197
1260 /* set EOSP for the frame */ 1198 /* end service period after last frame */
1261 if (reason == IEEE80211_FRAME_RELEASE_UAPSD && 1199 if (skb_queue_empty(&frames)) {
1262 qoshdr && skb_queue_empty(&frames)) 1200 if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
1263 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 1201 qoshdr)
1202 *qoshdr |= IEEE80211_QOS_CTL_EOSP;
1264 1203
1265 info->flags |= IEEE80211_TX_STATUS_EOSP | 1204 info->flags |= IEEE80211_TX_STATUS_EOSP |
1266 IEEE80211_TX_CTL_REQ_TX_STATUS; 1205 IEEE80211_TX_CTL_REQ_TX_STATUS;
1206 }
1267 1207
1268 if (qoshdr) 1208 if (qoshdr)
1269 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK); 1209 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
@@ -1410,54 +1350,93 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1410} 1350}
1411EXPORT_SYMBOL(ieee80211_sta_set_buffered); 1351EXPORT_SYMBOL(ieee80211_sta_set_buffered);
1412 1352
1413int sta_info_move_state_checked(struct sta_info *sta, 1353int sta_info_move_state(struct sta_info *sta,
1414 enum ieee80211_sta_state new_state) 1354 enum ieee80211_sta_state new_state)
1415{ 1355{
1416 might_sleep(); 1356 might_sleep();
1417 1357
1418 if (sta->sta_state == new_state) 1358 if (sta->sta_state == new_state)
1419 return 0; 1359 return 0;
1420 1360
1361 /* check allowed transitions first */
1362
1363 switch (new_state) {
1364 case IEEE80211_STA_NONE:
1365 if (sta->sta_state != IEEE80211_STA_AUTH)
1366 return -EINVAL;
1367 break;
1368 case IEEE80211_STA_AUTH:
1369 if (sta->sta_state != IEEE80211_STA_NONE &&
1370 sta->sta_state != IEEE80211_STA_ASSOC)
1371 return -EINVAL;
1372 break;
1373 case IEEE80211_STA_ASSOC:
1374 if (sta->sta_state != IEEE80211_STA_AUTH &&
1375 sta->sta_state != IEEE80211_STA_AUTHORIZED)
1376 return -EINVAL;
1377 break;
1378 case IEEE80211_STA_AUTHORIZED:
1379 if (sta->sta_state != IEEE80211_STA_ASSOC)
1380 return -EINVAL;
1381 break;
1382 default:
1383 WARN(1, "invalid state %d", new_state);
1384 return -EINVAL;
1385 }
1386
1387#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1388 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
1389 sta->sdata->name, sta->sta.addr, new_state);
1390#endif
1391
1392 /*
1393 * notify the driver before the actual changes so it can
1394 * fail the transition
1395 */
1396 if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
1397 int err = drv_sta_state(sta->local, sta->sdata, sta,
1398 sta->sta_state, new_state);
1399 if (err)
1400 return err;
1401 }
1402
1403 /* reflect the change in all state variables */
1404
1421 switch (new_state) { 1405 switch (new_state) {
1422 case IEEE80211_STA_NONE: 1406 case IEEE80211_STA_NONE:
1423 if (sta->sta_state == IEEE80211_STA_AUTH) 1407 if (sta->sta_state == IEEE80211_STA_AUTH)
1424 clear_bit(WLAN_STA_AUTH, &sta->_flags); 1408 clear_bit(WLAN_STA_AUTH, &sta->_flags);
1425 else
1426 return -EINVAL;
1427 break; 1409 break;
1428 case IEEE80211_STA_AUTH: 1410 case IEEE80211_STA_AUTH:
1429 if (sta->sta_state == IEEE80211_STA_NONE) 1411 if (sta->sta_state == IEEE80211_STA_NONE)
1430 set_bit(WLAN_STA_AUTH, &sta->_flags); 1412 set_bit(WLAN_STA_AUTH, &sta->_flags);
1431 else if (sta->sta_state == IEEE80211_STA_ASSOC) 1413 else if (sta->sta_state == IEEE80211_STA_ASSOC)
1432 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 1414 clear_bit(WLAN_STA_ASSOC, &sta->_flags);
1433 else
1434 return -EINVAL;
1435 break; 1415 break;
1436 case IEEE80211_STA_ASSOC: 1416 case IEEE80211_STA_ASSOC:
1437 if (sta->sta_state == IEEE80211_STA_AUTH) { 1417 if (sta->sta_state == IEEE80211_STA_AUTH) {
1438 set_bit(WLAN_STA_ASSOC, &sta->_flags); 1418 set_bit(WLAN_STA_ASSOC, &sta->_flags);
1439 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1419 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
1440 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1420 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1441 atomic_dec(&sta->sdata->u.ap.num_sta_authorized); 1421 (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1422 !sta->sdata->u.vlan.sta))
1423 atomic_dec(&sta->sdata->bss->num_mcast_sta);
1442 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1424 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1443 } else 1425 }
1444 return -EINVAL;
1445 break; 1426 break;
1446 case IEEE80211_STA_AUTHORIZED: 1427 case IEEE80211_STA_AUTHORIZED:
1447 if (sta->sta_state == IEEE80211_STA_ASSOC) { 1428 if (sta->sta_state == IEEE80211_STA_ASSOC) {
1448 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1429 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1449 atomic_inc(&sta->sdata->u.ap.num_sta_authorized); 1430 (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1431 !sta->sdata->u.vlan.sta))
1432 atomic_inc(&sta->sdata->bss->num_mcast_sta);
1450 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1433 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1451 } else 1434 }
1452 return -EINVAL;
1453 break; 1435 break;
1454 default: 1436 default:
1455 WARN(1, "invalid state %d", new_state); 1437 break;
1456 return -EINVAL;
1457 } 1438 }
1458 1439
1459 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
1460 sta->sdata->name, sta->sta.addr, new_state);
1461 sta->sta_state = new_state; 1440 sta->sta_state = new_state;
1462 1441
1463 return 0; 1442 return 0;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bfed851d0d36..3bb24a121c95 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -14,6 +14,7 @@
14#include <linux/if_ether.h> 14#include <linux/if_ether.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/average.h> 16#include <linux/average.h>
17#include <linux/etherdevice.h>
17#include "key.h" 18#include "key.h"
18 19
19/** 20/**
@@ -52,7 +53,9 @@
52 * @WLAN_STA_SP: Station is in a service period, so don't try to 53 * @WLAN_STA_SP: Station is in a service period, so don't try to
53 * reply to other uAPSD trigger frames or PS-Poll. 54 * reply to other uAPSD trigger frames or PS-Poll.
54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 55 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
56 * @WLAN_STA_INSERTED: This station is inserted into the hash table.
55 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. 57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
58 * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
56 */ 59 */
57enum ieee80211_sta_info_flags { 60enum ieee80211_sta_info_flags {
58 WLAN_STA_AUTH, 61 WLAN_STA_AUTH,
@@ -72,15 +75,9 @@ enum ieee80211_sta_info_flags {
72 WLAN_STA_UAPSD, 75 WLAN_STA_UAPSD,
73 WLAN_STA_SP, 76 WLAN_STA_SP,
74 WLAN_STA_4ADDR_EVENT, 77 WLAN_STA_4ADDR_EVENT,
78 WLAN_STA_INSERTED,
75 WLAN_STA_RATE_CONTROL, 79 WLAN_STA_RATE_CONTROL,
76}; 80 WLAN_STA_TOFFSET_KNOWN,
77
78enum ieee80211_sta_state {
79 /* NOTE: These need to be ordered correctly! */
80 IEEE80211_STA_NONE,
81 IEEE80211_STA_AUTH,
82 IEEE80211_STA_ASSOC,
83 IEEE80211_STA_AUTHORIZED,
84}; 81};
85 82
86#define STA_TID_NUM 16 83#define STA_TID_NUM 16
@@ -106,6 +103,7 @@ enum ieee80211_sta_state {
106 * @dialog_token: dialog token for aggregation session 103 * @dialog_token: dialog token for aggregation session
107 * @timeout: session timeout value to be filled in ADDBA requests 104 * @timeout: session timeout value to be filled in ADDBA requests
108 * @state: session state (see above) 105 * @state: session state (see above)
106 * @last_tx: jiffies of last tx activity
109 * @stop_initiator: initiator of a session stop 107 * @stop_initiator: initiator of a session stop
110 * @tx_stop: TX DelBA frame when stopping 108 * @tx_stop: TX DelBA frame when stopping
111 * @buf_size: reorder buffer size at receiver 109 * @buf_size: reorder buffer size at receiver
@@ -127,6 +125,7 @@ struct tid_ampdu_tx {
127 struct timer_list addba_resp_timer; 125 struct timer_list addba_resp_timer;
128 struct sk_buff_head pending; 126 struct sk_buff_head pending;
129 unsigned long state; 127 unsigned long state;
128 unsigned long last_tx;
130 u16 timeout; 129 u16 timeout;
131 u8 dialog_token; 130 u8 dialog_token;
132 u8 stop_initiator; 131 u8 stop_initiator;
@@ -144,6 +143,7 @@ struct tid_ampdu_tx {
144 * @reorder_time: jiffies when skb was added 143 * @reorder_time: jiffies when skb was added
145 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) 144 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
146 * @reorder_timer: releases expired frames from the reorder buffer. 145 * @reorder_timer: releases expired frames from the reorder buffer.
146 * @last_rx: jiffies of last rx activity
147 * @head_seq_num: head sequence number in reordering buffer. 147 * @head_seq_num: head sequence number in reordering buffer.
148 * @stored_mpdu_num: number of MPDUs in reordering buffer 148 * @stored_mpdu_num: number of MPDUs in reordering buffer
149 * @ssn: Starting Sequence Number expected to be aggregated. 149 * @ssn: Starting Sequence Number expected to be aggregated.
@@ -168,6 +168,7 @@ struct tid_ampdu_rx {
168 unsigned long *reorder_time; 168 unsigned long *reorder_time;
169 struct timer_list session_timer; 169 struct timer_list session_timer;
170 struct timer_list reorder_timer; 170 struct timer_list reorder_timer;
171 unsigned long last_rx;
171 u16 head_seq_num; 172 u16 head_seq_num;
172 u16 stored_mpdu_num; 173 u16 stored_mpdu_num;
173 u16 ssn; 174 u16 ssn;
@@ -269,12 +270,11 @@ struct sta_ampdu_mlme {
269 * @plink_timeout: timeout of peer link 270 * @plink_timeout: timeout of peer link
270 * @plink_timer: peer link watch timer 271 * @plink_timer: peer link watch timer
271 * @plink_timer_was_running: used by suspend/resume to restore timers 272 * @plink_timer_was_running: used by suspend/resume to restore timers
273 * @t_offset: timing offset relative to this host
272 * @debugfs: debug filesystem info 274 * @debugfs: debug filesystem info
273 * @dead: set to true when sta is unlinked 275 * @dead: set to true when sta is unlinked
274 * @uploaded: set to true when sta is uploaded to the driver 276 * @uploaded: set to true when sta is uploaded to the driver
275 * @lost_packets: number of consecutive lost packets 277 * @lost_packets: number of consecutive lost packets
276 * @dummy: indicate a dummy station created for receiving
277 * EAP frames before association
278 * @sta: station information we share with the driver 278 * @sta: station information we share with the driver
279 * @sta_state: duplicates information about station state (for debug) 279 * @sta_state: duplicates information about station state (for debug)
280 * @beacon_loss_count: number of times beacon loss has triggered 280 * @beacon_loss_count: number of times beacon loss has triggered
@@ -360,6 +360,9 @@ struct sta_info {
360 enum nl80211_plink_state plink_state; 360 enum nl80211_plink_state plink_state;
361 u32 plink_timeout; 361 u32 plink_timeout;
362 struct timer_list plink_timer; 362 struct timer_list plink_timer;
363 s64 t_offset;
364 s64 t_offset_setpoint;
365 enum nl80211_channel_type ch_type;
363#endif 366#endif
364 367
365#ifdef CONFIG_MAC80211_DEBUGFS 368#ifdef CONFIG_MAC80211_DEBUGFS
@@ -372,8 +375,7 @@ struct sta_info {
372 unsigned int lost_packets; 375 unsigned int lost_packets;
373 unsigned int beacon_loss_count; 376 unsigned int beacon_loss_count;
374 377
375 /* should be right in front of sta to be in the same cache line */ 378 bool supports_40mhz;
376 bool dummy;
377 379
378 /* keep last! */ 380 /* keep last! */
379 struct ieee80211_sta sta; 381 struct ieee80211_sta sta;
@@ -429,13 +431,17 @@ static inline int test_and_set_sta_flag(struct sta_info *sta,
429 return test_and_set_bit(flag, &sta->_flags); 431 return test_and_set_bit(flag, &sta->_flags);
430} 432}
431 433
432int sta_info_move_state_checked(struct sta_info *sta, 434int sta_info_move_state(struct sta_info *sta,
433 enum ieee80211_sta_state new_state); 435 enum ieee80211_sta_state new_state);
434 436
435static inline void sta_info_move_state(struct sta_info *sta, 437static inline void sta_info_pre_move_state(struct sta_info *sta,
436 enum ieee80211_sta_state new_state) 438 enum ieee80211_sta_state new_state)
437{ 439{
438 int ret = sta_info_move_state_checked(sta, new_state); 440 int ret;
441
442 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
443
444 ret = sta_info_move_state(sta, new_state);
439 WARN_ON_ONCE(ret); 445 WARN_ON_ONCE(ret);
440} 446}
441 447
@@ -472,15 +478,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
472struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 478struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
473 const u8 *addr); 479 const u8 *addr);
474 480
475struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
476 const u8 *addr);
477
478struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 481struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
479 const u8 *addr); 482 const u8 *addr);
480 483
481struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
482 const u8 *addr);
483
484static inline 484static inline
485void for_each_sta_info_type_check(struct ieee80211_local *local, 485void for_each_sta_info_type_check(struct ieee80211_local *local,
486 const u8 *addr, 486 const u8 *addr,
@@ -489,23 +489,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
489{ 489{
490} 490}
491 491
492#define for_each_sta_info(local, _addr, _sta, nxt) \ 492#define for_each_sta_info(local, _addr, _sta, nxt) \
493 for ( /* initialise loop */ \
494 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
495 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
496 /* typecheck */ \
497 for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
498 /* continue condition */ \
499 _sta; \
500 /* advance loop */ \
501 _sta = nxt, \
502 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
503 ) \
504 /* run code only if address matches and it's not a dummy sta */ \
505 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
506 !_sta->dummy)
507
508#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
509 for ( /* initialise loop */ \ 493 for ( /* initialise loop */ \
510 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ 494 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
511 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \ 495 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
@@ -518,7 +502,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
518 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ 502 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
519 ) \ 503 ) \
520 /* compare address and run code only if it matches */ \ 504 /* compare address and run code only if it matches */ \
521 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0) 505 if (ether_addr_equal(_sta->sta.addr, (_addr)))
522 506
523/* 507/*
524 * Get STA info by index, BROKEN! 508 * Get STA info by index, BROKEN!
@@ -544,8 +528,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
544 */ 528 */
545int sta_info_insert(struct sta_info *sta); 529int sta_info_insert(struct sta_info *sta);
546int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); 530int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
547int sta_info_reinsert(struct sta_info *sta);
548 531
532int __must_check __sta_info_destroy(struct sta_info *sta);
549int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, 533int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
550 const u8 *addr); 534 const u8 *addr);
551int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 535int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
@@ -557,6 +541,9 @@ void sta_info_init(struct ieee80211_local *local);
557void sta_info_stop(struct ieee80211_local *local); 541void sta_info_stop(struct ieee80211_local *local);
558int sta_info_flush(struct ieee80211_local *local, 542int sta_info_flush(struct ieee80211_local *local,
559 struct ieee80211_sub_if_data *sdata); 543 struct ieee80211_sub_if_data *sdata);
544void sta_set_rate_info_tx(struct sta_info *sta,
545 const struct ieee80211_tx_rate *rate,
546 struct rate_info *rinfo);
560void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 547void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
561 unsigned long exp_time); 548 unsigned long exp_time);
562 549
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 30c265c98f73..28cfa981cfb1 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -10,7 +10,9 @@
10 */ 10 */
11 11
12#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/etherdevice.h>
13#include <net/mac80211.h> 14#include <net/mac80211.h>
15#include <asm/unaligned.h>
14#include "ieee80211_i.h" 16#include "ieee80211_i.h"
15#include "rate.h" 17#include "rate.h"
16#include "mesh.h" 18#include "mesh.h"
@@ -350,11 +352,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
350 bool send_to_cooked; 352 bool send_to_cooked;
351 bool acked; 353 bool acked;
352 struct ieee80211_bar *bar; 354 struct ieee80211_bar *bar;
353 u16 tid;
354 int rtap_len; 355 int rtap_len;
355 356
356 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 357 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
357 if (info->status.rates[i].idx < 0) { 358 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
359 !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
360 /* just the first aggr frame carry status info */
361 info->status.rates[i].idx = -1;
362 info->status.rates[i].count = 0;
363 break;
364 } else if (info->status.rates[i].idx < 0) {
358 break; 365 break;
359 } else if (i >= hw->max_report_rates) { 366 } else if (i >= hw->max_report_rates) {
360 /* the HW cannot have attempted that rate */ 367 /* the HW cannot have attempted that rate */
@@ -377,7 +384,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
377 384
378 for_each_sta_info(local, hdr->addr1, sta, tmp) { 385 for_each_sta_info(local, hdr->addr1, sta, tmp) {
379 /* skip wrong virtual interface */ 386 /* skip wrong virtual interface */
380 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN)) 387 if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
381 continue; 388 continue;
382 389
383 if (info->flags & IEEE80211_TX_STATUS_EOSP) 390 if (info->flags & IEEE80211_TX_STATUS_EOSP)
@@ -412,7 +419,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
412 } 419 }
413 420
414 if (!acked && ieee80211_is_back_req(fc)) { 421 if (!acked && ieee80211_is_back_req(fc)) {
415 u16 control; 422 u16 tid, control;
416 423
417 /* 424 /*
418 * BAR failed, store the last SSN and retry sending 425 * BAR failed, store the last SSN and retry sending
@@ -516,7 +523,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
516 523
517 if (ieee80211_is_nullfunc(hdr->frame_control) || 524 if (ieee80211_is_nullfunc(hdr->frame_control) ||
518 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 525 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
519 bool acked = info->flags & IEEE80211_TX_STAT_ACK; 526 acked = info->flags & IEEE80211_TX_STAT_ACK;
527
520 cfg80211_probe_status(skb->dev, hdr->addr1, 528 cfg80211_probe_status(skb->dev, hdr->addr1,
521 cookie, acked, GFP_ATOMIC); 529 cookie, acked, GFP_ATOMIC);
522 } else { 530 } else {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e05667cd5e76..847215bb2a6f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,13 +153,13 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
153 153
154 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ 154 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
155 if (ieee80211_is_data_qos(hdr->frame_control) && 155 if (ieee80211_is_data_qos(hdr->frame_control) &&
156 *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK) 156 *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
157 dur = 0; 157 dur = 0;
158 else 158 else
159 /* Time needed to transmit ACK 159 /* Time needed to transmit ACK
160 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up 160 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
161 * to closest integer */ 161 * to closest integer */
162 dur = ieee80211_frame_duration(local, 10, rate, erp, 162 dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
163 tx->sdata->vif.bss_conf.use_short_preamble); 163 tx->sdata->vif.bss_conf.use_short_preamble);
164 164
165 if (next_frag_len) { 165 if (next_frag_len) {
@@ -167,7 +167,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
167 * transmit next fragment plus ACK and 2 x SIFS. */ 167 * transmit next fragment plus ACK and 2 x SIFS. */
168 dur *= 2; /* ACK + SIFS */ 168 dur *= 2; /* ACK + SIFS */
169 /* next fragment */ 169 /* next fragment */
170 dur += ieee80211_frame_duration(local, next_frag_len, 170 dur += ieee80211_frame_duration(sband->band, next_frag_len,
171 txrate->bitrate, erp, 171 txrate->bitrate, erp,
172 tx->sdata->vif.bss_conf.use_short_preamble); 172 tx->sdata->vif.bss_conf.use_short_preamble);
173 } 173 }
@@ -226,13 +226,13 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
226 * have correct qos tag for some reason, due the network or the 226 * have correct qos tag for some reason, due the network or the
227 * peer application. 227 * peer application.
228 * 228 *
229 * Note: local->uapsd_queues access is racy here. If the value is 229 * Note: ifmgd->uapsd_queues access is racy here. If the value is
230 * changed via debugfs, user needs to reassociate manually to have 230 * changed via debugfs, user needs to reassociate manually to have
231 * everything in sync. 231 * everything in sync.
232 */ 232 */
233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
234 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 234 (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
235 && skb_get_queue_mapping(tx->skb) == 0) 235 skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
236 return TX_CONTINUE; 236 return TX_CONTINUE;
237 237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -306,7 +306,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
306 } 306 }
307 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && 307 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
308 ieee80211_is_data(hdr->frame_control) && 308 ieee80211_is_data(hdr->frame_control) &&
309 !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) { 309 !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) {
310 /* 310 /*
311 * No associated STAs - no need to send multicast 311 * No associated STAs - no need to send multicast
312 * frames. 312 * frames.
@@ -400,6 +400,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
400 return TX_CONTINUE; 400 return TX_CONTINUE;
401 401
402 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; 402 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
403 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
404 info->hw_queue = tx->sdata->vif.cab_queue;
403 405
404 /* device releases frame after DTIM beacon */ 406 /* device releases frame after DTIM beacon */
405 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) 407 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
@@ -411,9 +413,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
411 413
412 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { 414 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
413#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 415#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
414 if (net_ratelimit()) 416 net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
415 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", 417 tx->sdata->name);
416 tx->sdata->name);
417#endif 418#endif
418 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 419 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
419 } else 420 } else
@@ -448,18 +449,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
448 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
449 struct ieee80211_local *local = tx->local; 450 struct ieee80211_local *local = tx->local;
450 451
451 if (unlikely(!sta || 452 if (unlikely(!sta))
452 ieee80211_is_probe_resp(hdr->frame_control) ||
453 ieee80211_is_auth(hdr->frame_control) ||
454 ieee80211_is_assoc_resp(hdr->frame_control) ||
455 ieee80211_is_reassoc_resp(hdr->frame_control)))
456 return TX_CONTINUE; 453 return TX_CONTINUE;
457 454
458 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) || 455 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
459 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) && 456 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
460 !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) { 457 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
461 int ac = skb_get_queue_mapping(tx->skb); 458 int ac = skb_get_queue_mapping(tx->skb);
462 459
460 /* only deauth, disassoc and action are bufferable MMPDUs */
461 if (ieee80211_is_mgmt(hdr->frame_control) &&
462 !ieee80211_is_deauth(hdr->frame_control) &&
463 !ieee80211_is_disassoc(hdr->frame_control) &&
464 !ieee80211_is_action(hdr->frame_control)) {
465 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
466 return TX_CONTINUE;
467 }
468
463#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 469#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
464 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n", 470 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
465 sta->sta.addr, sta->sta.aid, ac); 471 sta->sta.addr, sta->sta.aid, ac);
@@ -469,10 +475,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
469 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { 475 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
470 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); 476 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
471#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 477#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
472 if (net_ratelimit()) 478 net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
473 printk(KERN_DEBUG "%s: STA %pM TX buffer for " 479 tx->sdata->name, sta->sta.addr, ac);
474 "AC %d full - dropping oldest frame\n",
475 tx->sdata->name, sta->sta.addr, ac);
476#endif 480#endif
477 dev_kfree_skb(old); 481 dev_kfree_skb(old);
478 } else 482 } else
@@ -625,7 +629,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
625 tx->local->hw.wiphy->frag_threshold); 629 tx->local->hw.wiphy->frag_threshold);
626 630
627 /* set up the tx rate control struct we give the RC algo */ 631 /* set up the tx rate control struct we give the RC algo */
628 txrc.hw = local_to_hw(tx->local); 632 txrc.hw = &tx->local->hw;
629 txrc.sband = sband; 633 txrc.sband = sband;
630 txrc.bss_conf = &tx->sdata->vif.bss_conf; 634 txrc.bss_conf = &tx->sdata->vif.bss_conf;
631 txrc.skb = tx->skb; 635 txrc.skb = tx->skb;
@@ -635,6 +639,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
635 txrc.max_rate_idx = -1; 639 txrc.max_rate_idx = -1;
636 else 640 else
637 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 641 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
642 memcpy(txrc.rate_idx_mcs_mask,
643 tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
644 sizeof(txrc.rate_idx_mcs_mask));
638 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || 645 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
639 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || 646 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
640 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); 647 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
@@ -1057,6 +1064,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1057{ 1064{
1058 bool queued = false; 1065 bool queued = false;
1059 bool reset_agg_timer = false; 1066 bool reset_agg_timer = false;
1067 struct sk_buff *purge_skb = NULL;
1060 1068
1061 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1069 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1062 info->flags |= IEEE80211_TX_CTL_AMPDU; 1070 info->flags |= IEEE80211_TX_CTL_AMPDU;
@@ -1098,14 +1106,18 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1098 info->control.vif = &tx->sdata->vif; 1106 info->control.vif = &tx->sdata->vif;
1099 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1107 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1100 __skb_queue_tail(&tid_tx->pending, skb); 1108 __skb_queue_tail(&tid_tx->pending, skb);
1109 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1110 purge_skb = __skb_dequeue(&tid_tx->pending);
1101 } 1111 }
1102 spin_unlock(&tx->sta->lock); 1112 spin_unlock(&tx->sta->lock);
1113
1114 if (purge_skb)
1115 dev_kfree_skb(purge_skb);
1103 } 1116 }
1104 1117
1105 /* reset session timer */ 1118 /* reset session timer */
1106 if (reset_agg_timer && tid_tx->timeout) 1119 if (reset_agg_timer && tid_tx->timeout)
1107 mod_timer(&tid_tx->session_timer, 1120 tid_tx->last_tx = jiffies;
1108 TU_TO_EXP_TIME(tid_tx->timeout));
1109 1121
1110 return queued; 1122 return queued;
1111} 1123}
@@ -1144,7 +1156,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1144 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1156 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1145 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1157 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1146 return TX_DROP; 1158 return TX_DROP;
1147 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { 1159 } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
1160 tx->sdata->control_port_protocol == tx->skb->protocol) {
1148 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1161 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1149 } 1162 }
1150 if (!tx->sta) 1163 if (!tx->sta)
@@ -1201,11 +1214,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1201 bool txpending) 1214 bool txpending)
1202{ 1215{
1203 struct sk_buff *skb, *tmp; 1216 struct sk_buff *skb, *tmp;
1204 struct ieee80211_tx_info *info;
1205 unsigned long flags; 1217 unsigned long flags;
1206 1218
1207 skb_queue_walk_safe(skbs, skb, tmp) { 1219 skb_queue_walk_safe(skbs, skb, tmp) {
1208 int q = skb_get_queue_mapping(skb); 1220 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1221 int q = info->hw_queue;
1222
1223#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1224 if (WARN_ON_ONCE(q >= local->hw.queues)) {
1225 __skb_unlink(skb, skbs);
1226 dev_kfree_skb(skb);
1227 continue;
1228 }
1229#endif
1209 1230
1210 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1231 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1211 if (local->queue_stop_reasons[q] || 1232 if (local->queue_stop_reasons[q] ||
@@ -1227,7 +1248,6 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1227 } 1248 }
1228 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1249 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1229 1250
1230 info = IEEE80211_SKB_CB(skb);
1231 info->control.vif = vif; 1251 info->control.vif = vif;
1232 info->control.sta = sta; 1252 info->control.sta = sta;
1233 1253
@@ -1270,8 +1290,16 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
1270 1290
1271 switch (sdata->vif.type) { 1291 switch (sdata->vif.type) {
1272 case NL80211_IFTYPE_MONITOR: 1292 case NL80211_IFTYPE_MONITOR:
1273 sdata = NULL; 1293 sdata = rcu_dereference(local->monitor_sdata);
1274 vif = NULL; 1294 if (sdata) {
1295 vif = &sdata->vif;
1296 info->hw_queue =
1297 vif->hw_queue[skb_get_queue_mapping(skb)];
1298 } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
1299 dev_kfree_skb(skb);
1300 return true;
1301 } else
1302 vif = NULL;
1275 break; 1303 break;
1276 case NL80211_IFTYPE_AP_VLAN: 1304 case NL80211_IFTYPE_AP_VLAN:
1277 sdata = container_of(sdata->bss, 1305 sdata = container_of(sdata->bss,
@@ -1386,6 +1414,12 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1386 tx.channel = local->hw.conf.channel; 1414 tx.channel = local->hw.conf.channel;
1387 info->band = tx.channel->band; 1415 info->band = tx.channel->band;
1388 1416
1417 /* set up hw_queue value early */
1418 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
1419 !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
1420 info->hw_queue =
1421 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
1422
1389 if (!invoke_tx_handlers(&tx)) 1423 if (!invoke_tx_handlers(&tx))
1390 result = __ieee80211_tx(local, &tx.skbs, led_len, 1424 result = __ieee80211_tx(local, &tx.skbs, led_len,
1391 tx.sta, txpending); 1425 tx.sta, txpending);
@@ -1454,12 +1488,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1454 1488
1455 if (ieee80211_vif_is_mesh(&sdata->vif) && 1489 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1456 ieee80211_is_data(hdr->frame_control) && 1490 ieee80211_is_data(hdr->frame_control) &&
1457 !is_multicast_ether_addr(hdr->addr1)) 1491 !is_multicast_ether_addr(hdr->addr1) &&
1458 if (mesh_nexthop_resolve(skb, sdata)) { 1492 mesh_nexthop_resolve(skb, sdata)) {
1459 /* skb queued: don't free */ 1493 /* skb queued: don't free */
1460 rcu_read_unlock(); 1494 rcu_read_unlock();
1461 return; 1495 return;
1462 } 1496 }
1463 1497
1464 ieee80211_set_qos_hdr(sdata, skb); 1498 ieee80211_set_qos_hdr(sdata, skb);
1465 ieee80211_tx(sdata, skb, false); 1499 ieee80211_tx(sdata, skb, false);
@@ -1628,7 +1662,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1628 skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { 1662 skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
1629 u8 *payload = (u8 *)hdr + hdrlen; 1663 u8 *payload = (u8 *)hdr + hdrlen;
1630 1664
1631 if (compare_ether_addr(payload, rfc1042_header) == 0) 1665 if (ether_addr_equal(payload, rfc1042_header))
1632 skb->protocol = cpu_to_be16((payload[6] << 8) | 1666 skb->protocol = cpu_to_be16((payload[6] << 8) |
1633 payload[7]); 1667 payload[7]);
1634 } 1668 }
@@ -1661,7 +1695,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1661 tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1695 tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1662 tmp_sdata->vif.type == NL80211_IFTYPE_WDS) 1696 tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
1663 continue; 1697 continue;
1664 if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) { 1698 if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
1665 sdata = tmp_sdata; 1699 sdata = tmp_sdata;
1666 break; 1700 break;
1667 } 1701 }
@@ -1778,9 +1812,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1778 * is being proxied by a portal (i.e. portal address 1812 * is being proxied by a portal (i.e. portal address
1779 * differs from proxied address) 1813 * differs from proxied address)
1780 */ 1814 */
1781 if (compare_ether_addr(sdata->vif.addr, 1815 if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
1782 skb->data + ETH_ALEN) == 0 && 1816 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
1783 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
1784 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1817 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1785 skb->data, skb->data + ETH_ALEN); 1818 skb->data, skb->data + ETH_ALEN);
1786 rcu_read_unlock(); 1819 rcu_read_unlock();
@@ -1915,7 +1948,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1915 wme_sta = true; 1948 wme_sta = true;
1916 1949
1917 /* receiver and we are QoS enabled, use a QoS type frame */ 1950 /* receiver and we are QoS enabled, use a QoS type frame */
1918 if (wme_sta && local->hw.queues >= 4) { 1951 if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
1919 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1952 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1920 hdrlen += 2; 1953 hdrlen += 2;
1921 } 1954 }
@@ -1927,12 +1960,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1927 if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && 1960 if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
1928 !is_multicast_ether_addr(hdr.addr1) && !authorized && 1961 !is_multicast_ether_addr(hdr.addr1) && !authorized &&
1929 (cpu_to_be16(ethertype) != sdata->control_port_protocol || 1962 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1930 compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) { 1963 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1931#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1964#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1932 if (net_ratelimit()) 1965 net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
1933 printk(KERN_DEBUG "%s: dropped frame to %pM" 1966 dev->name, hdr.addr1);
1934 " (unauthorized port)\n", dev->name,
1935 hdr.addr1);
1936#endif 1967#endif
1937 1968
1938 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); 1969 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
@@ -2156,7 +2187,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2156void ieee80211_tx_pending(unsigned long data) 2187void ieee80211_tx_pending(unsigned long data)
2157{ 2188{
2158 struct ieee80211_local *local = (struct ieee80211_local *)data; 2189 struct ieee80211_local *local = (struct ieee80211_local *)data;
2159 struct ieee80211_sub_if_data *sdata;
2160 unsigned long flags; 2190 unsigned long flags;
2161 int i; 2191 int i;
2162 bool txok; 2192 bool txok;
@@ -2193,8 +2223,7 @@ void ieee80211_tx_pending(unsigned long data)
2193 } 2223 }
2194 2224
2195 if (skb_queue_empty(&local->pending[i])) 2225 if (skb_queue_empty(&local->pending[i]))
2196 list_for_each_entry_rcu(sdata, &local->interfaces, list) 2226 ieee80211_propagate_queue_wake(local, i);
2197 netif_wake_subqueue(sdata->dev, i);
2198 } 2227 }
2199 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2228 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2200 2229
@@ -2203,7 +2232,8 @@ void ieee80211_tx_pending(unsigned long data)
2203 2232
2204/* functions for drivers to get certain frames */ 2233/* functions for drivers to get certain frames */
2205 2234
2206static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, 2235static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2236 struct ieee80211_if_ap *bss,
2207 struct sk_buff *skb, 2237 struct sk_buff *skb,
2208 struct beacon_data *beacon) 2238 struct beacon_data *beacon)
2209{ 2239{
@@ -2220,7 +2250,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2220 IEEE80211_MAX_AID+1); 2250 IEEE80211_MAX_AID+1);
2221 2251
2222 if (bss->dtim_count == 0) 2252 if (bss->dtim_count == 0)
2223 bss->dtim_count = beacon->dtim_period - 1; 2253 bss->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2224 else 2254 else
2225 bss->dtim_count--; 2255 bss->dtim_count--;
2226 2256
@@ -2228,7 +2258,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2228 *pos++ = WLAN_EID_TIM; 2258 *pos++ = WLAN_EID_TIM;
2229 *pos++ = 4; 2259 *pos++ = 4;
2230 *pos++ = bss->dtim_count; 2260 *pos++ = bss->dtim_count;
2231 *pos++ = beacon->dtim_period; 2261 *pos++ = sdata->vif.bss_conf.dtim_period;
2232 2262
2233 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) 2263 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
2234 aid0 = 1; 2264 aid0 = 1;
@@ -2321,12 +2351,14 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2321 * of the tim bitmap in mac80211 and the driver. 2351 * of the tim bitmap in mac80211 and the driver.
2322 */ 2352 */
2323 if (local->tim_in_locked_section) { 2353 if (local->tim_in_locked_section) {
2324 ieee80211_beacon_add_tim(ap, skb, beacon); 2354 ieee80211_beacon_add_tim(sdata, ap, skb,
2355 beacon);
2325 } else { 2356 } else {
2326 unsigned long flags; 2357 unsigned long flags;
2327 2358
2328 spin_lock_irqsave(&local->tim_lock, flags); 2359 spin_lock_irqsave(&local->tim_lock, flags);
2329 ieee80211_beacon_add_tim(ap, skb, beacon); 2360 ieee80211_beacon_add_tim(sdata, ap, skb,
2361 beacon);
2330 spin_unlock_irqrestore(&local->tim_lock, flags); 2362 spin_unlock_irqrestore(&local->tim_lock, flags);
2331 } 2363 }
2332 2364
@@ -2357,6 +2389,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2357 IEEE80211_STYPE_BEACON); 2389 IEEE80211_STYPE_BEACON);
2358 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 2390 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2359 struct ieee80211_mgmt *mgmt; 2391 struct ieee80211_mgmt *mgmt;
2392 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2360 u8 *pos; 2393 u8 *pos;
2361 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + 2394 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
2362 sizeof(mgmt->u.beacon); 2395 sizeof(mgmt->u.beacon);
@@ -2366,6 +2399,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2366 goto out; 2399 goto out;
2367#endif 2400#endif
2368 2401
2402 if (ifmsh->sync_ops)
2403 ifmsh->sync_ops->adjust_tbtt(
2404 sdata);
2405
2369 skb = dev_alloc_skb(local->tx_headroom + 2406 skb = dev_alloc_skb(local->tx_headroom +
2370 hdr_len + 2407 hdr_len +
2371 2 + /* NULL SSID */ 2408 2 + /* NULL SSID */
@@ -2373,7 +2410,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2373 2 + 3 + /* DS params */ 2410 2 + 3 + /* DS params */
2374 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2411 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2375 2 + sizeof(struct ieee80211_ht_cap) + 2412 2 + sizeof(struct ieee80211_ht_cap) +
2376 2 + sizeof(struct ieee80211_ht_info) + 2413 2 + sizeof(struct ieee80211_ht_operation) +
2377 2 + sdata->u.mesh.mesh_id_len + 2414 2 + sdata->u.mesh.mesh_id_len +
2378 2 + sizeof(struct ieee80211_meshconf_ie) + 2415 2 + sizeof(struct ieee80211_meshconf_ie) +
2379 sdata->u.mesh.ie_len); 2416 sdata->u.mesh.ie_len);
@@ -2397,12 +2434,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2397 *pos++ = WLAN_EID_SSID; 2434 *pos++ = WLAN_EID_SSID;
2398 *pos++ = 0x0; 2435 *pos++ = 0x0;
2399 2436
2400 if (ieee80211_add_srates_ie(&sdata->vif, skb) || 2437 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
2401 mesh_add_ds_params_ie(skb, sdata) || 2438 mesh_add_ds_params_ie(skb, sdata) ||
2402 ieee80211_add_ext_srates_ie(&sdata->vif, skb) || 2439 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
2403 mesh_add_rsn_ie(skb, sdata) || 2440 mesh_add_rsn_ie(skb, sdata) ||
2404 mesh_add_ht_cap_ie(skb, sdata) || 2441 mesh_add_ht_cap_ie(skb, sdata) ||
2405 mesh_add_ht_info_ie(skb, sdata) || 2442 mesh_add_ht_oper_ie(skb, sdata) ||
2406 mesh_add_meshid_ie(skb, sdata) || 2443 mesh_add_meshid_ie(skb, sdata) ||
2407 mesh_add_meshconf_ie(skb, sdata) || 2444 mesh_add_meshconf_ie(skb, sdata) ||
2408 mesh_add_vendor_ies(skb, sdata)) { 2445 mesh_add_vendor_ies(skb, sdata)) {
@@ -2431,6 +2468,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2431 txrc.max_rate_idx = -1; 2468 txrc.max_rate_idx = -1;
2432 else 2469 else
2433 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 2470 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2471 memcpy(txrc.rate_idx_mcs_mask, sdata->rc_rateidx_mcs_mask[band],
2472 sizeof(txrc.rate_idx_mcs_mask));
2434 txrc.bss = true; 2473 txrc.bss = true;
2435 rate_control_get_rate(sdata, NULL, &txrc); 2474 rate_control_get_rate(sdata, NULL, &txrc);
2436 2475
@@ -2584,7 +2623,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2584 pos = skb_put(skb, ie_ssid_len); 2623 pos = skb_put(skb, ie_ssid_len);
2585 *pos++ = WLAN_EID_SSID; 2624 *pos++ = WLAN_EID_SSID;
2586 *pos++ = ssid_len; 2625 *pos++ = ssid_len;
2587 if (ssid) 2626 if (ssid_len)
2588 memcpy(pos, ssid, ssid_len); 2627 memcpy(pos, ssid, ssid_len);
2589 pos += ssid_len; 2628 pos += ssid_len;
2590 2629
@@ -2691,11 +2730,13 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2691void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 2730void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
2692 struct sk_buff *skb, int tid) 2731 struct sk_buff *skb, int tid)
2693{ 2732{
2733 int ac = ieee802_1d_to_ac[tid];
2734
2694 skb_set_mac_header(skb, 0); 2735 skb_set_mac_header(skb, 0);
2695 skb_set_network_header(skb, 0); 2736 skb_set_network_header(skb, 0);
2696 skb_set_transport_header(skb, 0); 2737 skb_set_transport_header(skb, 0);
2697 2738
2698 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 2739 skb_set_queue_mapping(skb, ac);
2699 skb->priority = tid; 2740 skb->priority = tid;
2700 2741
2701 /* 2742 /*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 9919892575f4..a44c6807df01 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -106,7 +106,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
106 } 106 }
107} 107}
108 108
109int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 109int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
110 int rate, int erp, int short_preamble) 110 int rate, int erp, int short_preamble)
111{ 111{
112 int dur; 112 int dur;
@@ -120,7 +120,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
120 * DIV_ROUND_UP() operations. 120 * DIV_ROUND_UP() operations.
121 */ 121 */
122 122
123 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { 123 if (band == IEEE80211_BAND_5GHZ || erp) {
124 /* 124 /*
125 * OFDM: 125 * OFDM:
126 * 126 *
@@ -162,10 +162,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
162/* Exported duration function for driver use */ 162/* Exported duration function for driver use */
163__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, 163__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
164 struct ieee80211_vif *vif, 164 struct ieee80211_vif *vif,
165 enum ieee80211_band band,
165 size_t frame_len, 166 size_t frame_len,
166 struct ieee80211_rate *rate) 167 struct ieee80211_rate *rate)
167{ 168{
168 struct ieee80211_local *local = hw_to_local(hw);
169 struct ieee80211_sub_if_data *sdata; 169 struct ieee80211_sub_if_data *sdata;
170 u16 dur; 170 u16 dur;
171 int erp; 171 int erp;
@@ -179,7 +179,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
179 erp = rate->flags & IEEE80211_RATE_ERP_G; 179 erp = rate->flags & IEEE80211_RATE_ERP_G;
180 } 180 }
181 181
182 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, 182 dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
183 short_preamble); 183 short_preamble);
184 184
185 return cpu_to_le16(dur); 185 return cpu_to_le16(dur);
@@ -198,7 +198,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
198 u16 dur; 198 u16 dur;
199 struct ieee80211_supported_band *sband; 199 struct ieee80211_supported_band *sband;
200 200
201 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 201 sband = local->hw.wiphy->bands[frame_txctl->band];
202 202
203 short_preamble = false; 203 short_preamble = false;
204 204
@@ -213,13 +213,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
213 } 213 }
214 214
215 /* CTS duration */ 215 /* CTS duration */
216 dur = ieee80211_frame_duration(local, 10, rate->bitrate, 216 dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
217 erp, short_preamble); 217 erp, short_preamble);
218 /* Data frame duration */ 218 /* Data frame duration */
219 dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, 219 dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
220 erp, short_preamble); 220 erp, short_preamble);
221 /* ACK duration */ 221 /* ACK duration */
222 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 222 dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
223 erp, short_preamble); 223 erp, short_preamble);
224 224
225 return cpu_to_le16(dur); 225 return cpu_to_le16(dur);
@@ -239,7 +239,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
239 u16 dur; 239 u16 dur;
240 struct ieee80211_supported_band *sband; 240 struct ieee80211_supported_band *sband;
241 241
242 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 242 sband = local->hw.wiphy->bands[frame_txctl->band];
243 243
244 short_preamble = false; 244 short_preamble = false;
245 245
@@ -253,11 +253,11 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
253 } 253 }
254 254
255 /* Data frame duration */ 255 /* Data frame duration */
256 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 256 dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
257 erp, short_preamble); 257 erp, short_preamble);
258 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { 258 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
259 /* ACK duration */ 259 /* ACK duration */
260 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 260 dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
261 erp, short_preamble); 261 erp, short_preamble);
262 } 262 }
263 263
@@ -265,17 +265,45 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
265} 265}
266EXPORT_SYMBOL(ieee80211_ctstoself_duration); 266EXPORT_SYMBOL(ieee80211_ctstoself_duration);
267 267
268void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
269{
270 struct ieee80211_sub_if_data *sdata;
271
272 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
273 int ac;
274
275 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
276 continue;
277
278 if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
279 local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
280 continue;
281
282 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
283 int ac_queue = sdata->vif.hw_queue[ac];
284
285 if (ac_queue == queue ||
286 (sdata->vif.cab_queue == queue &&
287 local->queue_stop_reasons[ac_queue] == 0 &&
288 skb_queue_empty(&local->pending[ac_queue])))
289 netif_wake_subqueue(sdata->dev, ac);
290 }
291 }
292}
293
268static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, 294static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
269 enum queue_stop_reason reason) 295 enum queue_stop_reason reason)
270{ 296{
271 struct ieee80211_local *local = hw_to_local(hw); 297 struct ieee80211_local *local = hw_to_local(hw);
272 struct ieee80211_sub_if_data *sdata;
273 298
274 trace_wake_queue(local, queue, reason); 299 trace_wake_queue(local, queue, reason);
275 300
276 if (WARN_ON(queue >= hw->queues)) 301 if (WARN_ON(queue >= hw->queues))
277 return; 302 return;
278 303
304 if (!test_bit(reason, &local->queue_stop_reasons[queue]))
305 return;
306
279 __clear_bit(reason, &local->queue_stop_reasons[queue]); 307 __clear_bit(reason, &local->queue_stop_reasons[queue]);
280 308
281 if (local->queue_stop_reasons[queue] != 0) 309 if (local->queue_stop_reasons[queue] != 0)
@@ -284,11 +312,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
284 312
285 if (skb_queue_empty(&local->pending[queue])) { 313 if (skb_queue_empty(&local->pending[queue])) {
286 rcu_read_lock(); 314 rcu_read_lock();
287 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 315 ieee80211_propagate_queue_wake(local, queue);
288 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
289 continue;
290 netif_wake_subqueue(sdata->dev, queue);
291 }
292 rcu_read_unlock(); 316 rcu_read_unlock();
293 } else 317 } else
294 tasklet_schedule(&local->tx_pending_tasklet); 318 tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,11 +347,21 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
323 if (WARN_ON(queue >= hw->queues)) 347 if (WARN_ON(queue >= hw->queues))
324 return; 348 return;
325 349
350 if (test_bit(reason, &local->queue_stop_reasons[queue]))
351 return;
352
326 __set_bit(reason, &local->queue_stop_reasons[queue]); 353 __set_bit(reason, &local->queue_stop_reasons[queue]);
327 354
328 rcu_read_lock(); 355 rcu_read_lock();
329 list_for_each_entry_rcu(sdata, &local->interfaces, list) 356 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
330 netif_stop_subqueue(sdata->dev, queue); 357 int ac;
358
359 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
360 if (sdata->vif.hw_queue[ac] == queue ||
361 sdata->vif.cab_queue == queue)
362 netif_stop_subqueue(sdata->dev, ac);
363 }
364 }
331 rcu_read_unlock(); 365 rcu_read_unlock();
332} 366}
333 367
@@ -354,8 +388,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
354{ 388{
355 struct ieee80211_hw *hw = &local->hw; 389 struct ieee80211_hw *hw = &local->hw;
356 unsigned long flags; 390 unsigned long flags;
357 int queue = skb_get_queue_mapping(skb);
358 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 391 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
392 int queue = info->hw_queue;
359 393
360 if (WARN_ON(!info->control.vif)) { 394 if (WARN_ON(!info->control.vif)) {
361 kfree_skb(skb); 395 kfree_skb(skb);
@@ -379,10 +413,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
379 int queue, i; 413 int queue, i;
380 414
381 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 415 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
382 for (i = 0; i < hw->queues; i++)
383 __ieee80211_stop_queue(hw, i,
384 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
385
386 while ((skb = skb_dequeue(skbs))) { 416 while ((skb = skb_dequeue(skbs))) {
387 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 417 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
388 418
@@ -391,7 +421,11 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
391 continue; 421 continue;
392 } 422 }
393 423
394 queue = skb_get_queue_mapping(skb); 424 queue = info->hw_queue;
425
426 __ieee80211_stop_queue(hw, queue,
427 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
428
395 __skb_queue_tail(&local->pending[queue], skb); 429 __skb_queue_tail(&local->pending[queue], skb);
396 } 430 }
397 431
@@ -404,12 +438,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
404 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 438 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
405} 439}
406 440
407void ieee80211_add_pending_skbs(struct ieee80211_local *local,
408 struct sk_buff_head *skbs)
409{
410 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
411}
412
413void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 441void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
414 enum queue_stop_reason reason) 442 enum queue_stop_reason reason)
415{ 443{
@@ -572,24 +600,40 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
572 size_t left = len; 600 size_t left = len;
573 u8 *pos = start; 601 u8 *pos = start;
574 bool calc_crc = filter != 0; 602 bool calc_crc = filter != 0;
603 DECLARE_BITMAP(seen_elems, 256);
575 604
605 bitmap_zero(seen_elems, 256);
576 memset(elems, 0, sizeof(*elems)); 606 memset(elems, 0, sizeof(*elems));
577 elems->ie_start = start; 607 elems->ie_start = start;
578 elems->total_len = len; 608 elems->total_len = len;
579 609
580 while (left >= 2) { 610 while (left >= 2) {
581 u8 id, elen; 611 u8 id, elen;
612 bool elem_parse_failed;
582 613
583 id = *pos++; 614 id = *pos++;
584 elen = *pos++; 615 elen = *pos++;
585 left -= 2; 616 left -= 2;
586 617
587 if (elen > left) 618 if (elen > left) {
619 elems->parse_error = true;
588 break; 620 break;
621 }
622
623 if (id != WLAN_EID_VENDOR_SPECIFIC &&
624 id != WLAN_EID_QUIET &&
625 test_bit(id, seen_elems)) {
626 elems->parse_error = true;
627 left -= elen;
628 pos += elen;
629 continue;
630 }
589 631
590 if (calc_crc && id < 64 && (filter & (1ULL << id))) 632 if (calc_crc && id < 64 && (filter & (1ULL << id)))
591 crc = crc32_be(crc, pos - 2, elen + 2); 633 crc = crc32_be(crc, pos - 2, elen + 2);
592 634
635 elem_parse_failed = false;
636
593 switch (id) { 637 switch (id) {
594 case WLAN_EID_SSID: 638 case WLAN_EID_SSID:
595 elems->ssid = pos; 639 elems->ssid = pos;
@@ -615,7 +659,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
615 if (elen >= sizeof(struct ieee80211_tim_ie)) { 659 if (elen >= sizeof(struct ieee80211_tim_ie)) {
616 elems->tim = (void *)pos; 660 elems->tim = (void *)pos;
617 elems->tim_len = elen; 661 elems->tim_len = elen;
618 } 662 } else
663 elem_parse_failed = true;
619 break; 664 break;
620 case WLAN_EID_IBSS_PARAMS: 665 case WLAN_EID_IBSS_PARAMS:
621 elems->ibss_params = pos; 666 elems->ibss_params = pos;
@@ -664,10 +709,14 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
664 case WLAN_EID_HT_CAPABILITY: 709 case WLAN_EID_HT_CAPABILITY:
665 if (elen >= sizeof(struct ieee80211_ht_cap)) 710 if (elen >= sizeof(struct ieee80211_ht_cap))
666 elems->ht_cap_elem = (void *)pos; 711 elems->ht_cap_elem = (void *)pos;
712 else
713 elem_parse_failed = true;
667 break; 714 break;
668 case WLAN_EID_HT_INFORMATION: 715 case WLAN_EID_HT_OPERATION:
669 if (elen >= sizeof(struct ieee80211_ht_info)) 716 if (elen >= sizeof(struct ieee80211_ht_operation))
670 elems->ht_info_elem = (void *)pos; 717 elems->ht_operation = (void *)pos;
718 else
719 elem_parse_failed = true;
671 break; 720 break;
672 case WLAN_EID_MESH_ID: 721 case WLAN_EID_MESH_ID:
673 elems->mesh_id = pos; 722 elems->mesh_id = pos;
@@ -676,6 +725,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
676 case WLAN_EID_MESH_CONFIG: 725 case WLAN_EID_MESH_CONFIG:
677 if (elen >= sizeof(struct ieee80211_meshconf_ie)) 726 if (elen >= sizeof(struct ieee80211_meshconf_ie))
678 elems->mesh_config = (void *)pos; 727 elems->mesh_config = (void *)pos;
728 else
729 elem_parse_failed = true;
679 break; 730 break;
680 case WLAN_EID_PEER_MGMT: 731 case WLAN_EID_PEER_MGMT:
681 elems->peering = pos; 732 elems->peering = pos;
@@ -696,6 +747,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
696 case WLAN_EID_RANN: 747 case WLAN_EID_RANN:
697 if (elen >= sizeof(struct ieee80211_rann_ie)) 748 if (elen >= sizeof(struct ieee80211_rann_ie))
698 elems->rann = (void *)pos; 749 elems->rann = (void *)pos;
750 else
751 elem_parse_failed = true;
699 break; 752 break;
700 case WLAN_EID_CHANNEL_SWITCH: 753 case WLAN_EID_CHANNEL_SWITCH:
701 elems->ch_switch_elem = pos; 754 elems->ch_switch_elem = pos;
@@ -724,10 +777,18 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
724 break; 777 break;
725 } 778 }
726 779
780 if (elem_parse_failed)
781 elems->parse_error = true;
782 else
783 set_bit(id, seen_elems);
784
727 left -= elen; 785 left -= elen;
728 pos += elen; 786 pos += elen;
729 } 787 }
730 788
789 if (left != 0)
790 elems->parse_error = true;
791
731 return crc; 792 return crc;
732} 793}
733 794
@@ -737,23 +798,27 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
737 ieee802_11_parse_elems_crc(start, len, elems, 0, 0); 798 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
738} 799}
739 800
740void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 801void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
802 bool bss_notify)
741{ 803{
742 struct ieee80211_local *local = sdata->local; 804 struct ieee80211_local *local = sdata->local;
743 struct ieee80211_tx_queue_params qparam; 805 struct ieee80211_tx_queue_params qparam;
744 int queue; 806 int ac;
745 bool use_11b; 807 bool use_11b;
746 int aCWmin, aCWmax; 808 int aCWmin, aCWmax;
747 809
748 if (!local->ops->conf_tx) 810 if (!local->ops->conf_tx)
749 return; 811 return;
750 812
813 if (local->hw.queues < IEEE80211_NUM_ACS)
814 return;
815
751 memset(&qparam, 0, sizeof(qparam)); 816 memset(&qparam, 0, sizeof(qparam));
752 817
753 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 818 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
754 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 819 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
755 820
756 for (queue = 0; queue < local_to_hw(local)->queues; queue++) { 821 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
757 /* Set defaults according to 802.11-2007 Table 7-37 */ 822 /* Set defaults according to 802.11-2007 Table 7-37 */
758 aCWmax = 1023; 823 aCWmax = 1023;
759 if (use_11b) 824 if (use_11b)
@@ -761,21 +826,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
761 else 826 else
762 aCWmin = 15; 827 aCWmin = 15;
763 828
764 switch (queue) { 829 switch (ac) {
765 case 3: /* AC_BK */ 830 case IEEE80211_AC_BK:
766 qparam.cw_max = aCWmax; 831 qparam.cw_max = aCWmax;
767 qparam.cw_min = aCWmin; 832 qparam.cw_min = aCWmin;
768 qparam.txop = 0; 833 qparam.txop = 0;
769 qparam.aifs = 7; 834 qparam.aifs = 7;
770 break; 835 break;
771 default: /* never happens but let's not leave undefined */ 836 default: /* never happens but let's not leave undefined */
772 case 2: /* AC_BE */ 837 case IEEE80211_AC_BE:
773 qparam.cw_max = aCWmax; 838 qparam.cw_max = aCWmax;
774 qparam.cw_min = aCWmin; 839 qparam.cw_min = aCWmin;
775 qparam.txop = 0; 840 qparam.txop = 0;
776 qparam.aifs = 3; 841 qparam.aifs = 3;
777 break; 842 break;
778 case 1: /* AC_VI */ 843 case IEEE80211_AC_VI:
779 qparam.cw_max = aCWmin; 844 qparam.cw_max = aCWmin;
780 qparam.cw_min = (aCWmin + 1) / 2 - 1; 845 qparam.cw_min = (aCWmin + 1) / 2 - 1;
781 if (use_11b) 846 if (use_11b)
@@ -784,7 +849,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
784 qparam.txop = 3008/32; 849 qparam.txop = 3008/32;
785 qparam.aifs = 2; 850 qparam.aifs = 2;
786 break; 851 break;
787 case 0: /* AC_VO */ 852 case IEEE80211_AC_VO:
788 qparam.cw_max = (aCWmin + 1) / 2 - 1; 853 qparam.cw_max = (aCWmin + 1) / 2 - 1;
789 qparam.cw_min = (aCWmin + 1) / 4 - 1; 854 qparam.cw_min = (aCWmin + 1) / 4 - 1;
790 if (use_11b) 855 if (use_11b)
@@ -797,8 +862,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
797 862
798 qparam.uapsd = false; 863 qparam.uapsd = false;
799 864
800 sdata->tx_conf[queue] = qparam; 865 sdata->tx_conf[ac] = qparam;
801 drv_conf_tx(local, sdata, queue, &qparam); 866 drv_conf_tx(local, sdata, ac, &qparam);
802 } 867 }
803 868
804 /* after reinitialize QoS TX queues setting to default, 869 /* after reinitialize QoS TX queues setting to default,
@@ -807,7 +872,9 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
807 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 872 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
808 sdata->vif.bss_conf.qos = 873 sdata->vif.bss_conf.qos =
809 sdata->vif.type != NL80211_IFTYPE_STATION; 874 sdata->vif.type != NL80211_IFTYPE_STATION;
810 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); 875 if (bss_notify)
876 ieee80211_bss_info_change_notify(sdata,
877 BSS_CHANGED_QOS);
811 } 878 }
812} 879}
813 880
@@ -829,7 +896,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
829 else 896 else
830 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 897 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
831 898
832 ieee80211_set_wmm_default(sdata); 899 ieee80211_set_wmm_default(sdata, true);
833} 900}
834 901
835u32 ieee80211_mandatory_rates(struct ieee80211_local *local, 902u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
@@ -842,10 +909,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
842 int i; 909 int i;
843 910
844 sband = local->hw.wiphy->bands[band]; 911 sband = local->hw.wiphy->bands[band];
845 if (!sband) { 912 if (WARN_ON(!sband))
846 WARN_ON(1); 913 return 1;
847 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
848 }
849 914
850 if (band == IEEE80211_BAND_2GHZ) 915 if (band == IEEE80211_BAND_2GHZ)
851 mandatory_flag = IEEE80211_RATE_MANDATORY_B; 916 mandatory_flag = IEEE80211_RATE_MANDATORY_B;
@@ -862,8 +927,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
862 927
863void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 928void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
864 u16 transaction, u16 auth_alg, 929 u16 transaction, u16 auth_alg,
865 u8 *extra, size_t extra_len, const u8 *bssid, 930 u8 *extra, size_t extra_len, const u8 *da,
866 const u8 *key, u8 key_len, u8 key_idx) 931 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
867{ 932{
868 struct ieee80211_local *local = sdata->local; 933 struct ieee80211_local *local = sdata->local;
869 struct sk_buff *skb; 934 struct sk_buff *skb;
@@ -881,7 +946,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
881 memset(mgmt, 0, 24 + 6); 946 memset(mgmt, 0, 24 + 6);
882 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 947 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
883 IEEE80211_STYPE_AUTH); 948 IEEE80211_STYPE_AUTH);
884 memcpy(mgmt->da, bssid, ETH_ALEN); 949 memcpy(mgmt->da, da, ETH_ALEN);
885 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 950 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
886 memcpy(mgmt->bssid, bssid, ETH_ALEN); 951 memcpy(mgmt->bssid, bssid, ETH_ALEN);
887 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 952 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
@@ -1070,7 +1135,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1070 1135
1071u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1136u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1072 struct ieee802_11_elems *elems, 1137 struct ieee802_11_elems *elems,
1073 enum ieee80211_band band) 1138 enum ieee80211_band band, u32 *basic_rates)
1074{ 1139{
1075 struct ieee80211_supported_band *sband; 1140 struct ieee80211_supported_band *sband;
1076 struct ieee80211_rate *bitrates; 1141 struct ieee80211_rate *bitrates;
@@ -1079,10 +1144,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1079 int i, j; 1144 int i, j;
1080 sband = local->hw.wiphy->bands[band]; 1145 sband = local->hw.wiphy->bands[band];
1081 1146
1082 if (!sband) { 1147 if (WARN_ON(!sband))
1083 WARN_ON(1); 1148 return 1;
1084 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1085 }
1086 1149
1087 bitrates = sband->bitrates; 1150 bitrates = sband->bitrates;
1088 num_rates = sband->n_bitrates; 1151 num_rates = sband->n_bitrates;
@@ -1091,15 +1154,25 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1091 elems->ext_supp_rates_len; i++) { 1154 elems->ext_supp_rates_len; i++) {
1092 u8 rate = 0; 1155 u8 rate = 0;
1093 int own_rate; 1156 int own_rate;
1157 bool is_basic;
1094 if (i < elems->supp_rates_len) 1158 if (i < elems->supp_rates_len)
1095 rate = elems->supp_rates[i]; 1159 rate = elems->supp_rates[i];
1096 else if (elems->ext_supp_rates) 1160 else if (elems->ext_supp_rates)
1097 rate = elems->ext_supp_rates 1161 rate = elems->ext_supp_rates
1098 [i - elems->supp_rates_len]; 1162 [i - elems->supp_rates_len];
1099 own_rate = 5 * (rate & 0x7f); 1163 own_rate = 5 * (rate & 0x7f);
1100 for (j = 0; j < num_rates; j++) 1164 is_basic = !!(rate & 0x80);
1101 if (bitrates[j].bitrate == own_rate) 1165
1166 if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
1167 continue;
1168
1169 for (j = 0; j < num_rates; j++) {
1170 if (bitrates[j].bitrate == own_rate) {
1102 supp_rates |= BIT(j); 1171 supp_rates |= BIT(j);
1172 if (basic_rates && is_basic)
1173 *basic_rates |= BIT(j);
1174 }
1175 }
1103 } 1176 }
1104 return supp_rates; 1177 return supp_rates;
1105} 1178}
@@ -1174,6 +1247,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1174 IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); 1247 IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
1175 1248
1176 /* add interfaces */ 1249 /* add interfaces */
1250 sdata = rtnl_dereference(local->monitor_sdata);
1251 if (sdata) {
1252 res = drv_add_interface(local, sdata);
1253 if (WARN_ON(res)) {
1254 rcu_assign_pointer(local->monitor_sdata, NULL);
1255 synchronize_net();
1256 kfree(sdata);
1257 }
1258 }
1259
1177 list_for_each_entry(sdata, &local->interfaces, list) { 1260 list_for_each_entry(sdata, &local->interfaces, list) {
1178 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1261 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1179 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1262 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
@@ -1185,26 +1268,28 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1185 mutex_lock(&local->sta_mtx); 1268 mutex_lock(&local->sta_mtx);
1186 list_for_each_entry(sta, &local->sta_list, list) { 1269 list_for_each_entry(sta, &local->sta_list, list) {
1187 if (sta->uploaded) { 1270 if (sta->uploaded) {
1188 sdata = sta->sdata; 1271 enum ieee80211_sta_state state;
1189 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1190 sdata = container_of(sdata->bss,
1191 struct ieee80211_sub_if_data,
1192 u.ap);
1193 1272
1194 WARN_ON(drv_sta_add(local, sdata, &sta->sta)); 1273 for (state = IEEE80211_STA_NOTEXIST;
1274 state < sta->sta_state - 1; state++)
1275 WARN_ON(drv_sta_state(local, sta->sdata, sta,
1276 state, state + 1));
1195 } 1277 }
1196 } 1278 }
1197 mutex_unlock(&local->sta_mtx); 1279 mutex_unlock(&local->sta_mtx);
1198 1280
1199 /* reconfigure tx conf */ 1281 /* reconfigure tx conf */
1200 list_for_each_entry(sdata, &local->interfaces, list) { 1282 if (hw->queues >= IEEE80211_NUM_ACS) {
1201 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1283 list_for_each_entry(sdata, &local->interfaces, list) {
1202 sdata->vif.type == NL80211_IFTYPE_MONITOR || 1284 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1203 !ieee80211_sdata_running(sdata)) 1285 sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1204 continue; 1286 !ieee80211_sdata_running(sdata))
1287 continue;
1205 1288
1206 for (i = 0; i < hw->queues; i++) 1289 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1207 drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); 1290 drv_conf_tx(local, sdata, i,
1291 &sdata->tx_conf[i]);
1292 }
1208 } 1293 }
1209 1294
1210 /* reconfigure hardware */ 1295 /* reconfigure hardware */
@@ -1272,6 +1357,27 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1272 ieee80211_recalc_ps(local, -1); 1357 ieee80211_recalc_ps(local, -1);
1273 1358
1274 /* 1359 /*
1360 * The sta might be in psm against the ap (e.g. because
1361 * this was the state before a hw restart), so we
1362 * explicitly send a null packet in order to make sure
1363 * it'll sync against the ap (and get out of psm).
1364 */
1365 if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
1366 list_for_each_entry(sdata, &local->interfaces, list) {
1367 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1368 continue;
1369
1370 ieee80211_send_nullfunc(local, sdata, 0);
1371 }
1372 }
1373
1374 /* add back keys */
1375 list_for_each_entry(sdata, &local->interfaces, list)
1376 if (ieee80211_sdata_running(sdata))
1377 ieee80211_enable_keys(sdata);
1378
1379 wake_up:
1380 /*
1275 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1381 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1276 * sessions can be established after a resume. 1382 * sessions can be established after a resume.
1277 * 1383 *
@@ -1292,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1292 mutex_unlock(&local->sta_mtx); 1398 mutex_unlock(&local->sta_mtx);
1293 } 1399 }
1294 1400
1295 /* add back keys */
1296 list_for_each_entry(sdata, &local->interfaces, list)
1297 if (ieee80211_sdata_running(sdata))
1298 ieee80211_enable_keys(sdata);
1299
1300 wake_up:
1301 ieee80211_wake_queues_by_reason(hw, 1401 ieee80211_wake_queues_by_reason(hw,
1302 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1402 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1303 1403
@@ -1561,57 +1661,55 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1561 return pos; 1661 return pos;
1562} 1662}
1563 1663
1564u8 *ieee80211_ie_build_ht_info(u8 *pos, 1664u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1565 struct ieee80211_sta_ht_cap *ht_cap,
1566 struct ieee80211_channel *channel, 1665 struct ieee80211_channel *channel,
1567 enum nl80211_channel_type channel_type) 1666 enum nl80211_channel_type channel_type,
1667 u16 prot_mode)
1568{ 1668{
1569 struct ieee80211_ht_info *ht_info; 1669 struct ieee80211_ht_operation *ht_oper;
1570 /* Build HT Information */ 1670 /* Build HT Information */
1571 *pos++ = WLAN_EID_HT_INFORMATION; 1671 *pos++ = WLAN_EID_HT_OPERATION;
1572 *pos++ = sizeof(struct ieee80211_ht_info); 1672 *pos++ = sizeof(struct ieee80211_ht_operation);
1573 ht_info = (struct ieee80211_ht_info *)pos; 1673 ht_oper = (struct ieee80211_ht_operation *)pos;
1574 ht_info->control_chan = 1674 ht_oper->primary_chan =
1575 ieee80211_frequency_to_channel(channel->center_freq); 1675 ieee80211_frequency_to_channel(channel->center_freq);
1576 switch (channel_type) { 1676 switch (channel_type) {
1577 case NL80211_CHAN_HT40MINUS: 1677 case NL80211_CHAN_HT40MINUS:
1578 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; 1678 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1579 break; 1679 break;
1580 case NL80211_CHAN_HT40PLUS: 1680 case NL80211_CHAN_HT40PLUS:
1581 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 1681 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1582 break; 1682 break;
1583 case NL80211_CHAN_HT20: 1683 case NL80211_CHAN_HT20:
1584 default: 1684 default:
1585 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; 1685 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1586 break; 1686 break;
1587 } 1687 }
1588 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 1688 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
1589 ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; 1689 channel_type != NL80211_CHAN_NO_HT &&
1690 channel_type != NL80211_CHAN_HT20)
1691 ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
1590 1692
1591 /* 1693 ht_oper->operation_mode = cpu_to_le16(prot_mode);
1592 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and 1694 ht_oper->stbc_param = 0x0000;
1593 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
1594 */
1595 ht_info->operation_mode = 0x0000;
1596 ht_info->stbc_param = 0x0000;
1597 1695
1598 /* It seems that Basic MCS set and Supported MCS set 1696 /* It seems that Basic MCS set and Supported MCS set
1599 are identical for the first 10 bytes */ 1697 are identical for the first 10 bytes */
1600 memset(&ht_info->basic_set, 0, 16); 1698 memset(&ht_oper->basic_set, 0, 16);
1601 memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); 1699 memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10);
1602 1700
1603 return pos + sizeof(struct ieee80211_ht_info); 1701 return pos + sizeof(struct ieee80211_ht_operation);
1604} 1702}
1605 1703
1606enum nl80211_channel_type 1704enum nl80211_channel_type
1607ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) 1705ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1608{ 1706{
1609 enum nl80211_channel_type channel_type; 1707 enum nl80211_channel_type channel_type;
1610 1708
1611 if (!ht_info) 1709 if (!ht_oper)
1612 return NL80211_CHAN_NO_HT; 1710 return NL80211_CHAN_NO_HT;
1613 1711
1614 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 1712 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
1615 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 1713 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
1616 channel_type = NL80211_CHAN_HT20; 1714 channel_type = NL80211_CHAN_HT20;
1617 break; 1715 break;
@@ -1628,13 +1726,15 @@ ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
1628 return channel_type; 1726 return channel_type;
1629} 1727}
1630 1728
1631int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) 1729int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
1730 struct sk_buff *skb, bool need_basic)
1632{ 1731{
1633 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1732 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1634 struct ieee80211_local *local = sdata->local; 1733 struct ieee80211_local *local = sdata->local;
1635 struct ieee80211_supported_band *sband; 1734 struct ieee80211_supported_band *sband;
1636 int rate; 1735 int rate;
1637 u8 i, rates, *pos; 1736 u8 i, rates, *pos;
1737 u32 basic_rates = vif->bss_conf.basic_rates;
1638 1738
1639 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1739 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1640 rates = sband->n_bitrates; 1740 rates = sband->n_bitrates;
@@ -1648,20 +1748,25 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1648 *pos++ = WLAN_EID_SUPP_RATES; 1748 *pos++ = WLAN_EID_SUPP_RATES;
1649 *pos++ = rates; 1749 *pos++ = rates;
1650 for (i = 0; i < rates; i++) { 1750 for (i = 0; i < rates; i++) {
1751 u8 basic = 0;
1752 if (need_basic && basic_rates & BIT(i))
1753 basic = 0x80;
1651 rate = sband->bitrates[i].bitrate; 1754 rate = sband->bitrates[i].bitrate;
1652 *pos++ = (u8) (rate / 5); 1755 *pos++ = basic | (u8) (rate / 5);
1653 } 1756 }
1654 1757
1655 return 0; 1758 return 0;
1656} 1759}
1657 1760
1658int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) 1761int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
1762 struct sk_buff *skb, bool need_basic)
1659{ 1763{
1660 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1764 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1661 struct ieee80211_local *local = sdata->local; 1765 struct ieee80211_local *local = sdata->local;
1662 struct ieee80211_supported_band *sband; 1766 struct ieee80211_supported_band *sband;
1663 int rate; 1767 int rate;
1664 u8 i, exrates, *pos; 1768 u8 i, exrates, *pos;
1769 u32 basic_rates = vif->bss_conf.basic_rates;
1665 1770
1666 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1771 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1667 exrates = sband->n_bitrates; 1772 exrates = sband->n_bitrates;
@@ -1678,9 +1783,25 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1678 *pos++ = WLAN_EID_EXT_SUPP_RATES; 1783 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1679 *pos++ = exrates; 1784 *pos++ = exrates;
1680 for (i = 8; i < sband->n_bitrates; i++) { 1785 for (i = 8; i < sband->n_bitrates; i++) {
1786 u8 basic = 0;
1787 if (need_basic && basic_rates & BIT(i))
1788 basic = 0x80;
1681 rate = sband->bitrates[i].bitrate; 1789 rate = sband->bitrates[i].bitrate;
1682 *pos++ = (u8) (rate / 5); 1790 *pos++ = basic | (u8) (rate / 5);
1683 } 1791 }
1684 } 1792 }
1685 return 0; 1793 return 0;
1686} 1794}
1795
1796int ieee80211_ave_rssi(struct ieee80211_vif *vif)
1797{
1798 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1799 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1800
1801 if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
1802 /* non-managed type inferfaces */
1803 return 0;
1804 }
1805 return ifmgd->ave_beacon_signal;
1806}
1807EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 68ad351479df..c04d401dae92 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
92 int keylen, int keyidx) 92 int keylen, int keyidx)
93{ 93{
94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
95 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
95 unsigned int hdrlen; 96 unsigned int hdrlen;
96 u8 *newhdr; 97 u8 *newhdr;
97 98
@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
104 hdrlen = ieee80211_hdrlen(hdr->frame_control); 105 hdrlen = ieee80211_hdrlen(hdr->frame_control);
105 newhdr = skb_push(skb, WEP_IV_LEN); 106 newhdr = skb_push(skb, WEP_IV_LEN);
106 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); 107 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
108
109 /* the HW only needs room for the IV, but not the actual IV */
110 if (info->control.hw_key &&
111 (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
112 return newhdr + hdrlen;
113
114 skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
107 ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); 115 ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
108 return newhdr + hdrlen; 116 return newhdr + hdrlen;
109} 117}
@@ -263,16 +271,14 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
263} 271}
264 272
265 273
266bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) 274static bool ieee80211_wep_is_weak_iv(struct sk_buff *skb,
275 struct ieee80211_key *key)
267{ 276{
268 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 277 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
269 unsigned int hdrlen; 278 unsigned int hdrlen;
270 u8 *ivpos; 279 u8 *ivpos;
271 u32 iv; 280 u32 iv;
272 281
273 if (!ieee80211_has_protected(hdr->frame_control))
274 return false;
275
276 hdrlen = ieee80211_hdrlen(hdr->frame_control); 282 hdrlen = ieee80211_hdrlen(hdr->frame_control);
277 ivpos = skb->data + hdrlen; 283 ivpos = skb->data + hdrlen;
278 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; 284 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
@@ -286,18 +292,27 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
286 struct sk_buff *skb = rx->skb; 292 struct sk_buff *skb = rx->skb;
287 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 293 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
295 __le16 fc = hdr->frame_control;
289 296
290 if (!ieee80211_is_data(hdr->frame_control) && 297 if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc))
291 !ieee80211_is_auth(hdr->frame_control))
292 return RX_CONTINUE; 298 return RX_CONTINUE;
293 299
294 if (!(status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(status->flag & RX_FLAG_DECRYPTED)) {
301 if (skb_linearize(rx->skb))
302 return RX_DROP_UNUSABLE;
303 if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
304 rx->sta->wep_weak_iv_count++;
295 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) 305 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
296 return RX_DROP_UNUSABLE; 306 return RX_DROP_UNUSABLE;
297 } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 307 } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
308 if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN))
309 return RX_DROP_UNUSABLE;
310 if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
311 rx->sta->wep_weak_iv_count++;
298 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 312 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
299 /* remove ICV */ 313 /* remove ICV */
300 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); 314 if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN))
315 return RX_DROP_UNUSABLE;
301 } 316 }
302 317
303 return RX_CONTINUE; 318 return RX_CONTINUE;
@@ -306,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
306static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) 321static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
307{ 322{
308 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 323 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
324 struct ieee80211_key_conf *hw_key = info->control.hw_key;
309 325
310 if (!info->control.hw_key) { 326 if (!hw_key) {
311 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, 327 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
312 tx->key->conf.keylen, 328 tx->key->conf.keylen,
313 tx->key->conf.keyidx)) 329 tx->key->conf.keyidx))
314 return -1; 330 return -1;
315 } else if (info->control.hw_key->flags & 331 } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
316 IEEE80211_KEY_FLAG_GENERATE_IV) { 332 (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
317 if (!ieee80211_wep_add_iv(tx->local, skb, 333 if (!ieee80211_wep_add_iv(tx->local, skb,
318 tx->key->conf.keylen, 334 tx->key->conf.keylen,
319 tx->key->conf.keyidx)) 335 tx->key->conf.keyidx))
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 01e54840a628..9615749d1f65 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -25,7 +25,6 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
25 const u8 *key, int keylen, int keyidx); 25 const u8 *key, int keylen, int keyidx);
26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
27 size_t klen, u8 *data, size_t data_len); 27 size_t klen, u8 *data, size_t data_len);
28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
29 28
30ieee80211_rx_result 29ieee80211_rx_result
31ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); 30ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89511be3111e..c3d643a6536c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,26 @@ static int wme_downgrade_ac(struct sk_buff *skb)
52 } 52 }
53} 53}
54 54
55static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
56 struct sk_buff *skb)
57{
58 /* in case we are a client verify acm is not set for this ac */
59 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
60 if (wme_downgrade_ac(skb)) {
61 /*
62 * This should not really happen. The AP has marked all
63 * lower ACs to require admission control which is not
64 * a reasonable configuration. Allow the frame to be
65 * transmitted using AC_BK as a workaround.
66 */
67 break;
68 }
69 }
70
71 /* look up which queue to use for frames with this 1d tag */
72 return ieee802_1d_to_ac[skb->priority];
73}
74
55/* Indicate which queue to use for this fully formed 802.11 frame */ 75/* Indicate which queue to use for this fully formed 802.11 frame */
56u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 76u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
57 struct sk_buff *skb, 77 struct sk_buff *skb,
@@ -59,7 +79,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
59{ 79{
60 u8 *p; 80 u8 *p;
61 81
62 if (local->hw.queues < 4) 82 if (local->hw.queues < IEEE80211_NUM_ACS)
63 return 0; 83 return 0;
64 84
65 if (!ieee80211_is_data(hdr->frame_control)) { 85 if (!ieee80211_is_data(hdr->frame_control)) {
@@ -86,9 +106,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
86 const u8 *ra = NULL; 106 const u8 *ra = NULL;
87 bool qos = false; 107 bool qos = false;
88 108
89 if (local->hw.queues < 4 || skb->len < 6) { 109 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
90 skb->priority = 0; /* required for correct WPA/11i MIC */ 110 skb->priority = 0; /* required for correct WPA/11i MIC */
91 return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE); 111 return 0;
92 } 112 }
93 113
94 rcu_read_lock(); 114 rcu_read_lock();
@@ -139,26 +159,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
139 return ieee80211_downgrade_queue(local, skb); 159 return ieee80211_downgrade_queue(local, skb);
140} 160}
141 161
142u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
143 struct sk_buff *skb)
144{
145 /* in case we are a client verify acm is not set for this ac */
146 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
147 if (wme_downgrade_ac(skb)) {
148 /*
149 * This should not really happen. The AP has marked all
150 * lower ACs to require admission control which is not
151 * a reasonable configuration. Allow the frame to be
152 * transmitted using AC_BK as a workaround.
153 */
154 break;
155 }
156 }
157
158 /* look up which queue to use for frames with this 1d tag */
159 return ieee802_1d_to_ac[skb->priority];
160}
161
162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
163 struct sk_buff *skb) 163 struct sk_buff *skb)
164{ 164{
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 94edceb617ff..ca80818b7b66 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -22,8 +22,5 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
22 struct sk_buff *skb); 22 struct sk_buff *skb);
23void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 23void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
24 struct sk_buff *skb); 24 struct sk_buff *skb);
25u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
26 struct sk_buff *skb);
27
28 25
29#endif /* _WME_H */ 26#endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6dd01a05291..b2650a9d45ff 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -27,16 +27,9 @@
27#include "rate.h" 27#include "rate.h"
28#include "driver-ops.h" 28#include "driver-ops.h"
29 29
30#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
31#define IEEE80211_AUTH_MAX_TRIES 3
32#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
33#define IEEE80211_ASSOC_MAX_TRIES 3
34
35enum work_action { 30enum work_action {
36 WORK_ACT_MISMATCH,
37 WORK_ACT_NONE, 31 WORK_ACT_NONE,
38 WORK_ACT_TIMEOUT, 32 WORK_ACT_TIMEOUT,
39 WORK_ACT_DONE,
40}; 33};
41 34
42 35
@@ -71,464 +64,6 @@ void free_work(struct ieee80211_work *wk)
71 kfree_rcu(wk, rcu_head); 64 kfree_rcu(wk, rcu_head);
72} 65}
73 66
74static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
75 struct ieee80211_supported_band *sband,
76 u32 *rates)
77{
78 int i, j, count;
79 *rates = 0;
80 count = 0;
81 for (i = 0; i < supp_rates_len; i++) {
82 int rate = (supp_rates[i] & 0x7F) * 5;
83
84 for (j = 0; j < sband->n_bitrates; j++)
85 if (sband->bitrates[j].bitrate == rate) {
86 *rates |= BIT(j);
87 count++;
88 break;
89 }
90 }
91
92 return count;
93}
94
95/* frame sending functions */
96
97static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
98 struct sk_buff *skb, const u8 *ht_info_ie,
99 struct ieee80211_supported_band *sband,
100 struct ieee80211_channel *channel,
101 enum ieee80211_smps_mode smps)
102{
103 struct ieee80211_ht_info *ht_info;
104 u8 *pos;
105 u32 flags = channel->flags;
106 u16 cap;
107 struct ieee80211_sta_ht_cap ht_cap;
108
109 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
110
111 if (!sband->ht_cap.ht_supported)
112 return;
113
114 if (!ht_info_ie)
115 return;
116
117 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
118 return;
119
120 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
121 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
122
123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
124
125 /* determine capability flags */
126 cap = ht_cap.cap;
127
128 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
129 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
130 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
131 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
132 cap &= ~IEEE80211_HT_CAP_SGI_40;
133 }
134 break;
135 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
136 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
137 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
138 cap &= ~IEEE80211_HT_CAP_SGI_40;
139 }
140 break;
141 }
142
143 /* set SM PS mode properly */
144 cap &= ~IEEE80211_HT_CAP_SM_PS;
145 switch (smps) {
146 case IEEE80211_SMPS_AUTOMATIC:
147 case IEEE80211_SMPS_NUM_MODES:
148 WARN_ON(1);
149 case IEEE80211_SMPS_OFF:
150 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
151 IEEE80211_HT_CAP_SM_PS_SHIFT;
152 break;
153 case IEEE80211_SMPS_STATIC:
154 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
155 IEEE80211_HT_CAP_SM_PS_SHIFT;
156 break;
157 case IEEE80211_SMPS_DYNAMIC:
158 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
159 IEEE80211_HT_CAP_SM_PS_SHIFT;
160 break;
161 }
162
163 /* reserve and fill IE */
164 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
165 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
166}
167
168static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_work *wk)
170{
171 struct ieee80211_local *local = sdata->local;
172 struct sk_buff *skb;
173 struct ieee80211_mgmt *mgmt;
174 u8 *pos, qos_info;
175 size_t offset = 0, noffset;
176 int i, count, rates_len, supp_rates_len;
177 u16 capab;
178 struct ieee80211_supported_band *sband;
179 u32 rates = 0;
180
181 sband = local->hw.wiphy->bands[wk->chan->band];
182
183 if (wk->assoc.supp_rates_len) {
184 /*
185 * Get all rates supported by the device and the AP as
186 * some APs don't like getting a superset of their rates
187 * in the association request (e.g. D-Link DAP 1353 in
188 * b-only mode)...
189 */
190 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
191 wk->assoc.supp_rates_len,
192 sband, &rates);
193 } else {
194 /*
195 * In case AP not provide any supported rates information
196 * before association, we send information element(s) with
197 * all rates that we support.
198 */
199 rates = ~0;
200 rates_len = sband->n_bitrates;
201 }
202
203 skb = alloc_skb(local->hw.extra_tx_headroom +
204 sizeof(*mgmt) + /* bit too much but doesn't matter */
205 2 + wk->assoc.ssid_len + /* SSID */
206 4 + rates_len + /* (extended) rates */
207 4 + /* power capability */
208 2 + 2 * sband->n_channels + /* supported channels */
209 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
210 wk->ie_len + /* extra IEs */
211 9, /* WMM */
212 GFP_KERNEL);
213 if (!skb)
214 return;
215
216 skb_reserve(skb, local->hw.extra_tx_headroom);
217
218 capab = WLAN_CAPABILITY_ESS;
219
220 if (sband->band == IEEE80211_BAND_2GHZ) {
221 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
222 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
223 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
224 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
225 }
226
227 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
228 capab |= WLAN_CAPABILITY_PRIVACY;
229
230 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
231 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
232 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
233
234 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
235 memset(mgmt, 0, 24);
236 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
237 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
238 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
239
240 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
241 skb_put(skb, 10);
242 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
243 IEEE80211_STYPE_REASSOC_REQ);
244 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
245 mgmt->u.reassoc_req.listen_interval =
246 cpu_to_le16(local->hw.conf.listen_interval);
247 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
248 ETH_ALEN);
249 } else {
250 skb_put(skb, 4);
251 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
252 IEEE80211_STYPE_ASSOC_REQ);
253 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
254 mgmt->u.assoc_req.listen_interval =
255 cpu_to_le16(local->hw.conf.listen_interval);
256 }
257
258 /* SSID */
259 pos = skb_put(skb, 2 + wk->assoc.ssid_len);
260 *pos++ = WLAN_EID_SSID;
261 *pos++ = wk->assoc.ssid_len;
262 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
263
264 /* add all rates which were marked to be used above */
265 supp_rates_len = rates_len;
266 if (supp_rates_len > 8)
267 supp_rates_len = 8;
268
269 pos = skb_put(skb, supp_rates_len + 2);
270 *pos++ = WLAN_EID_SUPP_RATES;
271 *pos++ = supp_rates_len;
272
273 count = 0;
274 for (i = 0; i < sband->n_bitrates; i++) {
275 if (BIT(i) & rates) {
276 int rate = sband->bitrates[i].bitrate;
277 *pos++ = (u8) (rate / 5);
278 if (++count == 8)
279 break;
280 }
281 }
282
283 if (rates_len > count) {
284 pos = skb_put(skb, rates_len - count + 2);
285 *pos++ = WLAN_EID_EXT_SUPP_RATES;
286 *pos++ = rates_len - count;
287
288 for (i++; i < sband->n_bitrates; i++) {
289 if (BIT(i) & rates) {
290 int rate = sband->bitrates[i].bitrate;
291 *pos++ = (u8) (rate / 5);
292 }
293 }
294 }
295
296 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
297 /* 1. power capabilities */
298 pos = skb_put(skb, 4);
299 *pos++ = WLAN_EID_PWR_CAPABILITY;
300 *pos++ = 2;
301 *pos++ = 0; /* min tx power */
302 *pos++ = wk->chan->max_power; /* max tx power */
303
304 /* 2. supported channels */
305 /* TODO: get this in reg domain format */
306 pos = skb_put(skb, 2 * sband->n_channels + 2);
307 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
308 *pos++ = 2 * sband->n_channels;
309 for (i = 0; i < sband->n_channels; i++) {
310 *pos++ = ieee80211_frequency_to_channel(
311 sband->channels[i].center_freq);
312 *pos++ = 1; /* one channel in the subband*/
313 }
314 }
315
316 /* if present, add any custom IEs that go before HT */
317 if (wk->ie_len && wk->ie) {
318 static const u8 before_ht[] = {
319 WLAN_EID_SSID,
320 WLAN_EID_SUPP_RATES,
321 WLAN_EID_EXT_SUPP_RATES,
322 WLAN_EID_PWR_CAPABILITY,
323 WLAN_EID_SUPPORTED_CHANNELS,
324 WLAN_EID_RSN,
325 WLAN_EID_QOS_CAPA,
326 WLAN_EID_RRM_ENABLED_CAPABILITIES,
327 WLAN_EID_MOBILITY_DOMAIN,
328 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
329 };
330 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
331 before_ht, ARRAY_SIZE(before_ht),
332 offset);
333 pos = skb_put(skb, noffset - offset);
334 memcpy(pos, wk->ie + offset, noffset - offset);
335 offset = noffset;
336 }
337
338 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
339 local->hw.queues >= 4)
340 ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
341 sband, wk->chan, wk->assoc.smps);
342
343 /* if present, add any custom non-vendor IEs that go after HT */
344 if (wk->ie_len && wk->ie) {
345 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
346 offset);
347 pos = skb_put(skb, noffset - offset);
348 memcpy(pos, wk->ie + offset, noffset - offset);
349 offset = noffset;
350 }
351
352 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
353 if (wk->assoc.uapsd_used) {
354 qos_info = local->uapsd_queues;
355 qos_info |= (local->uapsd_max_sp_len <<
356 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
357 } else {
358 qos_info = 0;
359 }
360
361 pos = skb_put(skb, 9);
362 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
363 *pos++ = 7; /* len */
364 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
365 *pos++ = 0x50;
366 *pos++ = 0xf2;
367 *pos++ = 2; /* WME */
368 *pos++ = 0; /* WME info */
369 *pos++ = 1; /* WME ver */
370 *pos++ = qos_info;
371 }
372
373 /* add any remaining custom (i.e. vendor specific here) IEs */
374 if (wk->ie_len && wk->ie) {
375 noffset = wk->ie_len;
376 pos = skb_put(skb, noffset - offset);
377 memcpy(pos, wk->ie + offset, noffset - offset);
378 }
379
380 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
381 ieee80211_tx_skb(sdata, skb);
382}
383
384static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
385 struct ieee80211_work *wk)
386{
387 struct cfg80211_bss *cbss;
388 u16 capa_val = WLAN_CAPABILITY_ESS;
389
390 if (wk->probe_auth.privacy)
391 capa_val |= WLAN_CAPABILITY_PRIVACY;
392
393 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
394 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
395 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
396 capa_val);
397 if (!cbss)
398 return;
399
400 cfg80211_unlink_bss(local->hw.wiphy, cbss);
401 cfg80211_put_bss(cbss);
402}
403
404static enum work_action __must_check
405ieee80211_direct_probe(struct ieee80211_work *wk)
406{
407 struct ieee80211_sub_if_data *sdata = wk->sdata;
408 struct ieee80211_local *local = sdata->local;
409
410 if (!wk->probe_auth.synced) {
411 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
412 IEEE80211_TX_SYNC_AUTH);
413 if (ret)
414 return WORK_ACT_TIMEOUT;
415 }
416 wk->probe_auth.synced = true;
417
418 wk->probe_auth.tries++;
419 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
420 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
421 sdata->name, wk->filter_ta);
422
423 /*
424 * Most likely AP is not in the range so remove the
425 * bss struct for that AP.
426 */
427 ieee80211_remove_auth_bss(local, wk);
428
429 return WORK_ACT_TIMEOUT;
430 }
431
432 printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
433 sdata->name, wk->filter_ta, wk->probe_auth.tries,
434 IEEE80211_AUTH_MAX_TRIES);
435
436 /*
437 * Direct probe is sent to broadcast address as some APs
438 * will not answer to direct packet in unassociated state.
439 */
440 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
441 wk->probe_auth.ssid_len, NULL, 0,
442 (u32) -1, true, false);
443
444 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
445 run_again(local, wk->timeout);
446
447 return WORK_ACT_NONE;
448}
449
450
451static enum work_action __must_check
452ieee80211_authenticate(struct ieee80211_work *wk)
453{
454 struct ieee80211_sub_if_data *sdata = wk->sdata;
455 struct ieee80211_local *local = sdata->local;
456
457 if (!wk->probe_auth.synced) {
458 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
459 IEEE80211_TX_SYNC_AUTH);
460 if (ret)
461 return WORK_ACT_TIMEOUT;
462 }
463 wk->probe_auth.synced = true;
464
465 wk->probe_auth.tries++;
466 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
467 printk(KERN_DEBUG "%s: authentication with %pM"
468 " timed out\n", sdata->name, wk->filter_ta);
469
470 /*
471 * Most likely AP is not in the range so remove the
472 * bss struct for that AP.
473 */
474 ieee80211_remove_auth_bss(local, wk);
475
476 return WORK_ACT_TIMEOUT;
477 }
478
479 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
480 sdata->name, wk->filter_ta, wk->probe_auth.tries);
481
482 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
483 wk->ie_len, wk->filter_ta, NULL, 0, 0);
484 wk->probe_auth.transaction = 2;
485
486 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
487 run_again(local, wk->timeout);
488
489 return WORK_ACT_NONE;
490}
491
492static enum work_action __must_check
493ieee80211_associate(struct ieee80211_work *wk)
494{
495 struct ieee80211_sub_if_data *sdata = wk->sdata;
496 struct ieee80211_local *local = sdata->local;
497
498 if (!wk->assoc.synced) {
499 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
500 IEEE80211_TX_SYNC_ASSOC);
501 if (ret)
502 return WORK_ACT_TIMEOUT;
503 }
504 wk->assoc.synced = true;
505
506 wk->assoc.tries++;
507 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
508 printk(KERN_DEBUG "%s: association with %pM"
509 " timed out\n",
510 sdata->name, wk->filter_ta);
511
512 /*
513 * Most likely AP is not in the range so remove the
514 * bss struct for that AP.
515 */
516 if (wk->assoc.bss)
517 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
518
519 return WORK_ACT_TIMEOUT;
520 }
521
522 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
523 sdata->name, wk->filter_ta, wk->assoc.tries);
524 ieee80211_send_assoc(sdata, wk);
525
526 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
527 run_again(local, wk->timeout);
528
529 return WORK_ACT_NONE;
530}
531
532static enum work_action __must_check 67static enum work_action __must_check
533ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) 68ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
534{ 69{
@@ -568,300 +103,6 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
568 return WORK_ACT_TIMEOUT; 103 return WORK_ACT_TIMEOUT;
569} 104}
570 105
571static enum work_action __must_check
572ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
573{
574 if (wk->started)
575 return WORK_ACT_TIMEOUT;
576
577 /*
578 * Wait up to one beacon interval ...
579 * should this be more if we miss one?
580 */
581 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
582 wk->sdata->name, wk->filter_ta);
583 wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval);
584 return WORK_ACT_NONE;
585}
586
587static void ieee80211_auth_challenge(struct ieee80211_work *wk,
588 struct ieee80211_mgmt *mgmt,
589 size_t len)
590{
591 struct ieee80211_sub_if_data *sdata = wk->sdata;
592 u8 *pos;
593 struct ieee802_11_elems elems;
594
595 pos = mgmt->u.auth.variable;
596 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
597 if (!elems.challenge)
598 return;
599 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
600 elems.challenge - 2, elems.challenge_len + 2,
601 wk->filter_ta, wk->probe_auth.key,
602 wk->probe_auth.key_len, wk->probe_auth.key_idx);
603 wk->probe_auth.transaction = 4;
604}
605
606static enum work_action __must_check
607ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
608 struct ieee80211_mgmt *mgmt, size_t len)
609{
610 u16 auth_alg, auth_transaction, status_code;
611
612 if (wk->type != IEEE80211_WORK_AUTH)
613 return WORK_ACT_MISMATCH;
614
615 if (len < 24 + 6)
616 return WORK_ACT_NONE;
617
618 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
619 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
620 status_code = le16_to_cpu(mgmt->u.auth.status_code);
621
622 if (auth_alg != wk->probe_auth.algorithm ||
623 auth_transaction != wk->probe_auth.transaction)
624 return WORK_ACT_NONE;
625
626 if (status_code != WLAN_STATUS_SUCCESS) {
627 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
628 wk->sdata->name, mgmt->sa, status_code);
629 return WORK_ACT_DONE;
630 }
631
632 switch (wk->probe_auth.algorithm) {
633 case WLAN_AUTH_OPEN:
634 case WLAN_AUTH_LEAP:
635 case WLAN_AUTH_FT:
636 break;
637 case WLAN_AUTH_SHARED_KEY:
638 if (wk->probe_auth.transaction != 4) {
639 ieee80211_auth_challenge(wk, mgmt, len);
640 /* need another frame */
641 return WORK_ACT_NONE;
642 }
643 break;
644 default:
645 WARN_ON(1);
646 return WORK_ACT_NONE;
647 }
648
649 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
650 return WORK_ACT_DONE;
651}
652
653static enum work_action __must_check
654ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
655 struct ieee80211_mgmt *mgmt, size_t len,
656 bool reassoc)
657{
658 struct ieee80211_sub_if_data *sdata = wk->sdata;
659 struct ieee80211_local *local = sdata->local;
660 u16 capab_info, status_code, aid;
661 struct ieee802_11_elems elems;
662 u8 *pos;
663
664 if (wk->type != IEEE80211_WORK_ASSOC)
665 return WORK_ACT_MISMATCH;
666
667 /*
668 * AssocResp and ReassocResp have identical structure, so process both
669 * of them in this function.
670 */
671
672 if (len < 24 + 6)
673 return WORK_ACT_NONE;
674
675 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
676 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
677 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
678
679 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
680 "status=%d aid=%d)\n",
681 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
682 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
683
684 pos = mgmt->u.assoc_resp.variable;
685 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
686
687 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
688 elems.timeout_int && elems.timeout_int_len == 5 &&
689 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
690 u32 tu, ms;
691 tu = get_unaligned_le32(elems.timeout_int + 1);
692 ms = tu * 1024 / 1000;
693 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
694 "comeback duration %u TU (%u ms)\n",
695 sdata->name, mgmt->sa, tu, ms);
696 wk->timeout = jiffies + msecs_to_jiffies(ms);
697 if (ms > IEEE80211_ASSOC_TIMEOUT)
698 run_again(local, wk->timeout);
699 return WORK_ACT_NONE;
700 }
701
702 if (status_code != WLAN_STATUS_SUCCESS)
703 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
704 sdata->name, mgmt->sa, status_code);
705 else
706 printk(KERN_DEBUG "%s: associated\n", sdata->name);
707
708 return WORK_ACT_DONE;
709}
710
711static enum work_action __must_check
712ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
713 struct ieee80211_mgmt *mgmt, size_t len,
714 struct ieee80211_rx_status *rx_status)
715{
716 struct ieee80211_sub_if_data *sdata = wk->sdata;
717 struct ieee80211_local *local = sdata->local;
718 size_t baselen;
719
720 ASSERT_WORK_MTX(local);
721
722 if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
723 return WORK_ACT_MISMATCH;
724
725 if (len < 24 + 12)
726 return WORK_ACT_NONE;
727
728 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
729 if (baselen > len)
730 return WORK_ACT_NONE;
731
732 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
733 return WORK_ACT_DONE;
734}
735
736static enum work_action __must_check
737ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk,
738 struct ieee80211_mgmt *mgmt, size_t len)
739{
740 struct ieee80211_sub_if_data *sdata = wk->sdata;
741 struct ieee80211_local *local = sdata->local;
742
743 ASSERT_WORK_MTX(local);
744
745 if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
746 return WORK_ACT_MISMATCH;
747
748 if (len < 24 + 12)
749 return WORK_ACT_NONE;
750
751 printk(KERN_DEBUG "%s: beacon received\n", sdata->name);
752 return WORK_ACT_DONE;
753}
754
755static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
756 struct sk_buff *skb)
757{
758 struct ieee80211_rx_status *rx_status;
759 struct ieee80211_mgmt *mgmt;
760 struct ieee80211_work *wk;
761 enum work_action rma = WORK_ACT_NONE;
762 u16 fc;
763
764 rx_status = (struct ieee80211_rx_status *) skb->cb;
765 mgmt = (struct ieee80211_mgmt *) skb->data;
766 fc = le16_to_cpu(mgmt->frame_control);
767
768 mutex_lock(&local->mtx);
769
770 list_for_each_entry(wk, &local->work_list, list) {
771 const u8 *bssid = NULL;
772
773 switch (wk->type) {
774 case IEEE80211_WORK_DIRECT_PROBE:
775 case IEEE80211_WORK_AUTH:
776 case IEEE80211_WORK_ASSOC:
777 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
778 bssid = wk->filter_ta;
779 break;
780 default:
781 continue;
782 }
783
784 /*
785 * Before queuing, we already verified mgmt->sa,
786 * so this is needed just for matching.
787 */
788 if (compare_ether_addr(bssid, mgmt->bssid))
789 continue;
790
791 switch (fc & IEEE80211_FCTL_STYPE) {
792 case IEEE80211_STYPE_BEACON:
793 rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len);
794 break;
795 case IEEE80211_STYPE_PROBE_RESP:
796 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
797 rx_status);
798 break;
799 case IEEE80211_STYPE_AUTH:
800 rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
801 break;
802 case IEEE80211_STYPE_ASSOC_RESP:
803 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
804 skb->len, false);
805 break;
806 case IEEE80211_STYPE_REASSOC_RESP:
807 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
808 skb->len, true);
809 break;
810 default:
811 WARN_ON(1);
812 rma = WORK_ACT_NONE;
813 }
814
815 /*
816 * We've either received an unexpected frame, or we have
817 * multiple work items and need to match the frame to the
818 * right one.
819 */
820 if (rma == WORK_ACT_MISMATCH)
821 continue;
822
823 /*
824 * We've processed this frame for that work, so it can't
825 * belong to another work struct.
826 * NB: this is also required for correctness for 'rma'!
827 */
828 break;
829 }
830
831 switch (rma) {
832 case WORK_ACT_MISMATCH:
833 /* ignore this unmatched frame */
834 break;
835 case WORK_ACT_NONE:
836 break;
837 case WORK_ACT_DONE:
838 list_del_rcu(&wk->list);
839 break;
840 default:
841 WARN(1, "unexpected: %d", rma);
842 }
843
844 mutex_unlock(&local->mtx);
845
846 if (rma != WORK_ACT_DONE)
847 goto out;
848
849 switch (wk->done(wk, skb)) {
850 case WORK_DONE_DESTROY:
851 free_work(wk);
852 break;
853 case WORK_DONE_REQUEUE:
854 synchronize_rcu();
855 wk->started = false; /* restart */
856 mutex_lock(&local->mtx);
857 list_add_tail(&wk->list, &local->work_list);
858 mutex_unlock(&local->mtx);
859 }
860
861 out:
862 kfree_skb(skb);
863}
864
865static void ieee80211_work_timer(unsigned long data) 106static void ieee80211_work_timer(unsigned long data)
866{ 107{
867 struct ieee80211_local *local = (void *) data; 108 struct ieee80211_local *local = (void *) data;
@@ -876,15 +117,11 @@ static void ieee80211_work_work(struct work_struct *work)
876{ 117{
877 struct ieee80211_local *local = 118 struct ieee80211_local *local =
878 container_of(work, struct ieee80211_local, work_work); 119 container_of(work, struct ieee80211_local, work_work);
879 struct sk_buff *skb;
880 struct ieee80211_work *wk, *tmp; 120 struct ieee80211_work *wk, *tmp;
881 LIST_HEAD(free_work); 121 LIST_HEAD(free_work);
882 enum work_action rma; 122 enum work_action rma;
883 bool remain_off_channel = false; 123 bool remain_off_channel = false;
884 124
885 if (local->scanning)
886 return;
887
888 /* 125 /*
889 * ieee80211_queue_work() should have picked up most cases, 126 * ieee80211_queue_work() should have picked up most cases,
890 * here we'll pick the rest. 127 * here we'll pick the rest.
@@ -892,12 +129,13 @@ static void ieee80211_work_work(struct work_struct *work)
892 if (WARN(local->suspended, "work scheduled while going to suspend\n")) 129 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
893 return; 130 return;
894 131
895 /* first process frames to avoid timing out while a frame is pending */
896 while ((skb = skb_dequeue(&local->work_skb_queue)))
897 ieee80211_work_rx_queued_mgmt(local, skb);
898
899 mutex_lock(&local->mtx); 132 mutex_lock(&local->mtx);
900 133
134 if (local->scanning) {
135 mutex_unlock(&local->mtx);
136 return;
137 }
138
901 ieee80211_recalc_idle(local); 139 ieee80211_recalc_idle(local);
902 140
903 list_for_each_entry_safe(wk, tmp, &local->work_list, list) { 141 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
@@ -946,24 +184,12 @@ static void ieee80211_work_work(struct work_struct *work)
946 case IEEE80211_WORK_ABORT: 184 case IEEE80211_WORK_ABORT:
947 rma = WORK_ACT_TIMEOUT; 185 rma = WORK_ACT_TIMEOUT;
948 break; 186 break;
949 case IEEE80211_WORK_DIRECT_PROBE:
950 rma = ieee80211_direct_probe(wk);
951 break;
952 case IEEE80211_WORK_AUTH:
953 rma = ieee80211_authenticate(wk);
954 break;
955 case IEEE80211_WORK_ASSOC:
956 rma = ieee80211_associate(wk);
957 break;
958 case IEEE80211_WORK_REMAIN_ON_CHANNEL: 187 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
959 rma = ieee80211_remain_on_channel_timeout(wk); 188 rma = ieee80211_remain_on_channel_timeout(wk);
960 break; 189 break;
961 case IEEE80211_WORK_OFFCHANNEL_TX: 190 case IEEE80211_WORK_OFFCHANNEL_TX:
962 rma = ieee80211_offchannel_tx(wk); 191 rma = ieee80211_offchannel_tx(wk);
963 break; 192 break;
964 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
965 rma = ieee80211_assoc_beacon_wait(wk);
966 break;
967 } 193 }
968 194
969 wk->started = started; 195 wk->started = started;
@@ -1002,13 +228,8 @@ static void ieee80211_work_work(struct work_struct *work)
1002 run_again(local, jiffies + HZ/2); 228 run_again(local, jiffies + HZ/2);
1003 } 229 }
1004 230
1005 if (list_empty(&local->work_list) && local->scan_req &&
1006 !local->scanning)
1007 ieee80211_queue_delayed_work(&local->hw,
1008 &local->scan_work,
1009 round_jiffies_relative(0));
1010
1011 ieee80211_recalc_idle(local); 231 ieee80211_recalc_idle(local);
232 ieee80211_run_deferred_scan(local);
1012 233
1013 mutex_unlock(&local->mtx); 234 mutex_unlock(&local->mtx);
1014 235
@@ -1051,7 +272,6 @@ void ieee80211_work_init(struct ieee80211_local *local)
1051 setup_timer(&local->work_timer, ieee80211_work_timer, 272 setup_timer(&local->work_timer, ieee80211_work_timer,
1052 (unsigned long)local); 273 (unsigned long)local);
1053 INIT_WORK(&local->work_work, ieee80211_work_work); 274 INIT_WORK(&local->work_work, ieee80211_work_work);
1054 skb_queue_head_init(&local->work_skb_queue);
1055} 275}
1056 276
1057void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata) 277void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
@@ -1085,43 +305,6 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
1085 mutex_unlock(&local->mtx); 305 mutex_unlock(&local->mtx);
1086} 306}
1087 307
1088ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1089 struct sk_buff *skb)
1090{
1091 struct ieee80211_local *local = sdata->local;
1092 struct ieee80211_mgmt *mgmt;
1093 struct ieee80211_work *wk;
1094 u16 fc;
1095
1096 if (skb->len < 24)
1097 return RX_DROP_MONITOR;
1098
1099 mgmt = (struct ieee80211_mgmt *) skb->data;
1100 fc = le16_to_cpu(mgmt->frame_control);
1101
1102 list_for_each_entry_rcu(wk, &local->work_list, list) {
1103 if (sdata != wk->sdata)
1104 continue;
1105 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1106 continue;
1107 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1108 continue;
1109
1110 switch (fc & IEEE80211_FCTL_STYPE) {
1111 case IEEE80211_STYPE_AUTH:
1112 case IEEE80211_STYPE_PROBE_RESP:
1113 case IEEE80211_STYPE_ASSOC_RESP:
1114 case IEEE80211_STYPE_REASSOC_RESP:
1115 case IEEE80211_STYPE_BEACON:
1116 skb_queue_tail(&local->work_skb_queue, skb);
1117 ieee80211_queue_work(&local->hw, &local->work_work);
1118 return RX_QUEUED;
1119 }
1120 }
1121
1122 return RX_CONTINUE;
1123}
1124
1125static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk, 308static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1126 struct sk_buff *skb) 309 struct sk_buff *skb)
1127{ 310{
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b758350919ff..bdb53aba888e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -138,6 +138,10 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
138 if (skb->len < hdrlen + MICHAEL_MIC_LEN) 138 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
139 return RX_DROP_UNUSABLE; 139 return RX_DROP_UNUSABLE;
140 140
141 if (skb_linearize(rx->skb))
142 return RX_DROP_UNUSABLE;
143 hdr = (void *)skb->data;
144
141 data = skb->data + hdrlen; 145 data = skb->data + hdrlen;
142 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 146 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
143 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 147 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
@@ -179,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
179 u8 *pos; 183 u8 *pos;
180 184
181 if (info->control.hw_key && 185 if (info->control.hw_key &&
182 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 186 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
187 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
183 /* hwaccel - with no need for software-generated IV */ 188 /* hwaccel - with no need for software-generated IV */
184 return 0; 189 return 0;
185 } 190 }
@@ -198,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
198 203
199 pos = skb_push(skb, TKIP_IV_LEN); 204 pos = skb_push(skb, TKIP_IV_LEN);
200 memmove(pos, pos + TKIP_IV_LEN, hdrlen); 205 memmove(pos, pos + TKIP_IV_LEN, hdrlen);
206 skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
201 pos += hdrlen; 207 pos += hdrlen;
202 208
209 /* the HW only needs room for the IV, but not the actual IV */
210 if (info->control.hw_key &&
211 (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
212 return 0;
213
203 /* Increase IV for the frame */ 214 /* Increase IV for the frame */
204 spin_lock_irqsave(&key->u.tkip.txlock, flags); 215 spin_lock_irqsave(&key->u.tkip.txlock, flags);
205 key->u.tkip.tx.iv16++; 216 key->u.tkip.tx.iv16++;
@@ -253,6 +264,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
253 if (!rx->sta || skb->len - hdrlen < 12) 264 if (!rx->sta || skb->len - hdrlen < 12)
254 return RX_DROP_UNUSABLE; 265 return RX_DROP_UNUSABLE;
255 266
267 /* it may be possible to optimize this a bit more */
268 if (skb_linearize(rx->skb))
269 return RX_DROP_UNUSABLE;
270 hdr = (void *)skb->data;
271
256 /* 272 /*
257 * Let TKIP code verify IV, but skip decryption. 273 * Let TKIP code verify IV, but skip decryption.
258 * In the case where hardware checks the IV as well, 274 * In the case where hardware checks the IV as well,
@@ -413,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
413 429
414 pos = skb_push(skb, CCMP_HDR_LEN); 430 pos = skb_push(skb, CCMP_HDR_LEN);
415 memmove(pos, pos + CCMP_HDR_LEN, hdrlen); 431 memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
432 skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
416 433
417 /* the HW only needs room for the IV, but not the actual IV */ 434 /* the HW only needs room for the IV, but not the actual IV */
418 if (info->control.hw_key && 435 if (info->control.hw_key &&
@@ -484,6 +501,14 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
484 if (!rx->sta || data_len < 0) 501 if (!rx->sta || data_len < 0)
485 return RX_DROP_UNUSABLE; 502 return RX_DROP_UNUSABLE;
486 503
504 if (status->flag & RX_FLAG_DECRYPTED) {
505 if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN))
506 return RX_DROP_UNUSABLE;
507 } else {
508 if (skb_linearize(rx->skb))
509 return RX_DROP_UNUSABLE;
510 }
511
487 ccmp_hdr2pn(pn, skb->data + hdrlen); 512 ccmp_hdr2pn(pn, skb->data + hdrlen);
488 513
489 queue = rx->security_idx; 514 queue = rx->security_idx;
@@ -509,7 +534,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
509 memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); 534 memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
510 535
511 /* Remove CCMP header and MIC */ 536 /* Remove CCMP header and MIC */
512 skb_trim(skb, skb->len - CCMP_MIC_LEN); 537 if (pskb_trim(skb, skb->len - CCMP_MIC_LEN))
538 return RX_DROP_UNUSABLE;
513 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); 539 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
514 skb_pull(skb, CCMP_HDR_LEN); 540 skb_pull(skb, CCMP_HDR_LEN);
515 541
@@ -609,6 +635,8 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
609 if (!ieee80211_is_mgmt(hdr->frame_control)) 635 if (!ieee80211_is_mgmt(hdr->frame_control))
610 return RX_CONTINUE; 636 return RX_CONTINUE;
611 637
638 /* management frames are already linear */
639
612 if (skb->len < 24 + sizeof(*mmie)) 640 if (skb->len < 24 + sizeof(*mmie))
613 return RX_DROP_UNUSABLE; 641 return RX_DROP_UNUSABLE;
614 642
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
new file mode 100644
index 000000000000..a967ddaa4e2f
--- /dev/null
+++ b/net/mac802154/Kconfig
@@ -0,0 +1,16 @@
1config MAC802154
2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
3 depends on IEEE802154 && EXPERIMENTAL
4 select CRC_CCITT
5 ---help---
6 This option enables the hardware independent IEEE 802.15.4
7 networking stack for SoftMAC devices (the ones implementing
8 only PHY level of IEEE 802.15.4 standard).
9
10 Note: this implementation is neither certified, nor feature
11 complete! Compatibility with other implementations hasn't
12 been tested yet!
13
14 If you plan to use HardMAC IEEE 802.15.4 devices, you can
15	  say N here. Alternatively you can say M to compile it as
16 module.
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
new file mode 100644
index 000000000000..ec1bd3fc1273
--- /dev/null
+++ b/net/mac802154/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_MAC802154) += mac802154.o
2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
new file mode 100644
index 000000000000..e3edfb0661b0
--- /dev/null
+++ b/net/mac802154/ieee802154_dev.c
@@ -0,0 +1,294 @@
1/*
2 * Copyright (C) 2007-2012 Siemens AG
3 *
4 * Written by:
5 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
6 *
7 * Based on the code from 'linux-zigbee.sourceforge.net' project.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/netdevice.h>
26
27#include <net/netlink.h>
28#include <linux/nl802154.h>
29#include <net/mac802154.h>
30#include <net/route.h>
31#include <net/wpan-phy.h>
32
33#include "mac802154.h"
34
35int mac802154_slave_open(struct net_device *dev)
36{
37 struct mac802154_sub_if_data *priv = netdev_priv(dev);
38 struct mac802154_priv *ipriv = priv->hw;
39 int res = 0;
40
41 if (ipriv->open_count++ == 0) {
42 res = ipriv->ops->start(&ipriv->hw);
43 WARN_ON(res);
44 if (res)
45 goto err;
46 }
47
48 if (ipriv->ops->ieee_addr) {
49 res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr);
50 WARN_ON(res);
51 if (res)
52 goto err;
53 mac802154_dev_set_ieee_addr(dev);
54 }
55
56 netif_start_queue(dev);
57 return 0;
58err:
59 priv->hw->open_count--;
60
61 return res;
62}
63
64int mac802154_slave_close(struct net_device *dev)
65{
66 struct mac802154_sub_if_data *priv = netdev_priv(dev);
67 struct mac802154_priv *ipriv = priv->hw;
68
69 netif_stop_queue(dev);
70
71 if (!--ipriv->open_count)
72 ipriv->ops->stop(&ipriv->hw);
73
74 return 0;
75}
76
/* Bind a freshly allocated net_device to a wpan_phy and publish it.
 *
 * Fails with -ENODEV when the master device is not (or no longer) in the
 * running state.  On success the slave is on priv->slaves; on failure the
 * caller still owns the net_device and must free it.
 */
static int
mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev)
{
	struct mac802154_sub_if_data *priv;
	struct mac802154_priv *ipriv;
	int err;

	ipriv = wpan_phy_priv(phy);

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->hw = ipriv;

	/* Reserve headroom the driver asked for on every tx skb. */
	dev->needed_headroom = ipriv->hw.extra_tx_headroom;

	SET_NETDEV_DEV(dev, &ipriv->phy->dev);

	/* Refuse new slaves once the master is stopped.  NOTE(review): the
	 * running flag is re-checked nowhere after this unlock, so a
	 * concurrent unregister between here and register_netdev() relies on
	 * the RTNL serialization below — confirm. */
	mutex_lock(&ipriv->slaves_mtx);
	if (!ipriv->running) {
		mutex_unlock(&ipriv->slaves_mtx);
		return -ENODEV;
	}
	mutex_unlock(&ipriv->slaves_mtx);

	err = register_netdev(dev);
	if (err < 0)
		return err;

	/* Slaves list is modified under RTNL + slaves_mtx (see mac802154.h). */
	rtnl_lock();
	mutex_lock(&ipriv->slaves_mtx);
	list_add_tail_rcu(&priv->list, &ipriv->slaves);
	mutex_unlock(&ipriv->slaves_mtx);
	rtnl_unlock();

	return 0;
}
113
/* wpan_phy del_iface callback: tear down one slave interface.
 *
 * Caller must hold the RTNL (asserted below).  The slave is unlinked from
 * the RCU-protected slaves list first; synchronize_rcu() then guarantees no
 * reader still sees it before the net_device is unregistered.
 */
static void
mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
{
	struct mac802154_sub_if_data *sdata;
	ASSERT_RTNL();

	sdata = netdev_priv(dev);

	/* The slave must belong to this phy. */
	BUG_ON(sdata->hw->phy != phy);

	mutex_lock(&sdata->hw->slaves_mtx);
	list_del_rcu(&sdata->list);
	mutex_unlock(&sdata->hw->slaves_mtx);

	synchronize_rcu();
	unregister_netdevice(sdata->dev);
}
131
132static struct net_device *
133mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
134{
135 struct net_device *dev;
136 int err = -ENOMEM;
137
138 switch (type) {
139 case IEEE802154_DEV_MONITOR:
140 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
141 name, mac802154_monitor_setup);
142 break;
143 default:
144 dev = NULL;
145 err = -EINVAL;
146 break;
147 }
148 if (!dev)
149 goto err;
150
151 err = mac802154_netdev_register(phy, dev);
152 if (err)
153 goto err_free;
154
155 dev_hold(dev); /* we return an incremented device refcount */
156 return dev;
157
158err_free:
159 free_netdev(dev);
160err:
161 return ERR_PTR(err);
162}
163
164struct ieee802154_dev *
165ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
166{
167 struct wpan_phy *phy;
168 struct mac802154_priv *priv;
169 size_t priv_size;
170
171 if (!ops || !ops->xmit || !ops->ed || !ops->start ||
172 !ops->stop || !ops->set_channel) {
173 printk(KERN_ERR
174 "undefined IEEE802.15.4 device operations\n");
175 return NULL;
176 }
177
178 /* Ensure 32-byte alignment of our private data and hw private data.
179 * We use the wpan_phy priv data for both our mac802154_priv and for
180 * the driver's private data
181 *
182 * in memory it'll be like this:
183 *
184 * +-----------------------+
185 * | struct wpan_phy |
186 * +-----------------------+
187 * | struct mac802154_priv |
188 * +-----------------------+
189 * | driver's private data |
190 * +-----------------------+
191 *
192 * Due to ieee802154 layer isn't aware of driver and MAC structures,
193 * so lets allign them here.
194 */
195
196 priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len;
197
198 phy = wpan_phy_alloc(priv_size);
199 if (!phy) {
200 printk(KERN_ERR
201 "failure to allocate master IEEE802.15.4 device\n");
202 return NULL;
203 }
204
205 priv = wpan_phy_priv(phy);
206 priv->hw.phy = priv->phy = phy;
207 priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
208 priv->ops = ops;
209
210 INIT_LIST_HEAD(&priv->slaves);
211 mutex_init(&priv->slaves_mtx);
212
213 return &priv->hw;
214}
215EXPORT_SYMBOL(ieee802154_alloc_device);
216
217void ieee802154_free_device(struct ieee802154_dev *hw)
218{
219 struct mac802154_priv *priv = mac802154_to_priv(hw);
220
221 BUG_ON(!list_empty(&priv->slaves));
222
223 wpan_phy_free(priv->phy);
224
225 mutex_destroy(&priv->slaves_mtx);
226}
227EXPORT_SYMBOL(ieee802154_free_device);
228
/* Register a master device with the wpan_phy layer and mark it running.
 *
 * Creates the per-device single-threaded workqueue used for rx/tx
 * deferral, wires up the add/del interface callbacks, and registers the
 * phy.  Returns 0 or a negative error; on failure the workqueue is torn
 * down again.
 */
int ieee802154_register_device(struct ieee802154_dev *dev)
{
	struct mac802154_priv *priv = mac802154_to_priv(dev);
	int rc = -ENOMEM;

	/* Single-threaded: keeps rx/tx work strictly ordered per device. */
	priv->dev_workqueue =
		create_singlethread_workqueue(wpan_phy_name(priv->phy));
	if (!priv->dev_workqueue)
		goto out;

	wpan_phy_set_dev(priv->phy, priv->hw.parent);

	priv->phy->add_iface = mac802154_add_iface;
	priv->phy->del_iface = mac802154_del_iface;

	rc = wpan_phy_register(priv->phy);
	if (rc < 0)
		goto out_wq;

	/* running is modified under RTNL + slaves_mtx (see mac802154.h). */
	rtnl_lock();

	mutex_lock(&priv->slaves_mtx);
	priv->running = MAC802154_DEVICE_RUN;
	mutex_unlock(&priv->slaves_mtx);

	rtnl_unlock();

	return 0;

out_wq:
	destroy_workqueue(priv->dev_workqueue);
out:
	return rc;
}
EXPORT_SYMBOL(ieee802154_register_device);
264
/* Unregister a master device: stop accepting new slaves, drain pending
 * work, unregister every remaining slave, then drop the phy.
 */
void ieee802154_unregister_device(struct ieee802154_dev *dev)
{
	struct mac802154_priv *priv = mac802154_to_priv(dev);
	struct mac802154_sub_if_data *sdata, *next;

	/* Finish all deferred rx/tx before slaves go away. */
	flush_workqueue(priv->dev_workqueue);
	destroy_workqueue(priv->dev_workqueue);

	rtnl_lock();

	/* Blocks mac802154_netdev_register() from adding new slaves. */
	mutex_lock(&priv->slaves_mtx);
	priv->running = MAC802154_DEVICE_STOPPED;
	mutex_unlock(&priv->slaves_mtx);

	/* NOTE(review): plain list_del (not _rcu) with no synchronize_rcu
	 * here, unlike mac802154_del_iface — presumably safe because the
	 * workqueue is already gone; confirm no RCU readers remain. */
	list_for_each_entry_safe(sdata, next, &priv->slaves, list) {
		mutex_lock(&sdata->hw->slaves_mtx);
		list_del(&sdata->list);
		mutex_unlock(&sdata->hw->slaves_mtx);

		unregister_netdevice(sdata->dev);
	}

	rtnl_unlock();

	wpan_phy_unregister(priv->phy);
}
EXPORT_SYMBOL(ieee802154_unregister_device);
292
293MODULE_DESCRIPTION("IEEE 802.15.4 implementation");
294MODULE_LICENSE("GPL v2");
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
new file mode 100644
index 000000000000..789d9c948aec
--- /dev/null
+++ b/net/mac802154/mac802154.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright (C) 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
19 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
20 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
#ifndef MAC802154_H
#define MAC802154_H

/* mac802154 device private data */
struct mac802154_priv {
	struct ieee802154_dev hw;	/* handle exposed to drivers */
	struct ieee802154_ops *ops;	/* driver callbacks */

	/* ieee802154 phy */
	struct wpan_phy *phy;

	/* number of currently open slave interfaces; the transceiver is
	 * started on 0 -> 1 and stopped on 1 -> 0 */
	int open_count;

	/* As in mac80211 slaves list is modified:
	 * 1) under the RTNL
	 * 2) protected by slaves_mtx;
	 * 3) in an RCU manner
	 *
	 * So atomic readers can use any of this protection methods.
	 */
	struct list_head slaves;
	struct mutex slaves_mtx;

	/* This one is used for scanning and other jobs not to be interfered
	 * with serial driver.
	 */
	struct workqueue_struct *dev_workqueue;

	/* SoftMAC device is registered and running. One can add subinterfaces.
	 * This flag should be modified under slaves_mtx and RTNL, so you can
	 * read them using any of protection methods.
	 */
	bool running;
};

/* values for mac802154_priv.running */
#define MAC802154_DEVICE_STOPPED 0x00
#define MAC802154_DEVICE_RUN 0x01

/* Slave interface definition.
 *
 * Slaves represent typical network interfaces available from userspace.
 * Each ieee802154 device/transceiver may have several slaves and able
 * to be associated with several networks at the same time.
 */
struct mac802154_sub_if_data {
	struct list_head list; /* the ieee802154_priv->slaves list */

	struct mac802154_priv *hw;
	struct net_device *dev;

	int type;	/* IEEE802154_DEV_* interface type */

	/* protects the MIB fields below */
	spinlock_t mib_lock;

	__le16 pan_id;
	__le16 short_addr;

	u8 chan;
	u8 page;

	/* MAC BSN field */
	u8 bsn;
	/* MAC DSN field */
	u8 dsn;
};

/* map a driver-visible ieee802154_dev back to its mac802154_priv */
#define mac802154_to_priv(_hw)	container_of(_hw, struct mac802154_priv, hw)

/* how often a frame is re-queued before it is dropped (see tx.c) */
#define MAC802154_MAX_XMIT_ATTEMPTS	3

#define MAC802154_CHAN_NONE	(~(u8)0) /* No channel is assigned */

extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;

int mac802154_slave_open(struct net_device *dev);
int mac802154_slave_close(struct net_device *dev);

void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
void mac802154_monitor_setup(struct net_device *dev);

netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
			 u8 page, u8 chan);

/* MIB callbacks */
void mac802154_dev_set_ieee_addr(struct net_device *dev);

#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
new file mode 100644
index 000000000000..7a5d0e052cd7
--- /dev/null
+++ b/net/mac802154/mac_cmd.c
@@ -0,0 +1,45 @@
1/*
2 * MAC commands interface
3 *
4 * Copyright 2007-2012 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Sergey Lapin <slapin@ossfans.org>
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
23 */
24
25#include <linux/skbuff.h>
26#include <linux/if_arp.h>
27
28#include <net/ieee802154_netdev.h>
29#include <net/wpan-phy.h>
30#include <net/mac802154.h>
31
32#include "mac802154.h"
33
34struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
35{
36 struct mac802154_sub_if_data *priv = netdev_priv(dev);
37
38 BUG_ON(dev->type != ARPHRD_IEEE802154);
39
40 return to_phy(get_device(&priv->hw->phy->dev));
41}
42
43struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
44 .get_phy = mac802154_get_phy,
45};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
new file mode 100644
index 000000000000..ab59821ec729
--- /dev/null
+++ b/net/mac802154/mib.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
19 * Sergey Lapin <slapin@ossfans.org>
20 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/if_arp.h>
25
26#include <net/mac802154.h>
27#include <net/wpan-phy.h>
28
29#include "mac802154.h"
30
/* Deferred HW-address-filter update, processed on the device workqueue
 * because set_hw_addr_filt() may sleep while callers may be atomic. */
struct hw_addr_filt_notify_work {
	struct work_struct work;
	struct net_device *dev;
	unsigned long changed;	/* IEEE802515_AFILT_* change mask */
};
36
37struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
38{
39 struct mac802154_sub_if_data *priv = netdev_priv(dev);
40
41 BUG_ON(dev->type != ARPHRD_IEEE802154);
42
43 return priv->hw;
44}
45
46static void hw_addr_notify(struct work_struct *work)
47{
48 struct hw_addr_filt_notify_work *nw = container_of(work,
49 struct hw_addr_filt_notify_work, work);
50 struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
51 int res;
52
53 res = hw->ops->set_hw_addr_filt(&hw->hw,
54 &hw->hw.hw_filt,
55 nw->changed);
56 if (res)
57 pr_debug("failed changed mask %lx\n", nw->changed);
58
59 kfree(nw);
60
61 return;
62}
63
64static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
65{
66 struct mac802154_sub_if_data *priv = netdev_priv(dev);
67 struct hw_addr_filt_notify_work *work;
68
69 work = kzalloc(sizeof(*work), GFP_ATOMIC);
70 if (!work)
71 return;
72
73 INIT_WORK(&work->work, hw_addr_notify);
74 work->dev = dev;
75 work->changed = changed;
76 queue_work(priv->hw->dev_workqueue, &work->work);
77
78 return;
79}
80
/* Mirror dev->dev_addr into the driver's HW address filter, if the driver
 * supports filtering and the address actually changed.  The driver is
 * notified asynchronously via the device workqueue.
 */
void mac802154_dev_set_ieee_addr(struct net_device *dev)
{
	struct mac802154_sub_if_data *priv = netdev_priv(dev);
	struct mac802154_priv *mac = priv->hw;

	/* note: IEEE802515_AFILT_... is the flag's spelling in nl802154.h */
	if (mac->ops->set_hw_addr_filt &&
	    memcmp(mac->hw.hw_filt.ieee_addr,
		   dev->dev_addr, IEEE802154_ADDR_LEN)) {
		memcpy(mac->hw.hw_filt.ieee_addr,
		       dev->dev_addr, IEEE802154_ADDR_LEN);
		set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
	}
}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
new file mode 100644
index 000000000000..434a26f76a80
--- /dev/null
+++ b/net/mac802154/monitor.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2007, 2008, 2009 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
19 * Sergey Lapin <slapin@ossfans.org>
20 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/netdevice.h>
25#include <linux/skbuff.h>
26#include <linux/if_arp.h>
27#include <linux/crc-ccitt.h>
28
29#include <net/ieee802154.h>
30#include <net/mac802154.h>
31#include <net/netlink.h>
32#include <net/wpan-phy.h>
33#include <linux/nl802154.h>
34
35#include "mac802154.h"
36
37static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb,
38 struct net_device *dev)
39{
40 struct mac802154_sub_if_data *priv;
41 u8 chan, page;
42
43 priv = netdev_priv(dev);
44
45 /* FIXME: locking */
46 chan = priv->hw->phy->current_channel;
47 page = priv->hw->phy->current_page;
48
49 if (chan == MAC802154_CHAN_NONE) /* not initialized */
50 return NETDEV_TX_OK;
51
52 if (WARN_ON(page >= WPAN_NUM_PAGES) ||
53 WARN_ON(chan >= WPAN_NUM_CHANNELS))
54 return NETDEV_TX_OK;
55
56 skb->skb_iif = dev->ifindex;
57 dev->stats.tx_packets++;
58 dev->stats.tx_bytes += skb->len;
59
60 return mac802154_tx(priv->hw, skb, page, chan);
61}
62
63
64void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
65{
66 struct sk_buff *skb2;
67 struct mac802154_sub_if_data *sdata;
68 u16 crc = crc_ccitt(0, skb->data, skb->len);
69 u8 *data;
70
71 rcu_read_lock();
72 list_for_each_entry_rcu(sdata, &priv->slaves, list) {
73 if (sdata->type != IEEE802154_DEV_MONITOR)
74 continue;
75
76 skb2 = skb_clone(skb, GFP_ATOMIC);
77 skb2->dev = sdata->dev;
78 skb2->pkt_type = PACKET_HOST;
79 data = skb_put(skb2, 2);
80 data[0] = crc & 0xff;
81 data[1] = crc >> 8;
82
83 netif_rx_ni(skb2);
84 }
85 rcu_read_unlock();
86}
87
static const struct net_device_ops mac802154_monitor_ops = {
	.ndo_open = mac802154_slave_open,
	.ndo_stop = mac802154_slave_close,
	.ndo_start_xmit = mac802154_monitor_xmit,
};

/* alloc_netdev() setup callback: initialize a monitor slave interface.
 * Monitors have no L2 address or header; frames pass through raw.
 */
void mac802154_monitor_setup(struct net_device *dev)
{
	struct mac802154_sub_if_data *priv;

	dev->addr_len = 0;
	dev->hard_header_len = 0;
	dev->needed_tailroom = 2; /* room for FCS */
	dev->mtu = IEEE802154_MTU;
	dev->tx_queue_len = 10;
	dev->type = ARPHRD_IEEE802154_MONITOR;
	dev->flags = IFF_NOARP | IFF_BROADCAST;
	dev->watchdog_timeo = 0;

	dev->destructor = free_netdev;
	dev->netdev_ops = &mac802154_monitor_ops;
	dev->ml_priv = &mac802154_mlme_reduced;

	priv = netdev_priv(dev);
	priv->type = IEEE802154_DEV_MONITOR;

	priv->chan = MAC802154_CHAN_NONE; /* not initialized */
	priv->page = 0;
}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
new file mode 100644
index 000000000000..4a7d76d4f8bc
--- /dev/null
+++ b/net/mac802154/rx.c
@@ -0,0 +1,114 @@
1/*
2 * Copyright (C) 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
19 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
20 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/workqueue.h>
27#include <linux/netdevice.h>
28#include <linux/crc-ccitt.h>
29
30#include <net/mac802154.h>
31#include <net/ieee802154_netdev.h>
32
33#include "mac802154.h"
34
35/* The IEEE 802.15.4 standard defines 4 MAC packet types:
36 * - beacon frame
37 * - MAC command frame
38 * - acknowledgement frame
39 * - data frame
40 *
41 * and only the data frame should be pushed to the upper layers, other types
42 * are just internal MAC layer management information. So only data packets
43 * are going to be sent to the networking queue, all other will be processed
44 * right here by using the device workqueue.
45 */
/* One received frame, deferred to the device workqueue for processing
 * (ieee802154_rx_irqsafe may be called from hard-irq context). */
struct rx_work {
	struct sk_buff *skb;
	struct work_struct work;
	struct ieee802154_dev *dev;
	u8 lqi;	/* link quality indication reported by the driver */
};
52
53static void
54mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
55{
56 struct mac802154_priv *priv = mac802154_to_priv(hw);
57
58 mac_cb(skb)->lqi = lqi;
59 skb->protocol = htons(ETH_P_IEEE802154);
60 skb_reset_mac_header(skb);
61
62 BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
63
64 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
65 u16 crc;
66
67 if (skb->len < 2) {
68 pr_debug("got invalid frame\n");
69 goto out;
70 }
71 crc = crc_ccitt(0, skb->data, skb->len);
72 if (crc) {
73 pr_debug("CRC mismatch\n");
74 goto out;
75 }
76 skb_trim(skb, skb->len - 2); /* CRC */
77 }
78
79 mac802154_monitors_rx(priv, skb);
80out:
81 dev_kfree_skb(skb);
82 return;
83}
84
85static void mac802154_rx_worker(struct work_struct *work)
86{
87 struct rx_work *rw = container_of(work, struct rx_work, work);
88 struct sk_buff *skb = rw->skb;
89
90 mac802154_subif_rx(rw->dev, skb, rw->lqi);
91 kfree(rw);
92}
93
94void
95ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
96{
97 struct mac802154_priv *priv = mac802154_to_priv(dev);
98 struct rx_work *work;
99
100 if (!skb)
101 return;
102
103 work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC);
104 if (!work)
105 return;
106
107 INIT_WORK(&work->work, mac802154_rx_worker);
108 work->skb = skb;
109 work->dev = dev;
110 work->lqi = lqi;
111
112 queue_work(priv->dev_workqueue, &work->work);
113}
114EXPORT_SYMBOL(ieee802154_rx_irqsafe);
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
new file mode 100644
index 000000000000..8781d8f904d9
--- /dev/null
+++ b/net/mac802154/tx.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
19 * Sergey Lapin <slapin@ossfans.org>
20 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/netdevice.h>
25#include <linux/if_arp.h>
26#include <linux/crc-ccitt.h>
27
28#include <net/mac802154.h>
29#include <net/wpan-phy.h>
30
31#include "mac802154.h"
32
33/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
34 * packets through the workqueue.
35 */
/* One frame queued for transmission on the device workqueue; carries the
 * page/channel it must go out on and the retry counter. */
struct xmit_work {
	struct sk_buff *skb;
	struct work_struct work;
	struct mac802154_priv *priv;
	u8 chan;
	u8 page;
	u8 xmit_attempts;	/* bounded by MAC802154_MAX_XMIT_ATTEMPTS */
};
44
/* Workqueue handler: retune the transceiver if needed and transmit one
 * frame.  On failure the work item re-queues itself up to
 * MAC802154_MAX_XMIT_ATTEMPTS times before the frame is dropped.
 */
static void mac802154_xmit_worker(struct work_struct *work)
{
	struct xmit_work *xw = container_of(work, struct xmit_work, work);
	int res;

	/* pib_lock guards current_channel/current_page against concurrent
	 * retunes */
	mutex_lock(&xw->priv->phy->pib_lock);
	if (xw->priv->phy->current_channel != xw->chan ||
	    xw->priv->phy->current_page != xw->page) {
		res = xw->priv->ops->set_channel(&xw->priv->hw,
						  xw->page,
						  xw->chan);
		if (res) {
			pr_debug("set_channel failed\n");
			goto out;
		}
	}

	res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);

out:
	mutex_unlock(&xw->priv->phy->pib_lock);

	if (res) {
		/* retry by re-queueing this same work item; both skb and
		 * xw stay alive across attempts */
		if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) {
			queue_work(xw->priv->dev_workqueue, &xw->work);
			return;
		} else
			pr_debug("transmission failed for %d times",
				 MAC802154_MAX_XMIT_ATTEMPTS);
	}

	dev_kfree_skb(xw->skb);

	kfree(xw);
}
80
81netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
82 u8 page, u8 chan)
83{
84 struct xmit_work *work;
85
86 if (!(priv->phy->channels_supported[page] & (1 << chan)))
87 WARN_ON(1);
88 return NETDEV_TX_OK;
89
90 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
91 u16 crc = crc_ccitt(0, skb->data, skb->len);
92 u8 *data = skb_put(skb, 2);
93 data[0] = crc & 0xff;
94 data[1] = crc >> 8;
95 }
96
97 if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
98 dev_kfree_skb(skb);
99 return NETDEV_TX_OK;
100 }
101
102 work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
103 if (!work)
104 return NETDEV_TX_BUSY;
105
106 INIT_WORK(&work->work, mac802154_xmit_worker);
107 work->skb = skb;
108 work->priv = priv;
109 work->page = page;
110 work->chan = chan;
111 work->xmit_attempts = 0;
112
113 queue_work(priv->dev_workqueue, &work->work);
114
115 return NETDEV_TX_OK;
116}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f8ac4ef0b794..209c1ed43368 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -103,6 +103,16 @@ config NF_CONNTRACK_EVENTS
103 103
104 If unsure, say `N'. 104 If unsure, say `N'.
105 105
106config NF_CONNTRACK_TIMEOUT
107 bool 'Connection tracking timeout'
108 depends on NETFILTER_ADVANCED
109 help
110 This option enables support for connection tracking timeout
111 extension. This allows you to attach timeout policies to flow
112 via the CT target.
113
114 If unsure, say `N'.
115
106config NF_CONNTRACK_TIMESTAMP 116config NF_CONNTRACK_TIMESTAMP
107 bool 'Connection tracking timestamping' 117 bool 'Connection tracking timestamping'
108 depends on NETFILTER_ADVANCED 118 depends on NETFILTER_ADVANCED
@@ -314,6 +324,17 @@ config NF_CT_NETLINK
314 help 324 help
315 This option enables support for a netlink-based userspace interface 325 This option enables support for a netlink-based userspace interface
316 326
327config NF_CT_NETLINK_TIMEOUT
328 tristate 'Connection tracking timeout tuning via Netlink'
329 select NETFILTER_NETLINK
330 depends on NETFILTER_ADVANCED
331 help
332 This option enables support for connection tracking timeout
333 fine-grain tuning. This allows you to attach specific timeout
334 policies to flows, instead of using the global timeout policy.
335
336 If unsure, say `N'.
337
317endif # NF_CONNTRACK 338endif # NF_CONNTRACK
318 339
319# transparent proxy support 340# transparent proxy support
@@ -488,6 +509,21 @@ config NETFILTER_XT_TARGET_HL
488 since you can easily create immortal packets that loop 509 since you can easily create immortal packets that loop
489 forever on the network. 510 forever on the network.
490 511
512config NETFILTER_XT_TARGET_HMARK
513 tristate '"HMARK" target support'
514 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
515 depends on NETFILTER_ADVANCED
516 ---help---
517 This option adds the "HMARK" target.
518
519 The target allows you to create rules in the "raw" and "mangle" tables
520 which set the skbuff mark by means of hash calculation within a given
521 range. The nfmark can influence the routing method (see "Use netfilter
522 MARK value as routing key") and can also be used by other subsystems to
523 change their behaviour.
524
525 To compile it as a module, choose M here. If unsure, say N.
526
491config NETFILTER_XT_TARGET_IDLETIMER 527config NETFILTER_XT_TARGET_IDLETIMER
492 tristate "IDLETIMER target support" 528 tristate "IDLETIMER target support"
493 depends on NETFILTER_ADVANCED 529 depends on NETFILTER_ADVANCED
@@ -524,6 +560,15 @@ config NETFILTER_XT_TARGET_LED
524 For more information on the LEDs available on your system, see 560 For more information on the LEDs available on your system, see
525 Documentation/leds/leds-class.txt 561 Documentation/leds/leds-class.txt
526 562
563config NETFILTER_XT_TARGET_LOG
564 tristate "LOG target support"
565 default m if NETFILTER_ADVANCED=n
566 help
567 This option adds a `LOG' target, which allows you to create rules in
568 any iptables table which records the packet header to the syslog.
569
570 To compile it as a module, choose M here. If unsure, say N.
571
527config NETFILTER_XT_TARGET_MARK 572config NETFILTER_XT_TARGET_MARK
528 tristate '"MARK" target support' 573 tristate '"MARK" target support'
529 depends on NETFILTER_ADVANCED 574 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 40f4c3d636c5..4e7960cc7b97 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o 1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
2 2
3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o 3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o 5nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
5nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 6nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
6 7
@@ -22,6 +23,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
22 23
23# netlink interface for nf_conntrack 24# netlink interface for nf_conntrack
24obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o 25obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
26obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
25 27
26# connection tracking helpers 28# connection tracking helpers
27nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o 29nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
@@ -57,7 +59,9 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
57obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o 59obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
58obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 60obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
59obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 61obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
62obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
60obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 63obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
64obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
61obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 65obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
62obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o 66obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
63obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o 67obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index b4e8ff05b301..e19f3653db23 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -56,7 +56,7 @@ struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
56EXPORT_SYMBOL(nf_hooks); 56EXPORT_SYMBOL(nf_hooks);
57 57
58#if defined(CONFIG_JUMP_LABEL) 58#if defined(CONFIG_JUMP_LABEL)
59struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 59struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
60EXPORT_SYMBOL(nf_hooks_needed); 60EXPORT_SYMBOL(nf_hooks_needed);
61#endif 61#endif
62 62
@@ -77,7 +77,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
77 list_add_rcu(&reg->list, elem->list.prev); 77 list_add_rcu(&reg->list, elem->list.prev);
78 mutex_unlock(&nf_hook_mutex); 78 mutex_unlock(&nf_hook_mutex);
79#if defined(CONFIG_JUMP_LABEL) 79#if defined(CONFIG_JUMP_LABEL)
80 jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); 80 static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
81#endif 81#endif
82 return 0; 82 return 0;
83} 83}
@@ -89,7 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
89 list_del_rcu(&reg->list); 89 list_del_rcu(&reg->list);
90 mutex_unlock(&nf_hook_mutex); 90 mutex_unlock(&nf_hook_mutex);
91#if defined(CONFIG_JUMP_LABEL) 91#if defined(CONFIG_JUMP_LABEL)
92 jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); 92 static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
93#endif 93#endif
94 synchronize_net(); 94 synchronize_net();
95} 95}
@@ -290,12 +290,3 @@ void __init netfilter_init(void)
290 if (netfilter_log_init() < 0) 290 if (netfilter_log_init() < 0)
291 panic("cannot initialize nf_log"); 291 panic("cannot initialize nf_log");
292} 292}
293
294#ifdef CONFIG_SYSCTL
295struct ctl_path nf_net_netfilter_sysctl_path[] = {
296 { .procname = "net", },
297 { .procname = "netfilter", },
298 { }
299};
300EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
301#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e3e73997c3be..7e1b061aeeba 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set,
109 } else 109 } else
110 goto nla_put_failure; 110 goto nla_put_failure;
111 } 111 }
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 112 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
113 htonl(map->first_ip + id * map->hosts)); 113 htonl(map->first_ip + id * map->hosts)))
114 goto nla_put_failure;
114 ipset_nest_end(skb, nested); 115 ipset_nest_end(skb, nested);
115 } 116 }
116 ipset_nest_end(skb, atd); 117 ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set,
194 } else 195 } else
195 goto nla_put_failure; 196 goto nla_put_failure;
196 } 197 }
197 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 198 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
198 htonl(map->first_ip + id * map->hosts)); 199 htonl(map->first_ip + id * map->hosts)) ||
199 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 200 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
200 htonl(ip_set_timeout_get(members[id]))); 201 htonl(ip_set_timeout_get(members[id]))))
202 goto nla_put_failure;
201 ipset_nest_end(skb, nested); 203 ipset_nest_end(skb, nested);
202 } 204 }
203 ipset_nest_end(skb, adt); 205 ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
334 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 336 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
335 if (!nested) 337 if (!nested)
336 goto nla_put_failure; 338 goto nla_put_failure;
337 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); 339 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
338 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); 340 nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
339 if (map->netmask != 32) 341 (map->netmask != 32 &&
340 NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); 342 nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
341 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 343 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
342 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 344 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
343 htonl(sizeof(*map) + map->memsize)); 345 htonl(sizeof(*map) + map->memsize)) ||
344 if (with_timeout(map->timeout)) 346 (with_timeout(map->timeout) &&
345 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 347 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
348 goto nla_put_failure;
346 ipset_nest_end(skb, nested); 349 ipset_nest_end(skb, nested);
347 350
348 return 0; 351 return 0;
@@ -442,7 +445,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
442 map->timeout = IPSET_NO_TIMEOUT; 445 map->timeout = IPSET_NO_TIMEOUT;
443 446
444 set->data = map; 447 set->data = map;
445 set->family = AF_INET; 448 set->family = NFPROTO_IPV4;
446 449
447 return true; 450 return true;
448} 451}
@@ -550,7 +553,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
550 .protocol = IPSET_PROTOCOL, 553 .protocol = IPSET_PROTOCOL,
551 .features = IPSET_TYPE_IP, 554 .features = IPSET_TYPE_IP,
552 .dimension = IPSET_DIM_ONE, 555 .dimension = IPSET_DIM_ONE,
553 .family = AF_INET, 556 .family = NFPROTO_IPV4,
554 .revision_min = 0, 557 .revision_min = 0,
555 .revision_max = 0, 558 .revision_max = 0,
556 .create = bitmap_ip_create, 559 .create = bitmap_ip_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 56096f544978..d7eaf10edb6d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -111,7 +111,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
111 return -EAGAIN; 111 return -EAGAIN;
112 case MAC_FILLED: 112 case MAC_FILLED:
113 return data->ether == NULL || 113 return data->ether == NULL ||
114 compare_ether_addr(data->ether, elem->ether) == 0; 114 ether_addr_equal(data->ether, elem->ether);
115 } 115 }
116 return 0; 116 return 0;
117} 117}
@@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set,
186 } else 186 } else
187 goto nla_put_failure; 187 goto nla_put_failure;
188 } 188 }
189 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 189 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
190 htonl(map->first_ip + id)); 190 htonl(map->first_ip + id)) ||
191 if (elem->match == MAC_FILLED) 191 (elem->match == MAC_FILLED &&
192 NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, 192 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
193 elem->ether); 193 elem->ether)))
194 goto nla_put_failure;
194 ipset_nest_end(skb, nested); 195 ipset_nest_end(skb, nested);
195 } 196 }
196 ipset_nest_end(skb, atd); 197 ipset_nest_end(skb, atd);
@@ -224,7 +225,7 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
224 return -EAGAIN; 225 return -EAGAIN;
225 case MAC_FILLED: 226 case MAC_FILLED:
226 return (data->ether == NULL || 227 return (data->ether == NULL ||
227 compare_ether_addr(data->ether, elem->ether) == 0) && 228 ether_addr_equal(data->ether, elem->ether)) &&
228 !bitmap_expired(map, data->id); 229 !bitmap_expired(map, data->id);
229 } 230 }
230 return 0; 231 return 0;
@@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set,
314 } else 315 } else
315 goto nla_put_failure; 316 goto nla_put_failure;
316 } 317 }
317 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 318 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
318 htonl(map->first_ip + id)); 319 htonl(map->first_ip + id)) ||
319 if (elem->match == MAC_FILLED) 320 (elem->match == MAC_FILLED &&
320 NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, 321 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
321 elem->ether); 322 elem->ether)))
323 goto nla_put_failure;
322 timeout = elem->match == MAC_UNSET ? elem->timeout 324 timeout = elem->match == MAC_UNSET ? elem->timeout
323 : ip_set_timeout_get(elem->timeout); 325 : ip_set_timeout_get(elem->timeout);
324 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)); 326 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
327 goto nla_put_failure;
325 ipset_nest_end(skb, nested); 328 ipset_nest_end(skb, nested);
326 } 329 }
327 ipset_nest_end(skb, atd); 330 ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
438 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 441 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
439 if (!nested) 442 if (!nested)
440 goto nla_put_failure; 443 goto nla_put_failure;
441 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); 444 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
442 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); 445 nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
443 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 446 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
444 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 447 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
445 htonl(sizeof(*map) 448 htonl(sizeof(*map) +
446 + (map->last_ip - map->first_ip + 1) * map->dsize)); 449 ((map->last_ip - map->first_ip + 1) *
447 if (with_timeout(map->timeout)) 450 map->dsize))) ||
448 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 451 (with_timeout(map->timeout) &&
452 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
453 goto nla_put_failure;
449 ipset_nest_end(skb, nested); 454 ipset_nest_end(skb, nested);
450 455
451 return 0; 456 return 0;
@@ -543,7 +548,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
543 map->timeout = IPSET_NO_TIMEOUT; 548 map->timeout = IPSET_NO_TIMEOUT;
544 549
545 set->data = map; 550 set->data = map;
546 set->family = AF_INET; 551 set->family = NFPROTO_IPV4;
547 552
548 return true; 553 return true;
549} 554}
@@ -623,7 +628,7 @@ static struct ip_set_type bitmap_ipmac_type = {
623 .protocol = IPSET_PROTOCOL, 628 .protocol = IPSET_PROTOCOL,
624 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC, 629 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
625 .dimension = IPSET_DIM_TWO, 630 .dimension = IPSET_DIM_TWO,
626 .family = AF_INET, 631 .family = NFPROTO_IPV4,
627 .revision_min = 0, 632 .revision_min = 0,
628 .revision_max = 0, 633 .revision_max = 0,
629 .create = bitmap_ipmac_create, 634 .create = bitmap_ipmac_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 29ba93bb94be..b9f1fce7053b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set,
96 } else 96 } else
97 goto nla_put_failure; 97 goto nla_put_failure;
98 } 98 }
99 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, 99 if (nla_put_net16(skb, IPSET_ATTR_PORT,
100 htons(map->first_port + id)); 100 htons(map->first_port + id)))
101 goto nla_put_failure;
101 ipset_nest_end(skb, nested); 102 ipset_nest_end(skb, nested);
102 } 103 }
103 ipset_nest_end(skb, atd); 104 ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set,
183 } else 184 } else
184 goto nla_put_failure; 185 goto nla_put_failure;
185 } 186 }
186 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, 187 if (nla_put_net16(skb, IPSET_ATTR_PORT,
187 htons(map->first_port + id)); 188 htons(map->first_port + id)) ||
188 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 189 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
189 htonl(ip_set_timeout_get(members[id]))); 190 htonl(ip_set_timeout_get(members[id]))))
191 goto nla_put_failure;
190 ipset_nest_end(skb, nested); 192 ipset_nest_end(skb, nested);
191 } 193 }
192 ipset_nest_end(skb, adt); 194 ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
320 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 322 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
321 if (!nested) 323 if (!nested)
322 goto nla_put_failure; 324 goto nla_put_failure;
323 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); 325 if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
324 NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); 326 nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
325 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 327 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
326 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 328 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
327 htonl(sizeof(*map) + map->memsize)); 329 htonl(sizeof(*map) + map->memsize)) ||
328 if (with_timeout(map->timeout)) 330 (with_timeout(map->timeout) &&
329 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 331 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
332 goto nla_put_failure;
330 ipset_nest_end(skb, nested); 333 ipset_nest_end(skb, nested);
331 334
332 return 0; 335 return 0;
@@ -422,7 +425,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
422 map->timeout = IPSET_NO_TIMEOUT; 425 map->timeout = IPSET_NO_TIMEOUT;
423 426
424 set->data = map; 427 set->data = map;
425 set->family = AF_UNSPEC; 428 set->family = NFPROTO_UNSPEC;
426 429
427 return true; 430 return true;
428} 431}
@@ -483,7 +486,7 @@ static struct ip_set_type bitmap_port_type = {
483 .protocol = IPSET_PROTOCOL, 486 .protocol = IPSET_PROTOCOL,
484 .features = IPSET_TYPE_PORT, 487 .features = IPSET_TYPE_PORT,
485 .dimension = IPSET_DIM_ONE, 488 .dimension = IPSET_DIM_ONE,
486 .family = AF_UNSPEC, 489 .family = NFPROTO_UNSPEC,
487 .revision_min = 0, 490 .revision_min = 0,
488 .revision_max = 0, 491 .revision_max = 0,
489 .create = bitmap_port_create, 492 .create = bitmap_port_create,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 32dbf0fa89db..819c342f5b30 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -69,7 +69,7 @@ find_set_type(const char *name, u8 family, u8 revision)
69 69
70 list_for_each_entry_rcu(type, &ip_set_type_list, list) 70 list_for_each_entry_rcu(type, &ip_set_type_list, list)
71 if (STREQ(type->name, name) && 71 if (STREQ(type->name, name) &&
72 (type->family == family || type->family == AF_UNSPEC) && 72 (type->family == family || type->family == NFPROTO_UNSPEC) &&
73 revision >= type->revision_min && 73 revision >= type->revision_min &&
74 revision <= type->revision_max) 74 revision <= type->revision_max)
75 return type; 75 return type;
@@ -149,7 +149,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
149 rcu_read_lock(); 149 rcu_read_lock();
150 list_for_each_entry_rcu(type, &ip_set_type_list, list) 150 list_for_each_entry_rcu(type, &ip_set_type_list, list)
151 if (STREQ(type->name, name) && 151 if (STREQ(type->name, name) &&
152 (type->family == family || type->family == AF_UNSPEC)) { 152 (type->family == family || type->family == NFPROTO_UNSPEC)) {
153 found = true; 153 found = true;
154 if (type->revision_min < *min) 154 if (type->revision_min < *min)
155 *min = type->revision_min; 155 *min = type->revision_min;
@@ -164,8 +164,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
164 __find_set_type_minmax(name, family, min, max, true); 164 __find_set_type_minmax(name, family, min, max, true);
165} 165}
166 166
167#define family_name(f) ((f) == AF_INET ? "inet" : \ 167#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
168 (f) == AF_INET6 ? "inet6" : "any") 168 (f) == NFPROTO_IPV6 ? "inet6" : "any")
169 169
170/* Register a set type structure. The type is identified by 170/* Register a set type structure. The type is identified by
171 * the unique triple of name, family and revision. 171 * the unique triple of name, family and revision.
@@ -354,7 +354,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
354 pr_debug("set %s, index %u\n", set->name, index); 354 pr_debug("set %s, index %u\n", set->name, index);
355 355
356 if (opt->dim < set->type->dimension || 356 if (opt->dim < set->type->dimension ||
357 !(opt->family == set->family || set->family == AF_UNSPEC)) 357 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
358 return 0; 358 return 0;
359 359
360 read_lock_bh(&set->lock); 360 read_lock_bh(&set->lock);
@@ -387,7 +387,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
387 pr_debug("set %s, index %u\n", set->name, index); 387 pr_debug("set %s, index %u\n", set->name, index);
388 388
389 if (opt->dim < set->type->dimension || 389 if (opt->dim < set->type->dimension ||
390 !(opt->family == set->family || set->family == AF_UNSPEC)) 390 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
391 return 0; 391 return 0;
392 392
393 write_lock_bh(&set->lock); 393 write_lock_bh(&set->lock);
@@ -410,7 +410,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
410 pr_debug("set %s, index %u\n", set->name, index); 410 pr_debug("set %s, index %u\n", set->name, index);
411 411
412 if (opt->dim < set->type->dimension || 412 if (opt->dim < set->type->dimension ||
413 !(opt->family == set->family || set->family == AF_UNSPEC)) 413 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
414 return 0; 414 return 0;
415 415
416 write_lock_bh(&set->lock); 416 write_lock_bh(&set->lock);
@@ -575,7 +575,7 @@ start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
575 return NULL; 575 return NULL;
576 576
577 nfmsg = nlmsg_data(nlh); 577 nfmsg = nlmsg_data(nlh);
578 nfmsg->nfgen_family = AF_INET; 578 nfmsg->nfgen_family = NFPROTO_IPV4;
579 nfmsg->version = NFNETLINK_V0; 579 nfmsg->version = NFNETLINK_V0;
580 nfmsg->res_id = 0; 580 nfmsg->res_id = 0;
581 581
@@ -1092,19 +1092,21 @@ dump_last:
1092 ret = -EMSGSIZE; 1092 ret = -EMSGSIZE;
1093 goto release_refcount; 1093 goto release_refcount;
1094 } 1094 }
1095 NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1095 if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1096 NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name); 1096 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
1097 goto nla_put_failure;
1097 if (dump_flags & IPSET_FLAG_LIST_SETNAME) 1098 if (dump_flags & IPSET_FLAG_LIST_SETNAME)
1098 goto next_set; 1099 goto next_set;
1099 switch (cb->args[2]) { 1100 switch (cb->args[2]) {
1100 case 0: 1101 case 0:
1101 /* Core header data */ 1102 /* Core header data */
1102 NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME, 1103 if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
1103 set->type->name); 1104 set->type->name) ||
1104 NLA_PUT_U8(skb, IPSET_ATTR_FAMILY, 1105 nla_put_u8(skb, IPSET_ATTR_FAMILY,
1105 set->family); 1106 set->family) ||
1106 NLA_PUT_U8(skb, IPSET_ATTR_REVISION, 1107 nla_put_u8(skb, IPSET_ATTR_REVISION,
1107 set->revision); 1108 set->revision))
1109 goto nla_put_failure;
1108 ret = set->variant->head(set, skb); 1110 ret = set->variant->head(set, skb);
1109 if (ret < 0) 1111 if (ret < 0)
1110 goto release_refcount; 1112 goto release_refcount;
@@ -1162,9 +1164,13 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
1162 if (unlikely(protocol_failed(attr))) 1164 if (unlikely(protocol_failed(attr)))
1163 return -IPSET_ERR_PROTOCOL; 1165 return -IPSET_ERR_PROTOCOL;
1164 1166
1165 return netlink_dump_start(ctnl, skb, nlh, 1167 {
1166 ip_set_dump_start, 1168 struct netlink_dump_control c = {
1167 ip_set_dump_done, 0); 1169 .dump = ip_set_dump_start,
1170 .done = ip_set_dump_done,
1171 };
1172 return netlink_dump_start(ctnl, skb, nlh, &c);
1173 }
1168} 1174}
1169 1175
1170/* Add, del and test */ 1176/* Add, del and test */
@@ -1406,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
1406 IPSET_CMD_HEADER); 1412 IPSET_CMD_HEADER);
1407 if (!nlh2) 1413 if (!nlh2)
1408 goto nlmsg_failure; 1414 goto nlmsg_failure;
1409 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1415 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1410 NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name); 1416 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
1411 NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name); 1417 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
1412 NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family); 1418 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
1413 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision); 1419 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
1420 goto nla_put_failure;
1414 nlmsg_end(skb2, nlh2); 1421 nlmsg_end(skb2, nlh2);
1415 1422
1416 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1423 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1465,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
1465 IPSET_CMD_TYPE); 1472 IPSET_CMD_TYPE);
1466 if (!nlh2) 1473 if (!nlh2)
1467 goto nlmsg_failure; 1474 goto nlmsg_failure;
1468 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1475 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1469 NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename); 1476 nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
1470 NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family); 1477 nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
1471 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max); 1478 nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
1472 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min); 1479 nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
1480 goto nla_put_failure;
1473 nlmsg_end(skb2, nlh2); 1481 nlmsg_end(skb2, nlh2);
1474 1482
1475 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); 1483 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1513,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
1513 IPSET_CMD_PROTOCOL); 1521 IPSET_CMD_PROTOCOL);
1514 if (!nlh2) 1522 if (!nlh2)
1515 goto nlmsg_failure; 1523 goto nlmsg_failure;
1516 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1524 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
1525 goto nla_put_failure;
1517 nlmsg_end(skb2, nlh2); 1526 nlmsg_end(skb2, nlh2);
1518 1527
1519 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1528 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1609,7 +1618,7 @@ static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
1609static int 1618static int
1610ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) 1619ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1611{ 1620{
1612 unsigned *op; 1621 unsigned int *op;
1613 void *data; 1622 void *data;
1614 int copylen = *len, ret = 0; 1623 int copylen = *len, ret = 0;
1615 1624
@@ -1617,7 +1626,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1617 return -EPERM; 1626 return -EPERM;
1618 if (optval != SO_IP_SET) 1627 if (optval != SO_IP_SET)
1619 return -EBADF; 1628 return -EBADF;
1620 if (*len < sizeof(unsigned)) 1629 if (*len < sizeof(unsigned int))
1621 return -EINVAL; 1630 return -EINVAL;
1622 1631
1623 data = vmalloc(*len); 1632 data = vmalloc(*len);
@@ -1627,7 +1636,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1627 ret = -EFAULT; 1636 ret = -EFAULT;
1628 goto done; 1637 goto done;
1629 } 1638 }
1630 op = (unsigned *) data; 1639 op = (unsigned int *) data;
1631 1640
1632 if (*op < IP_SET_OP_VERSION) { 1641 if (*op < IP_SET_OP_VERSION) {
1633 /* Check the version at the beginning of operations */ 1642 /* Check the version at the beginning of operations */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 1f03556666f4..6fdf88ae2353 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -136,10 +136,10 @@ ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
136 u8 proto; 136 u8 proto;
137 137
138 switch (pf) { 138 switch (pf) {
139 case AF_INET: 139 case NFPROTO_IPV4:
140 ret = ip_set_get_ip4_port(skb, src, port, &proto); 140 ret = ip_set_get_ip4_port(skb, src, port, &proto);
141 break; 141 break;
142 case AF_INET6: 142 case NFPROTO_IPV6:
143 ret = ip_set_get_ip6_port(skb, src, port, &proto); 143 ret = ip_set_get_ip6_port(skb, src, port, &proto);
144 break; 144 break;
145 default: 145 default:
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 4015fcaf87bc..a68dbd4f1e4e 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
81static inline bool 81static inline bool
82hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) 82hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
83{ 83{
84 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 84 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
85 goto nla_put_failure;
85 return 0; 86 return 0;
86 87
87nla_put_failure: 88nla_put_failure:
@@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
94 const struct hash_ip4_telem *tdata = 95 const struct hash_ip4_telem *tdata =
95 (const struct hash_ip4_telem *)data; 96 (const struct hash_ip4_telem *)data;
96 97
97 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 98 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
98 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 99 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
99 htonl(ip_set_timeout_get(tdata->timeout))); 100 htonl(ip_set_timeout_get(tdata->timeout))))
101 goto nla_put_failure;
100 102
101 return 0; 103 return 0;
102 104
@@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix)
262static bool 264static bool
263hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) 265hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
264{ 266{
265 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 267 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
268 goto nla_put_failure;
266 return 0; 269 return 0;
267 270
268nla_put_failure: 271nla_put_failure:
@@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
275 const struct hash_ip6_telem *e = 278 const struct hash_ip6_telem *e =
276 (const struct hash_ip6_telem *)data; 279 (const struct hash_ip6_telem *)data;
277 280
278 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 281 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
279 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 282 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
280 htonl(ip_set_timeout_get(e->timeout))); 283 htonl(ip_set_timeout_get(e->timeout))))
284 goto nla_put_failure;
281 return 0; 285 return 0;
282 286
283nla_put_failure: 287nla_put_failure:
@@ -364,13 +368,14 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
364{ 368{
365 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 369 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
366 u8 netmask, hbits; 370 u8 netmask, hbits;
371 size_t hsize;
367 struct ip_set_hash *h; 372 struct ip_set_hash *h;
368 373
369 if (!(set->family == AF_INET || set->family == AF_INET6)) 374 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
370 return -IPSET_ERR_INVALID_FAMILY; 375 return -IPSET_ERR_INVALID_FAMILY;
371 netmask = set->family == AF_INET ? 32 : 128; 376 netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
372 pr_debug("Create set %s with family %s\n", 377 pr_debug("Create set %s with family %s\n",
373 set->name, set->family == AF_INET ? "inet" : "inet6"); 378 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
374 379
375 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 380 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
376 !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || 381 !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
@@ -389,8 +394,8 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
389 if (tb[IPSET_ATTR_NETMASK]) { 394 if (tb[IPSET_ATTR_NETMASK]) {
390 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); 395 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
391 396
392 if ((set->family == AF_INET && netmask > 32) || 397 if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
393 (set->family == AF_INET6 && netmask > 128) || 398 (set->family == NFPROTO_IPV6 && netmask > 128) ||
394 netmask == 0) 399 netmask == 0)
395 return -IPSET_ERR_INVALID_NETMASK; 400 return -IPSET_ERR_INVALID_NETMASK;
396 } 401 }
@@ -405,9 +410,12 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
405 h->timeout = IPSET_NO_TIMEOUT; 410 h->timeout = IPSET_NO_TIMEOUT;
406 411
407 hbits = htable_bits(hashsize); 412 hbits = htable_bits(hashsize);
408 h->table = ip_set_alloc( 413 hsize = htable_size(hbits);
409 sizeof(struct htable) 414 if (hsize == 0) {
410 + jhash_size(hbits) * sizeof(struct hbucket)); 415 kfree(h);
416 return -ENOMEM;
417 }
418 h->table = ip_set_alloc(hsize);
411 if (!h->table) { 419 if (!h->table) {
412 kfree(h); 420 kfree(h);
413 return -ENOMEM; 421 return -ENOMEM;
@@ -419,15 +427,15 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
419 if (tb[IPSET_ATTR_TIMEOUT]) { 427 if (tb[IPSET_ATTR_TIMEOUT]) {
420 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 428 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
421 429
422 set->variant = set->family == AF_INET 430 set->variant = set->family == NFPROTO_IPV4
423 ? &hash_ip4_tvariant : &hash_ip6_tvariant; 431 ? &hash_ip4_tvariant : &hash_ip6_tvariant;
424 432
425 if (set->family == AF_INET) 433 if (set->family == NFPROTO_IPV4)
426 hash_ip4_gc_init(set); 434 hash_ip4_gc_init(set);
427 else 435 else
428 hash_ip6_gc_init(set); 436 hash_ip6_gc_init(set);
429 } else { 437 } else {
430 set->variant = set->family == AF_INET 438 set->variant = set->family == NFPROTO_IPV4
431 ? &hash_ip4_variant : &hash_ip6_variant; 439 ? &hash_ip4_variant : &hash_ip6_variant;
432 } 440 }
433 441
@@ -443,7 +451,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
443 .protocol = IPSET_PROTOCOL, 451 .protocol = IPSET_PROTOCOL,
444 .features = IPSET_TYPE_IP, 452 .features = IPSET_TYPE_IP,
445 .dimension = IPSET_DIM_ONE, 453 .dimension = IPSET_DIM_ONE,
446 .family = AF_UNSPEC, 454 .family = NFPROTO_UNSPEC,
447 .revision_min = 0, 455 .revision_min = 0,
448 .revision_max = 0, 456 .revision_max = 0,
449 .create = hash_ip_create, 457 .create = hash_ip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 37d667e3f6f8..92722bb82eea 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -93,9 +93,10 @@ static bool
93hash_ipport4_data_list(struct sk_buff *skb, 93hash_ipport4_data_list(struct sk_buff *skb,
94 const struct hash_ipport4_elem *data) 94 const struct hash_ipport4_elem *data)
95{ 95{
96 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 96 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
97 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 97 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
98 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 98 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
99 goto nla_put_failure;
99 return 0; 100 return 0;
100 101
101nla_put_failure: 102nla_put_failure:
@@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb,
109 const struct hash_ipport4_telem *tdata = 110 const struct hash_ipport4_telem *tdata =
110 (const struct hash_ipport4_telem *)data; 111 (const struct hash_ipport4_telem *)data;
111 112
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 113 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
113 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 114 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
114 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 115 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
115 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 116 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
116 htonl(ip_set_timeout_get(tdata->timeout))); 117 htonl(ip_set_timeout_get(tdata->timeout))))
117 118 goto nla_put_failure;
118 return 0; 119 return 0;
119 120
120nla_put_failure: 121nla_put_failure:
@@ -308,9 +309,10 @@ static bool
308hash_ipport6_data_list(struct sk_buff *skb, 309hash_ipport6_data_list(struct sk_buff *skb,
309 const struct hash_ipport6_elem *data) 310 const struct hash_ipport6_elem *data)
310{ 311{
311 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 312 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
312 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 313 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
313 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 314 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
315 goto nla_put_failure;
314 return 0; 316 return 0;
315 317
316nla_put_failure: 318nla_put_failure:
@@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb,
324 const struct hash_ipport6_telem *e = 326 const struct hash_ipport6_telem *e =
325 (const struct hash_ipport6_telem *)data; 327 (const struct hash_ipport6_telem *)data;
326 328
327 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 329 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
328 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 330 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
329 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 331 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
330 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 332 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
331 htonl(ip_set_timeout_get(e->timeout))); 333 htonl(ip_set_timeout_get(e->timeout))))
334 goto nla_put_failure;
332 return 0; 335 return 0;
333 336
334nla_put_failure: 337nla_put_failure:
@@ -449,8 +452,9 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
449 struct ip_set_hash *h; 452 struct ip_set_hash *h;
450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 453 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
451 u8 hbits; 454 u8 hbits;
455 size_t hsize;
452 456
453 if (!(set->family == AF_INET || set->family == AF_INET6)) 457 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
454 return -IPSET_ERR_INVALID_FAMILY; 458 return -IPSET_ERR_INVALID_FAMILY;
455 459
456 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 460 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -476,9 +480,12 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
476 h->timeout = IPSET_NO_TIMEOUT; 480 h->timeout = IPSET_NO_TIMEOUT;
477 481
478 hbits = htable_bits(hashsize); 482 hbits = htable_bits(hashsize);
479 h->table = ip_set_alloc( 483 hsize = htable_size(hbits);
480 sizeof(struct htable) 484 if (hsize == 0) {
481 + jhash_size(hbits) * sizeof(struct hbucket)); 485 kfree(h);
486 return -ENOMEM;
487 }
488 h->table = ip_set_alloc(hsize);
482 if (!h->table) { 489 if (!h->table) {
483 kfree(h); 490 kfree(h);
484 return -ENOMEM; 491 return -ENOMEM;
@@ -490,15 +497,15 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
490 if (tb[IPSET_ATTR_TIMEOUT]) { 497 if (tb[IPSET_ATTR_TIMEOUT]) {
491 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 498 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
492 499
493 set->variant = set->family == AF_INET 500 set->variant = set->family == NFPROTO_IPV4
494 ? &hash_ipport4_tvariant : &hash_ipport6_tvariant; 501 ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
495 502
496 if (set->family == AF_INET) 503 if (set->family == NFPROTO_IPV4)
497 hash_ipport4_gc_init(set); 504 hash_ipport4_gc_init(set);
498 else 505 else
499 hash_ipport6_gc_init(set); 506 hash_ipport6_gc_init(set);
500 } else { 507 } else {
501 set->variant = set->family == AF_INET 508 set->variant = set->family == NFPROTO_IPV4
502 ? &hash_ipport4_variant : &hash_ipport6_variant; 509 ? &hash_ipport4_variant : &hash_ipport6_variant;
503 } 510 }
504 511
@@ -514,7 +521,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
514 .protocol = IPSET_PROTOCOL, 521 .protocol = IPSET_PROTOCOL,
515 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 522 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
516 .dimension = IPSET_DIM_TWO, 523 .dimension = IPSET_DIM_TWO,
517 .family = AF_UNSPEC, 524 .family = NFPROTO_UNSPEC,
518 .revision_min = 0, 525 .revision_min = 0,
519 .revision_max = 1, /* SCTP and UDPLITE support added */ 526 .revision_max = 1, /* SCTP and UDPLITE support added */
520 .create = hash_ipport_create, 527 .create = hash_ipport_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index e69e2718fbe1..0637ce096def 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -94,10 +94,11 @@ static bool
94hash_ipportip4_data_list(struct sk_buff *skb, 94hash_ipportip4_data_list(struct sk_buff *skb,
95 const struct hash_ipportip4_elem *data) 95 const struct hash_ipportip4_elem *data)
96{ 96{
97 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 97 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
98 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); 98 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
99 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 99 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
100 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 100 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
101 goto nla_put_failure;
101 return 0; 102 return 0;
102 103
103nla_put_failure: 104nla_put_failure:
@@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb,
111 const struct hash_ipportip4_telem *tdata = 112 const struct hash_ipportip4_telem *tdata =
112 (const struct hash_ipportip4_telem *)data; 113 (const struct hash_ipportip4_telem *)data;
113 114
114 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 115 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
115 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); 116 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
116 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 117 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
117 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 118 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
118 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 119 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
119 htonl(ip_set_timeout_get(tdata->timeout))); 120 htonl(ip_set_timeout_get(tdata->timeout))))
120 121 goto nla_put_failure;
121 return 0; 122 return 0;
122 123
123nla_put_failure: 124nla_put_failure:
@@ -319,10 +320,11 @@ static bool
319hash_ipportip6_data_list(struct sk_buff *skb, 320hash_ipportip6_data_list(struct sk_buff *skb,
320 const struct hash_ipportip6_elem *data) 321 const struct hash_ipportip6_elem *data)
321{ 322{
322 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 323 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
323 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 324 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
324 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 325 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
325 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 326 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
327 goto nla_put_failure;
326 return 0; 328 return 0;
327 329
328nla_put_failure: 330nla_put_failure:
@@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb,
336 const struct hash_ipportip6_telem *e = 338 const struct hash_ipportip6_telem *e =
337 (const struct hash_ipportip6_telem *)data; 339 (const struct hash_ipportip6_telem *)data;
338 340
339 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 341 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
340 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 342 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
341 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 343 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
342 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 344 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
343 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 345 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
344 htonl(ip_set_timeout_get(e->timeout))); 346 htonl(ip_set_timeout_get(e->timeout))))
347 goto nla_put_failure;
345 return 0; 348 return 0;
346 349
347nla_put_failure: 350nla_put_failure:
@@ -467,8 +470,9 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
467 struct ip_set_hash *h; 470 struct ip_set_hash *h;
468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 471 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
469 u8 hbits; 472 u8 hbits;
473 size_t hsize;
470 474
471 if (!(set->family == AF_INET || set->family == AF_INET6)) 475 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
472 return -IPSET_ERR_INVALID_FAMILY; 476 return -IPSET_ERR_INVALID_FAMILY;
473 477
474 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 478 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -494,9 +498,12 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
494 h->timeout = IPSET_NO_TIMEOUT; 498 h->timeout = IPSET_NO_TIMEOUT;
495 499
496 hbits = htable_bits(hashsize); 500 hbits = htable_bits(hashsize);
497 h->table = ip_set_alloc( 501 hsize = htable_size(hbits);
498 sizeof(struct htable) 502 if (hsize == 0) {
499 + jhash_size(hbits) * sizeof(struct hbucket)); 503 kfree(h);
504 return -ENOMEM;
505 }
506 h->table = ip_set_alloc(hsize);
500 if (!h->table) { 507 if (!h->table) {
501 kfree(h); 508 kfree(h);
502 return -ENOMEM; 509 return -ENOMEM;
@@ -508,15 +515,15 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
508 if (tb[IPSET_ATTR_TIMEOUT]) { 515 if (tb[IPSET_ATTR_TIMEOUT]) {
509 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 516 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
510 517
511 set->variant = set->family == AF_INET 518 set->variant = set->family == NFPROTO_IPV4
512 ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant; 519 ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
513 520
514 if (set->family == AF_INET) 521 if (set->family == NFPROTO_IPV4)
515 hash_ipportip4_gc_init(set); 522 hash_ipportip4_gc_init(set);
516 else 523 else
517 hash_ipportip6_gc_init(set); 524 hash_ipportip6_gc_init(set);
518 } else { 525 } else {
519 set->variant = set->family == AF_INET 526 set->variant = set->family == NFPROTO_IPV4
520 ? &hash_ipportip4_variant : &hash_ipportip6_variant; 527 ? &hash_ipportip4_variant : &hash_ipportip6_variant;
521 } 528 }
522 529
@@ -532,7 +539,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
532 .protocol = IPSET_PROTOCOL, 539 .protocol = IPSET_PROTOCOL,
533 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 540 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
534 .dimension = IPSET_DIM_THREE, 541 .dimension = IPSET_DIM_THREE,
535 .family = AF_UNSPEC, 542 .family = NFPROTO_UNSPEC,
536 .revision_min = 0, 543 .revision_min = 0,
537 .revision_max = 1, /* SCTP and UDPLITE support added */ 544 .revision_max = 1, /* SCTP and UDPLITE support added */
538 .create = hash_ipportip_create, 545 .create = hash_ipportip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 64199b4e93c9..1ce21ca976e1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -41,12 +41,19 @@ hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
41 41
42/* The type variant functions: IPv4 */ 42/* The type variant functions: IPv4 */
43 43
44/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
45 * However this way we have to store internally cidr - 1,
46 * dancing back and forth.
47 */
48#define IP_SET_HASH_WITH_NETS_PACKED
49
44/* Member elements without timeout */ 50/* Member elements without timeout */
45struct hash_ipportnet4_elem { 51struct hash_ipportnet4_elem {
46 __be32 ip; 52 __be32 ip;
47 __be32 ip2; 53 __be32 ip2;
48 __be16 port; 54 __be16 port;
49 u8 cidr; 55 u8 cidr:7;
56 u8 nomatch:1;
50 u8 proto; 57 u8 proto;
51}; 58};
52 59
@@ -55,7 +62,8 @@ struct hash_ipportnet4_telem {
55 __be32 ip; 62 __be32 ip;
56 __be32 ip2; 63 __be32 ip2;
57 __be16 port; 64 __be16 port;
58 u8 cidr; 65 u8 cidr:7;
66 u8 nomatch:1;
59 u8 proto; 67 u8 proto;
60 unsigned long timeout; 68 unsigned long timeout;
61}; 69};
@@ -86,10 +94,22 @@ hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
86} 94}
87 95
88static inline void 96static inline void
97hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
98{
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100}
101
102static inline bool
103hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
104{
105 return !elem->nomatch;
106}
107
108static inline void
89hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) 109hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
90{ 110{
91 elem->ip2 &= ip_set_netmask(cidr); 111 elem->ip2 &= ip_set_netmask(cidr);
92 elem->cidr = cidr; 112 elem->cidr = cidr - 1;
93} 113}
94 114
95static inline void 115static inline void
@@ -102,11 +122,16 @@ static bool
102hash_ipportnet4_data_list(struct sk_buff *skb, 122hash_ipportnet4_data_list(struct sk_buff *skb,
103 const struct hash_ipportnet4_elem *data) 123 const struct hash_ipportnet4_elem *data)
104{ 124{
105 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
106 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); 126
107 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 127 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
108 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 128 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
109 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 129 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
130 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
131 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
132 (flags &&
133 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
134 goto nla_put_failure;
110 return 0; 135 return 0;
111 136
112nla_put_failure: 137nla_put_failure:
@@ -119,15 +144,18 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
119{ 144{
120 const struct hash_ipportnet4_telem *tdata = 145 const struct hash_ipportnet4_telem *tdata =
121 (const struct hash_ipportnet4_telem *)data; 146 (const struct hash_ipportnet4_telem *)data;
122 147 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
123 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 148
124 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); 149 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
125 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 150 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
126 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 151 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
127 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 152 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
128 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 153 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
129 htonl(ip_set_timeout_get(tdata->timeout))); 154 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
130 155 htonl(ip_set_timeout_get(tdata->timeout))) ||
156 (flags &&
157 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
158 goto nla_put_failure;
131 return 0; 159 return 0;
132 160
133nla_put_failure: 161nla_put_failure:
@@ -158,13 +186,11 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
158 const struct ip_set_hash *h = set->data; 186 const struct ip_set_hash *h = set->data;
159 ipset_adtfn adtfn = set->variant->adt[adt]; 187 ipset_adtfn adtfn = set->variant->adt[adt];
160 struct hash_ipportnet4_elem data = { 188 struct hash_ipportnet4_elem data = {
161 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 189 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
162 }; 190 };
163 191
164 if (data.cidr == 0)
165 return -EINVAL;
166 if (adt == IPSET_TEST) 192 if (adt == IPSET_TEST)
167 data.cidr = HOST_MASK; 193 data.cidr = HOST_MASK - 1;
168 194
169 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 195 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
170 &data.port, &data.proto)) 196 &data.port, &data.proto))
@@ -172,7 +198,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
172 198
173 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); 199 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
174 ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); 200 ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
175 data.ip2 &= ip_set_netmask(data.cidr); 201 data.ip2 &= ip_set_netmask(data.cidr + 1);
176 202
177 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 203 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
178} 204}
@@ -183,17 +209,19 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
183{ 209{
184 const struct ip_set_hash *h = set->data; 210 const struct ip_set_hash *h = set->data;
185 ipset_adtfn adtfn = set->variant->adt[adt]; 211 ipset_adtfn adtfn = set->variant->adt[adt];
186 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; 212 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
187 u32 ip, ip_to = 0, p = 0, port, port_to; 213 u32 ip, ip_to = 0, p = 0, port, port_to;
188 u32 ip2_from = 0, ip2_to, ip2_last, ip2; 214 u32 ip2_from = 0, ip2_to, ip2_last, ip2;
189 u32 timeout = h->timeout; 215 u32 timeout = h->timeout;
190 bool with_ports = false; 216 bool with_ports = false;
217 u8 cidr;
191 int ret; 218 int ret;
192 219
193 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 220 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
194 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 221 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
195 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 222 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
196 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 223 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
224 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
197 return -IPSET_ERR_PROTOCOL; 225 return -IPSET_ERR_PROTOCOL;
198 226
199 if (tb[IPSET_ATTR_LINENO]) 227 if (tb[IPSET_ATTR_LINENO])
@@ -208,9 +236,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
208 return ret; 236 return ret;
209 237
210 if (tb[IPSET_ATTR_CIDR2]) { 238 if (tb[IPSET_ATTR_CIDR2]) {
211 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 239 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
212 if (!data.cidr) 240 if (!cidr || cidr > HOST_MASK)
213 return -IPSET_ERR_INVALID_CIDR; 241 return -IPSET_ERR_INVALID_CIDR;
242 data.cidr = cidr - 1;
214 } 243 }
215 244
216 if (tb[IPSET_ATTR_PORT]) 245 if (tb[IPSET_ATTR_PORT])
@@ -236,12 +265,18 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
236 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 265 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
237 } 266 }
238 267
268 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
269 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
270 if (cadt_flags & IPSET_FLAG_NOMATCH)
271 flags |= (cadt_flags << 16);
272 }
273
239 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; 274 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
240 if (adt == IPSET_TEST || 275 if (adt == IPSET_TEST ||
241 !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || 276 !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
242 tb[IPSET_ATTR_IP2_TO])) { 277 tb[IPSET_ATTR_IP2_TO])) {
243 data.ip = htonl(ip); 278 data.ip = htonl(ip);
244 data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr)); 279 data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1));
245 ret = adtfn(set, &data, timeout, flags); 280 ret = adtfn(set, &data, timeout, flags);
246 return ip_set_eexist(ret, flags) ? 0 : ret; 281 return ip_set_eexist(ret, flags) ? 0 : ret;
247 } 282 }
@@ -275,7 +310,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
275 if (ip2_from + UINT_MAX == ip2_to) 310 if (ip2_from + UINT_MAX == ip2_to)
276 return -IPSET_ERR_HASH_RANGE; 311 return -IPSET_ERR_HASH_RANGE;
277 } else { 312 } else {
278 ip_set_mask_from_to(ip2_from, ip2_to, data.cidr); 313 ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1);
279 } 314 }
280 315
281 if (retried) 316 if (retried)
@@ -290,7 +325,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
290 while (!after(ip2, ip2_to)) { 325 while (!after(ip2, ip2_to)) {
291 data.ip2 = htonl(ip2); 326 data.ip2 = htonl(ip2);
292 ip2_last = ip_set_range_to_cidr(ip2, ip2_to, 327 ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
293 &data.cidr); 328 &cidr);
329 data.cidr = cidr - 1;
294 ret = adtfn(set, &data, timeout, flags); 330 ret = adtfn(set, &data, timeout, flags);
295 331
296 if (ret && !ip_set_eexist(ret, flags)) 332 if (ret && !ip_set_eexist(ret, flags))
@@ -321,7 +357,8 @@ struct hash_ipportnet6_elem {
321 union nf_inet_addr ip; 357 union nf_inet_addr ip;
322 union nf_inet_addr ip2; 358 union nf_inet_addr ip2;
323 __be16 port; 359 __be16 port;
324 u8 cidr; 360 u8 cidr:7;
361 u8 nomatch:1;
325 u8 proto; 362 u8 proto;
326}; 363};
327 364
@@ -329,7 +366,8 @@ struct hash_ipportnet6_telem {
329 union nf_inet_addr ip; 366 union nf_inet_addr ip;
330 union nf_inet_addr ip2; 367 union nf_inet_addr ip2;
331 __be16 port; 368 __be16 port;
332 u8 cidr; 369 u8 cidr:7;
370 u8 nomatch:1;
333 u8 proto; 371 u8 proto;
334 unsigned long timeout; 372 unsigned long timeout;
335}; 373};
@@ -360,6 +398,18 @@ hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
360} 398}
361 399
362static inline void 400static inline void
401hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
402{
403 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
404}
405
406static inline bool
407hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
408{
409 return !elem->nomatch;
410}
411
412static inline void
363hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) 413hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
364{ 414{
365 elem->proto = 0; 415 elem->proto = 0;
@@ -378,18 +428,23 @@ static inline void
378hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) 428hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
379{ 429{
380 ip6_netmask(&elem->ip2, cidr); 430 ip6_netmask(&elem->ip2, cidr);
381 elem->cidr = cidr; 431 elem->cidr = cidr - 1;
382} 432}
383 433
384static bool 434static bool
385hash_ipportnet6_data_list(struct sk_buff *skb, 435hash_ipportnet6_data_list(struct sk_buff *skb,
386 const struct hash_ipportnet6_elem *data) 436 const struct hash_ipportnet6_elem *data)
387{ 437{
388 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 438 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
389 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 439
390 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 440 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
391 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 441 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
392 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 442 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
443 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
444 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
445 (flags &&
446 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
447 goto nla_put_failure;
393 return 0; 448 return 0;
394 449
395nla_put_failure: 450nla_put_failure:
@@ -402,14 +457,18 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
402{ 457{
403 const struct hash_ipportnet6_telem *e = 458 const struct hash_ipportnet6_telem *e =
404 (const struct hash_ipportnet6_telem *)data; 459 (const struct hash_ipportnet6_telem *)data;
405 460 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
406 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 461
407 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 462 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
408 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 463 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
409 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 464 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
410 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 465 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
411 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 466 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
412 htonl(ip_set_timeout_get(e->timeout))); 467 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
468 htonl(ip_set_timeout_get(e->timeout))) ||
469 (flags &&
470 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
471 goto nla_put_failure;
413 return 0; 472 return 0;
414 473
415nla_put_failure: 474nla_put_failure:
@@ -438,13 +497,11 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
438 const struct ip_set_hash *h = set->data; 497 const struct ip_set_hash *h = set->data;
439 ipset_adtfn adtfn = set->variant->adt[adt]; 498 ipset_adtfn adtfn = set->variant->adt[adt];
440 struct hash_ipportnet6_elem data = { 499 struct hash_ipportnet6_elem data = {
441 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 500 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
442 }; 501 };
443 502
444 if (data.cidr == 0)
445 return -EINVAL;
446 if (adt == IPSET_TEST) 503 if (adt == IPSET_TEST)
447 data.cidr = HOST_MASK; 504 data.cidr = HOST_MASK - 1;
448 505
449 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 506 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
450 &data.port, &data.proto)) 507 &data.port, &data.proto))
@@ -452,7 +509,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
452 509
453 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); 510 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
454 ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); 511 ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
455 ip6_netmask(&data.ip2, data.cidr); 512 ip6_netmask(&data.ip2, data.cidr + 1);
456 513
457 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 514 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
458} 515}
@@ -463,16 +520,18 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
463{ 520{
464 const struct ip_set_hash *h = set->data; 521 const struct ip_set_hash *h = set->data;
465 ipset_adtfn adtfn = set->variant->adt[adt]; 522 ipset_adtfn adtfn = set->variant->adt[adt];
466 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK }; 523 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 };
467 u32 port, port_to; 524 u32 port, port_to;
468 u32 timeout = h->timeout; 525 u32 timeout = h->timeout;
469 bool with_ports = false; 526 bool with_ports = false;
527 u8 cidr;
470 int ret; 528 int ret;
471 529
472 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 530 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
473 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 531 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
474 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 532 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
475 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || 533 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
534 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
476 tb[IPSET_ATTR_IP_TO] || 535 tb[IPSET_ATTR_IP_TO] ||
477 tb[IPSET_ATTR_CIDR])) 536 tb[IPSET_ATTR_CIDR]))
478 return -IPSET_ERR_PROTOCOL; 537 return -IPSET_ERR_PROTOCOL;
@@ -490,13 +549,14 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
490 if (ret) 549 if (ret)
491 return ret; 550 return ret;
492 551
493 if (tb[IPSET_ATTR_CIDR2]) 552 if (tb[IPSET_ATTR_CIDR2]) {
494 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 553 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
495 554 if (!cidr || cidr > HOST_MASK)
496 if (!data.cidr) 555 return -IPSET_ERR_INVALID_CIDR;
497 return -IPSET_ERR_INVALID_CIDR; 556 data.cidr = cidr - 1;
557 }
498 558
499 ip6_netmask(&data.ip2, data.cidr); 559 ip6_netmask(&data.ip2, data.cidr + 1);
500 560
501 if (tb[IPSET_ATTR_PORT]) 561 if (tb[IPSET_ATTR_PORT])
502 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); 562 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -521,6 +581,12 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
521 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 581 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
522 } 582 }
523 583
584 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
585 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
586 if (cadt_flags & IPSET_FLAG_NOMATCH)
587 flags |= (cadt_flags << 16);
588 }
589
524 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 590 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
525 ret = adtfn(set, &data, timeout, flags); 591 ret = adtfn(set, &data, timeout, flags);
526 return ip_set_eexist(ret, flags) ? 0 : ret; 592 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -553,8 +619,9 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
553 struct ip_set_hash *h; 619 struct ip_set_hash *h;
554 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 620 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
555 u8 hbits; 621 u8 hbits;
622 size_t hsize;
556 623
557 if (!(set->family == AF_INET || set->family == AF_INET6)) 624 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
558 return -IPSET_ERR_INVALID_FAMILY; 625 return -IPSET_ERR_INVALID_FAMILY;
559 626
560 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 627 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -573,7 +640,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
573 640
574 h = kzalloc(sizeof(*h) 641 h = kzalloc(sizeof(*h)
575 + sizeof(struct ip_set_hash_nets) 642 + sizeof(struct ip_set_hash_nets)
576 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 643 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
577 if (!h) 644 if (!h)
578 return -ENOMEM; 645 return -ENOMEM;
579 646
@@ -582,9 +649,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
582 h->timeout = IPSET_NO_TIMEOUT; 649 h->timeout = IPSET_NO_TIMEOUT;
583 650
584 hbits = htable_bits(hashsize); 651 hbits = htable_bits(hashsize);
585 h->table = ip_set_alloc( 652 hsize = htable_size(hbits);
586 sizeof(struct htable) 653 if (hsize == 0) {
587 + jhash_size(hbits) * sizeof(struct hbucket)); 654 kfree(h);
655 return -ENOMEM;
656 }
657 h->table = ip_set_alloc(hsize);
588 if (!h->table) { 658 if (!h->table) {
589 kfree(h); 659 kfree(h);
590 return -ENOMEM; 660 return -ENOMEM;
@@ -596,16 +666,16 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
596 if (tb[IPSET_ATTR_TIMEOUT]) { 666 if (tb[IPSET_ATTR_TIMEOUT]) {
597 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 667 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
598 668
599 set->variant = set->family == AF_INET 669 set->variant = set->family == NFPROTO_IPV4
600 ? &hash_ipportnet4_tvariant 670 ? &hash_ipportnet4_tvariant
601 : &hash_ipportnet6_tvariant; 671 : &hash_ipportnet6_tvariant;
602 672
603 if (set->family == AF_INET) 673 if (set->family == NFPROTO_IPV4)
604 hash_ipportnet4_gc_init(set); 674 hash_ipportnet4_gc_init(set);
605 else 675 else
606 hash_ipportnet6_gc_init(set); 676 hash_ipportnet6_gc_init(set);
607 } else { 677 } else {
608 set->variant = set->family == AF_INET 678 set->variant = set->family == NFPROTO_IPV4
609 ? &hash_ipportnet4_variant : &hash_ipportnet6_variant; 679 ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
610 } 680 }
611 681
@@ -621,10 +691,11 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
621 .protocol = IPSET_PROTOCOL, 691 .protocol = IPSET_PROTOCOL,
622 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 692 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
623 .dimension = IPSET_DIM_THREE, 693 .dimension = IPSET_DIM_THREE,
624 .family = AF_UNSPEC, 694 .family = NFPROTO_UNSPEC,
625 .revision_min = 0, 695 .revision_min = 0,
626 /* 1 SCTP and UDPLITE support added */ 696 /* 1 SCTP and UDPLITE support added */
627 .revision_max = 2, /* Range as input support for IPv4 added */ 697 /* 2 Range as input support for IPv4 added */
698 .revision_max = 3, /* nomatch flag support added */
628 .create = hash_ipportnet_create, 699 .create = hash_ipportnet_create,
629 .create_policy = { 700 .create_policy = {
630 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 701 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -643,6 +714,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
643 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 714 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
644 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, 715 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
645 [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, 716 [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
717 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
646 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 718 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
647 [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, 719 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
648 }, 720 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 28988196775e..c57a6a09906d 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -43,7 +43,7 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
43struct hash_net4_elem { 43struct hash_net4_elem {
44 __be32 ip; 44 __be32 ip;
45 u16 padding0; 45 u16 padding0;
46 u8 padding1; 46 u8 nomatch;
47 u8 cidr; 47 u8 cidr;
48}; 48};
49 49
@@ -51,7 +51,7 @@ struct hash_net4_elem {
51struct hash_net4_telem { 51struct hash_net4_telem {
52 __be32 ip; 52 __be32 ip;
53 u16 padding0; 53 u16 padding0;
54 u8 padding1; 54 u8 nomatch;
55 u8 cidr; 55 u8 cidr;
56 unsigned long timeout; 56 unsigned long timeout;
57}; 57};
@@ -61,7 +61,8 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
61 const struct hash_net4_elem *ip2, 61 const struct hash_net4_elem *ip2,
62 u32 *multi) 62 u32 *multi)
63{ 63{
64 return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr; 64 return ip1->ip == ip2->ip &&
65 ip1->cidr == ip2->cidr;
65} 66}
66 67
67static inline bool 68static inline bool
@@ -76,6 +77,19 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
76{ 77{
77 dst->ip = src->ip; 78 dst->ip = src->ip;
78 dst->cidr = src->cidr; 79 dst->cidr = src->cidr;
80 dst->nomatch = src->nomatch;
81}
82
83static inline void
84hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
85{
86 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
87}
88
89static inline bool
90hash_net4_data_match(const struct hash_net4_elem *elem)
91{
92 return !elem->nomatch;
79} 93}
80 94
81static inline void 95static inline void
@@ -95,8 +109,13 @@ hash_net4_data_zero_out(struct hash_net4_elem *elem)
95static bool 109static bool
96hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) 110hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
97{ 111{
98 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 112 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
99 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 113
114 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
115 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
116 (flags &&
117 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
118 goto nla_put_failure;
100 return 0; 119 return 0;
101 120
102nla_put_failure: 121nla_put_failure:
@@ -108,12 +127,15 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
108{ 127{
109 const struct hash_net4_telem *tdata = 128 const struct hash_net4_telem *tdata =
110 (const struct hash_net4_telem *)data; 129 (const struct hash_net4_telem *)data;
111 130 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 131
113 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); 132 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
114 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 133 nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
115 htonl(ip_set_timeout_get(tdata->timeout))); 134 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
116 135 htonl(ip_set_timeout_get(tdata->timeout))) ||
136 (flags &&
137 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
138 goto nla_put_failure;
117 return 0; 139 return 0;
118 140
119nla_put_failure: 141nla_put_failure:
@@ -167,7 +189,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
167 int ret; 189 int ret;
168 190
169 if (unlikely(!tb[IPSET_ATTR_IP] || 191 if (unlikely(!tb[IPSET_ATTR_IP] ||
170 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 192 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
193 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
171 return -IPSET_ERR_PROTOCOL; 194 return -IPSET_ERR_PROTOCOL;
172 195
173 if (tb[IPSET_ATTR_LINENO]) 196 if (tb[IPSET_ATTR_LINENO])
@@ -179,7 +202,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
179 202
180 if (tb[IPSET_ATTR_CIDR]) { 203 if (tb[IPSET_ATTR_CIDR]) {
181 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 204 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
182 if (!data.cidr) 205 if (!data.cidr || data.cidr > HOST_MASK)
183 return -IPSET_ERR_INVALID_CIDR; 206 return -IPSET_ERR_INVALID_CIDR;
184 } 207 }
185 208
@@ -189,6 +212,12 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
189 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 212 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
190 } 213 }
191 214
215 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
216 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
217 if (cadt_flags & IPSET_FLAG_NOMATCH)
218 flags |= (cadt_flags << 16);
219 }
220
192 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 221 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
193 data.ip = htonl(ip & ip_set_hostmask(data.cidr)); 222 data.ip = htonl(ip & ip_set_hostmask(data.cidr));
194 ret = adtfn(set, &data, timeout, flags); 223 ret = adtfn(set, &data, timeout, flags);
@@ -236,14 +265,14 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
236struct hash_net6_elem { 265struct hash_net6_elem {
237 union nf_inet_addr ip; 266 union nf_inet_addr ip;
238 u16 padding0; 267 u16 padding0;
239 u8 padding1; 268 u8 nomatch;
240 u8 cidr; 269 u8 cidr;
241}; 270};
242 271
243struct hash_net6_telem { 272struct hash_net6_telem {
244 union nf_inet_addr ip; 273 union nf_inet_addr ip;
245 u16 padding0; 274 u16 padding0;
246 u8 padding1; 275 u8 nomatch;
247 u8 cidr; 276 u8 cidr;
248 unsigned long timeout; 277 unsigned long timeout;
249}; 278};
@@ -269,6 +298,19 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
269{ 298{
270 dst->ip.in6 = src->ip.in6; 299 dst->ip.in6 = src->ip.in6;
271 dst->cidr = src->cidr; 300 dst->cidr = src->cidr;
301 dst->nomatch = src->nomatch;
302}
303
304static inline void
305hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
306{
307 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
308}
309
310static inline bool
311hash_net6_data_match(const struct hash_net6_elem *elem)
312{
313 return !elem->nomatch;
272} 314}
273 315
274static inline void 316static inline void
@@ -296,8 +338,13 @@ hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
296static bool 338static bool
297hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) 339hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
298{ 340{
299 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 341 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
300 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 342
343 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
344 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
345 (flags &&
346 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
347 goto nla_put_failure;
301 return 0; 348 return 0;
302 349
303nla_put_failure: 350nla_put_failure:
@@ -309,11 +356,15 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
309{ 356{
310 const struct hash_net6_telem *e = 357 const struct hash_net6_telem *e =
311 (const struct hash_net6_telem *)data; 358 (const struct hash_net6_telem *)data;
312 359 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
313 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 360
314 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); 361 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
315 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 362 nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
316 htonl(ip_set_timeout_get(e->timeout))); 363 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
364 htonl(ip_set_timeout_get(e->timeout))) ||
365 (flags &&
366 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
367 goto nla_put_failure;
317 return 0; 368 return 0;
318 369
319nla_put_failure: 370nla_put_failure:
@@ -366,7 +417,8 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
366 int ret; 417 int ret;
367 418
368 if (unlikely(!tb[IPSET_ATTR_IP] || 419 if (unlikely(!tb[IPSET_ATTR_IP] ||
369 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 420 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
421 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
370 return -IPSET_ERR_PROTOCOL; 422 return -IPSET_ERR_PROTOCOL;
371 if (unlikely(tb[IPSET_ATTR_IP_TO])) 423 if (unlikely(tb[IPSET_ATTR_IP_TO]))
372 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; 424 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -381,7 +433,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
381 if (tb[IPSET_ATTR_CIDR]) 433 if (tb[IPSET_ATTR_CIDR])
382 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 434 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
383 435
384 if (!data.cidr) 436 if (!data.cidr || data.cidr > HOST_MASK)
385 return -IPSET_ERR_INVALID_CIDR; 437 return -IPSET_ERR_INVALID_CIDR;
386 438
387 ip6_netmask(&data.ip, data.cidr); 439 ip6_netmask(&data.ip, data.cidr);
@@ -392,6 +444,12 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
392 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 444 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
393 } 445 }
394 446
447 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
448 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
449 if (cadt_flags & IPSET_FLAG_NOMATCH)
450 flags |= (cadt_flags << 16);
451 }
452
395 ret = adtfn(set, &data, timeout, flags); 453 ret = adtfn(set, &data, timeout, flags);
396 454
397 return ip_set_eexist(ret, flags) ? 0 : ret; 455 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -405,8 +463,9 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
405 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 463 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
406 struct ip_set_hash *h; 464 struct ip_set_hash *h;
407 u8 hbits; 465 u8 hbits;
466 size_t hsize;
408 467
409 if (!(set->family == AF_INET || set->family == AF_INET6)) 468 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
410 return -IPSET_ERR_INVALID_FAMILY; 469 return -IPSET_ERR_INVALID_FAMILY;
411 470
412 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 471 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -425,7 +484,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
425 484
426 h = kzalloc(sizeof(*h) 485 h = kzalloc(sizeof(*h)
427 + sizeof(struct ip_set_hash_nets) 486 + sizeof(struct ip_set_hash_nets)
428 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 487 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
429 if (!h) 488 if (!h)
430 return -ENOMEM; 489 return -ENOMEM;
431 490
@@ -434,9 +493,12 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
434 h->timeout = IPSET_NO_TIMEOUT; 493 h->timeout = IPSET_NO_TIMEOUT;
435 494
436 hbits = htable_bits(hashsize); 495 hbits = htable_bits(hashsize);
437 h->table = ip_set_alloc( 496 hsize = htable_size(hbits);
438 sizeof(struct htable) 497 if (hsize == 0) {
439 + jhash_size(hbits) * sizeof(struct hbucket)); 498 kfree(h);
499 return -ENOMEM;
500 }
501 h->table = ip_set_alloc(hsize);
440 if (!h->table) { 502 if (!h->table) {
441 kfree(h); 503 kfree(h);
442 return -ENOMEM; 504 return -ENOMEM;
@@ -448,15 +510,15 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
448 if (tb[IPSET_ATTR_TIMEOUT]) { 510 if (tb[IPSET_ATTR_TIMEOUT]) {
449 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 511 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
450 512
451 set->variant = set->family == AF_INET 513 set->variant = set->family == NFPROTO_IPV4
452 ? &hash_net4_tvariant : &hash_net6_tvariant; 514 ? &hash_net4_tvariant : &hash_net6_tvariant;
453 515
454 if (set->family == AF_INET) 516 if (set->family == NFPROTO_IPV4)
455 hash_net4_gc_init(set); 517 hash_net4_gc_init(set);
456 else 518 else
457 hash_net6_gc_init(set); 519 hash_net6_gc_init(set);
458 } else { 520 } else {
459 set->variant = set->family == AF_INET 521 set->variant = set->family == NFPROTO_IPV4
460 ? &hash_net4_variant : &hash_net6_variant; 522 ? &hash_net4_variant : &hash_net6_variant;
461 } 523 }
462 524
@@ -472,9 +534,10 @@ static struct ip_set_type hash_net_type __read_mostly = {
472 .protocol = IPSET_PROTOCOL, 534 .protocol = IPSET_PROTOCOL,
473 .features = IPSET_TYPE_IP, 535 .features = IPSET_TYPE_IP,
474 .dimension = IPSET_DIM_ONE, 536 .dimension = IPSET_DIM_ONE,
475 .family = AF_UNSPEC, 537 .family = NFPROTO_UNSPEC,
476 .revision_min = 0, 538 .revision_min = 0,
477 .revision_max = 1, /* Range as input support for IPv4 added */ 539 /* = 1 Range as input support for IPv4 added */
540 .revision_max = 2, /* nomatch flag support added */
478 .create = hash_net_create, 541 .create = hash_net_create,
479 .create_policy = { 542 .create_policy = {
480 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 543 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -488,6 +551,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
488 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, 551 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
489 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 552 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
490 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 553 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
554 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
491 }, 555 },
492 .me = THIS_MODULE, 556 .me = THIS_MODULE,
493}; 557};
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index e13095deb50d..ee863943c826 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -163,7 +163,8 @@ struct hash_netiface4_elem_hashed {
163 __be32 ip; 163 __be32 ip;
164 u8 physdev; 164 u8 physdev;
165 u8 cidr; 165 u8 cidr;
166 u16 padding; 166 u8 nomatch;
167 u8 padding;
167}; 168};
168 169
169#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) 170#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
@@ -173,7 +174,8 @@ struct hash_netiface4_elem {
173 __be32 ip; 174 __be32 ip;
174 u8 physdev; 175 u8 physdev;
175 u8 cidr; 176 u8 cidr;
176 u16 padding; 177 u8 nomatch;
178 u8 padding;
177 const char *iface; 179 const char *iface;
178}; 180};
179 181
@@ -182,7 +184,8 @@ struct hash_netiface4_telem {
182 __be32 ip; 184 __be32 ip;
183 u8 physdev; 185 u8 physdev;
184 u8 cidr; 186 u8 cidr;
185 u16 padding; 187 u8 nomatch;
188 u8 padding;
186 const char *iface; 189 const char *iface;
187 unsigned long timeout; 190 unsigned long timeout;
188}; 191};
@@ -207,11 +210,25 @@ hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
207 210
208static inline void 211static inline void
209hash_netiface4_data_copy(struct hash_netiface4_elem *dst, 212hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
210 const struct hash_netiface4_elem *src) { 213 const struct hash_netiface4_elem *src)
214{
211 dst->ip = src->ip; 215 dst->ip = src->ip;
212 dst->cidr = src->cidr; 216 dst->cidr = src->cidr;
213 dst->physdev = src->physdev; 217 dst->physdev = src->physdev;
214 dst->iface = src->iface; 218 dst->iface = src->iface;
219 dst->nomatch = src->nomatch;
220}
221
222static inline void
223hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
224{
225 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
226}
227
228static inline bool
229hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
230{
231 return !elem->nomatch;
215} 232}
216 233
217static inline void 234static inline void
@@ -233,11 +250,14 @@ hash_netiface4_data_list(struct sk_buff *skb,
233{ 250{
234 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 251 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
235 252
236 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 253 if (data->nomatch)
237 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 254 flags |= IPSET_FLAG_NOMATCH;
238 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 255 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
239 if (flags) 256 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
240 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 257 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
258 (flags &&
259 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
260 goto nla_put_failure;
241 return 0; 261 return 0;
242 262
243nla_put_failure: 263nla_put_failure:
@@ -252,13 +272,16 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
252 (const struct hash_netiface4_telem *)data; 272 (const struct hash_netiface4_telem *)data;
253 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 273 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
254 274
255 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 275 if (data->nomatch)
256 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 276 flags |= IPSET_FLAG_NOMATCH;
257 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 277 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
258 if (flags) 278 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
259 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 279 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
260 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 280 (flags &&
261 htonl(ip_set_timeout_get(tdata->timeout))); 281 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
282 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
283 htonl(ip_set_timeout_get(tdata->timeout))))
284 goto nla_put_failure;
262 285
263 return 0; 286 return 0;
264 287
@@ -361,7 +384,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
361 384
362 if (tb[IPSET_ATTR_CIDR]) { 385 if (tb[IPSET_ATTR_CIDR]) {
363 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 386 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
364 if (!data.cidr) 387 if (!data.cidr || data.cidr > HOST_MASK)
365 return -IPSET_ERR_INVALID_CIDR; 388 return -IPSET_ERR_INVALID_CIDR;
366 } 389 }
367 390
@@ -387,6 +410,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
387 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 410 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
388 if (cadt_flags & IPSET_FLAG_PHYSDEV) 411 if (cadt_flags & IPSET_FLAG_PHYSDEV)
389 data.physdev = 1; 412 data.physdev = 1;
413 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
414 flags |= (cadt_flags << 16);
390 } 415 }
391 416
392 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 417 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
@@ -440,7 +465,8 @@ struct hash_netiface6_elem_hashed {
440 union nf_inet_addr ip; 465 union nf_inet_addr ip;
441 u8 physdev; 466 u8 physdev;
442 u8 cidr; 467 u8 cidr;
443 u16 padding; 468 u8 nomatch;
469 u8 padding;
444}; 470};
445 471
446#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) 472#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
@@ -449,7 +475,8 @@ struct hash_netiface6_elem {
449 union nf_inet_addr ip; 475 union nf_inet_addr ip;
450 u8 physdev; 476 u8 physdev;
451 u8 cidr; 477 u8 cidr;
452 u16 padding; 478 u8 nomatch;
479 u8 padding;
453 const char *iface; 480 const char *iface;
454}; 481};
455 482
@@ -457,7 +484,8 @@ struct hash_netiface6_telem {
457 union nf_inet_addr ip; 484 union nf_inet_addr ip;
458 u8 physdev; 485 u8 physdev;
459 u8 cidr; 486 u8 cidr;
460 u16 padding; 487 u8 nomatch;
488 u8 padding;
461 const char *iface; 489 const char *iface;
462 unsigned long timeout; 490 unsigned long timeout;
463}; 491};
@@ -488,8 +516,21 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
488} 516}
489 517
490static inline void 518static inline void
519hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
520{
521 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
522}
523
524static inline bool
525hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
526{
527 return !elem->nomatch;
528}
529
530static inline void
491hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) 531hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
492{ 532{
533 elem->cidr = 0;
493} 534}
494 535
495static inline void 536static inline void
@@ -514,11 +555,14 @@ hash_netiface6_data_list(struct sk_buff *skb,
514{ 555{
515 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 556 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
516 557
517 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 558 if (data->nomatch)
518 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 559 flags |= IPSET_FLAG_NOMATCH;
519 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 560 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
520 if (flags) 561 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
521 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 562 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
563 (flags &&
564 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
565 goto nla_put_failure;
522 return 0; 566 return 0;
523 567
524nla_put_failure: 568nla_put_failure:
@@ -533,13 +577,16 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
533 (const struct hash_netiface6_telem *)data; 577 (const struct hash_netiface6_telem *)data;
534 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 578 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
535 579
536 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 580 if (data->nomatch)
537 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 581 flags |= IPSET_FLAG_NOMATCH;
538 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 582 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
539 if (flags) 583 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
540 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 584 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
541 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 585 (flags &&
542 htonl(ip_set_timeout_get(e->timeout))); 586 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
587 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
588 htonl(ip_set_timeout_get(e->timeout))))
589 goto nla_put_failure;
543 return 0; 590 return 0;
544 591
545nla_put_failure: 592nla_put_failure:
@@ -636,7 +683,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
636 683
637 if (tb[IPSET_ATTR_CIDR]) 684 if (tb[IPSET_ATTR_CIDR])
638 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 685 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
639 if (!data.cidr) 686 if (!data.cidr || data.cidr > HOST_MASK)
640 return -IPSET_ERR_INVALID_CIDR; 687 return -IPSET_ERR_INVALID_CIDR;
641 ip6_netmask(&data.ip, data.cidr); 688 ip6_netmask(&data.ip, data.cidr);
642 689
@@ -662,6 +709,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
662 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 709 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
663 if (cadt_flags & IPSET_FLAG_PHYSDEV) 710 if (cadt_flags & IPSET_FLAG_PHYSDEV)
664 data.physdev = 1; 711 data.physdev = 1;
712 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
713 flags |= (cadt_flags << 16);
665 } 714 }
666 715
667 ret = adtfn(set, &data, timeout, flags); 716 ret = adtfn(set, &data, timeout, flags);
@@ -677,8 +726,9 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
677 struct ip_set_hash *h; 726 struct ip_set_hash *h;
678 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 727 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
679 u8 hbits; 728 u8 hbits;
729 size_t hsize;
680 730
681 if (!(set->family == AF_INET || set->family == AF_INET6)) 731 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
682 return -IPSET_ERR_INVALID_FAMILY; 732 return -IPSET_ERR_INVALID_FAMILY;
683 733
684 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 734 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -697,7 +747,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
697 747
698 h = kzalloc(sizeof(*h) 748 h = kzalloc(sizeof(*h)
699 + sizeof(struct ip_set_hash_nets) 749 + sizeof(struct ip_set_hash_nets)
700 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 750 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
701 if (!h) 751 if (!h)
702 return -ENOMEM; 752 return -ENOMEM;
703 753
@@ -707,9 +757,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
707 h->ahash_max = AHASH_MAX_SIZE; 757 h->ahash_max = AHASH_MAX_SIZE;
708 758
709 hbits = htable_bits(hashsize); 759 hbits = htable_bits(hashsize);
710 h->table = ip_set_alloc( 760 hsize = htable_size(hbits);
711 sizeof(struct htable) 761 if (hsize == 0) {
712 + jhash_size(hbits) * sizeof(struct hbucket)); 762 kfree(h);
763 return -ENOMEM;
764 }
765 h->table = ip_set_alloc(hsize);
713 if (!h->table) { 766 if (!h->table) {
714 kfree(h); 767 kfree(h);
715 return -ENOMEM; 768 return -ENOMEM;
@@ -722,15 +775,15 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
722 if (tb[IPSET_ATTR_TIMEOUT]) { 775 if (tb[IPSET_ATTR_TIMEOUT]) {
723 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 776 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
724 777
725 set->variant = set->family == AF_INET 778 set->variant = set->family == NFPROTO_IPV4
726 ? &hash_netiface4_tvariant : &hash_netiface6_tvariant; 779 ? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
727 780
728 if (set->family == AF_INET) 781 if (set->family == NFPROTO_IPV4)
729 hash_netiface4_gc_init(set); 782 hash_netiface4_gc_init(set);
730 else 783 else
731 hash_netiface6_gc_init(set); 784 hash_netiface6_gc_init(set);
732 } else { 785 } else {
733 set->variant = set->family == AF_INET 786 set->variant = set->family == NFPROTO_IPV4
734 ? &hash_netiface4_variant : &hash_netiface6_variant; 787 ? &hash_netiface4_variant : &hash_netiface6_variant;
735 } 788 }
736 789
@@ -746,8 +799,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
746 .protocol = IPSET_PROTOCOL, 799 .protocol = IPSET_PROTOCOL,
747 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, 800 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
748 .dimension = IPSET_DIM_TWO, 801 .dimension = IPSET_DIM_TWO,
749 .family = AF_UNSPEC, 802 .family = NFPROTO_UNSPEC,
750 .revision_min = 0, 803 .revision_min = 0,
804 .revision_max = 1, /* nomatch flag support added */
751 .create = hash_netiface_create, 805 .create = hash_netiface_create,
752 .create_policy = { 806 .create_policy = {
753 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 807 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8f9de7207ec9..fc3143a2d41b 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -40,12 +40,19 @@ hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
40 40
41/* The type variant functions: IPv4 */ 41/* The type variant functions: IPv4 */
42 42
43/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
44 * However this way we have to store internally cidr - 1,
45 * dancing back and forth.
46 */
47#define IP_SET_HASH_WITH_NETS_PACKED
48
43/* Member elements without timeout */ 49/* Member elements without timeout */
44struct hash_netport4_elem { 50struct hash_netport4_elem {
45 __be32 ip; 51 __be32 ip;
46 __be16 port; 52 __be16 port;
47 u8 proto; 53 u8 proto;
48 u8 cidr; 54 u8 cidr:7;
55 u8 nomatch:1;
49}; 56};
50 57
51/* Member elements with timeout support */ 58/* Member elements with timeout support */
@@ -53,7 +60,8 @@ struct hash_netport4_telem {
53 __be32 ip; 60 __be32 ip;
54 __be16 port; 61 __be16 port;
55 u8 proto; 62 u8 proto;
56 u8 cidr; 63 u8 cidr:7;
64 u8 nomatch:1;
57 unsigned long timeout; 65 unsigned long timeout;
58}; 66};
59 67
@@ -82,13 +90,26 @@ hash_netport4_data_copy(struct hash_netport4_elem *dst,
82 dst->port = src->port; 90 dst->port = src->port;
83 dst->proto = src->proto; 91 dst->proto = src->proto;
84 dst->cidr = src->cidr; 92 dst->cidr = src->cidr;
93 dst->nomatch = src->nomatch;
94}
95
96static inline void
97hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
98{
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100}
101
102static inline bool
103hash_netport4_data_match(const struct hash_netport4_elem *elem)
104{
105 return !elem->nomatch;
85} 106}
86 107
87static inline void 108static inline void
88hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) 109hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
89{ 110{
90 elem->ip &= ip_set_netmask(cidr); 111 elem->ip &= ip_set_netmask(cidr);
91 elem->cidr = cidr; 112 elem->cidr = cidr - 1;
92} 113}
93 114
94static inline void 115static inline void
@@ -101,10 +122,15 @@ static bool
101hash_netport4_data_list(struct sk_buff *skb, 122hash_netport4_data_list(struct sk_buff *skb,
102 const struct hash_netport4_elem *data) 123 const struct hash_netport4_elem *data)
103{ 124{
104 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
105 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 126
106 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 127 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
107 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 128 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
129 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
130 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
131 (flags &&
132 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
133 goto nla_put_failure;
108 return 0; 134 return 0;
109 135
110nla_put_failure: 136nla_put_failure:
@@ -117,14 +143,17 @@ hash_netport4_data_tlist(struct sk_buff *skb,
117{ 143{
118 const struct hash_netport4_telem *tdata = 144 const struct hash_netport4_telem *tdata =
119 (const struct hash_netport4_telem *)data; 145 (const struct hash_netport4_telem *)data;
120 146 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
121 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 147
122 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 148 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
123 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 149 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
124 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 150 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
125 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 151 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
126 htonl(ip_set_timeout_get(tdata->timeout))); 152 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
127 153 htonl(ip_set_timeout_get(tdata->timeout))) ||
154 (flags &&
155 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
156 goto nla_put_failure;
128 return 0; 157 return 0;
129 158
130nla_put_failure: 159nla_put_failure:
@@ -154,20 +183,18 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
154 const struct ip_set_hash *h = set->data; 183 const struct ip_set_hash *h = set->data;
155 ipset_adtfn adtfn = set->variant->adt[adt]; 184 ipset_adtfn adtfn = set->variant->adt[adt];
156 struct hash_netport4_elem data = { 185 struct hash_netport4_elem data = {
157 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 186 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
158 }; 187 };
159 188
160 if (data.cidr == 0)
161 return -EINVAL;
162 if (adt == IPSET_TEST) 189 if (adt == IPSET_TEST)
163 data.cidr = HOST_MASK; 190 data.cidr = HOST_MASK - 1;
164 191
165 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 192 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
166 &data.port, &data.proto)) 193 &data.port, &data.proto))
167 return -EINVAL; 194 return -EINVAL;
168 195
169 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); 196 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
170 data.ip &= ip_set_netmask(data.cidr); 197 data.ip &= ip_set_netmask(data.cidr + 1);
171 198
172 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 199 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
173} 200}
@@ -178,16 +205,18 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
178{ 205{
179 const struct ip_set_hash *h = set->data; 206 const struct ip_set_hash *h = set->data;
180 ipset_adtfn adtfn = set->variant->adt[adt]; 207 ipset_adtfn adtfn = set->variant->adt[adt];
181 struct hash_netport4_elem data = { .cidr = HOST_MASK }; 208 struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 };
182 u32 port, port_to, p = 0, ip = 0, ip_to, last; 209 u32 port, port_to, p = 0, ip = 0, ip_to, last;
183 u32 timeout = h->timeout; 210 u32 timeout = h->timeout;
184 bool with_ports = false; 211 bool with_ports = false;
212 u8 cidr;
185 int ret; 213 int ret;
186 214
187 if (unlikely(!tb[IPSET_ATTR_IP] || 215 if (unlikely(!tb[IPSET_ATTR_IP] ||
188 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 216 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
189 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 217 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
190 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 218 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
219 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
191 return -IPSET_ERR_PROTOCOL; 220 return -IPSET_ERR_PROTOCOL;
192 221
193 if (tb[IPSET_ATTR_LINENO]) 222 if (tb[IPSET_ATTR_LINENO])
@@ -198,9 +227,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
198 return ret; 227 return ret;
199 228
200 if (tb[IPSET_ATTR_CIDR]) { 229 if (tb[IPSET_ATTR_CIDR]) {
201 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 230 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
202 if (!data.cidr) 231 if (!cidr || cidr > HOST_MASK)
203 return -IPSET_ERR_INVALID_CIDR; 232 return -IPSET_ERR_INVALID_CIDR;
233 data.cidr = cidr - 1;
204 } 234 }
205 235
206 if (tb[IPSET_ATTR_PORT]) 236 if (tb[IPSET_ATTR_PORT])
@@ -227,8 +257,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
227 } 257 }
228 258
229 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; 259 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
260
261 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
262 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
263 if (cadt_flags & IPSET_FLAG_NOMATCH)
264 flags |= (cadt_flags << 16);
265 }
266
230 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { 267 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
231 data.ip = htonl(ip & ip_set_hostmask(data.cidr)); 268 data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1));
232 ret = adtfn(set, &data, timeout, flags); 269 ret = adtfn(set, &data, timeout, flags);
233 return ip_set_eexist(ret, flags) ? 0 : ret; 270 return ip_set_eexist(ret, flags) ? 0 : ret;
234 } 271 }
@@ -248,14 +285,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
248 if (ip + UINT_MAX == ip_to) 285 if (ip + UINT_MAX == ip_to)
249 return -IPSET_ERR_HASH_RANGE; 286 return -IPSET_ERR_HASH_RANGE;
250 } else { 287 } else {
251 ip_set_mask_from_to(ip, ip_to, data.cidr); 288 ip_set_mask_from_to(ip, ip_to, data.cidr + 1);
252 } 289 }
253 290
254 if (retried) 291 if (retried)
255 ip = h->next.ip; 292 ip = h->next.ip;
256 while (!after(ip, ip_to)) { 293 while (!after(ip, ip_to)) {
257 data.ip = htonl(ip); 294 data.ip = htonl(ip);
258 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); 295 last = ip_set_range_to_cidr(ip, ip_to, &cidr);
296 data.cidr = cidr - 1;
259 p = retried && ip == h->next.ip ? h->next.port : port; 297 p = retried && ip == h->next.ip ? h->next.port : port;
260 for (; p <= port_to; p++) { 298 for (; p <= port_to; p++) {
261 data.port = htons(p); 299 data.port = htons(p);
@@ -288,14 +326,16 @@ struct hash_netport6_elem {
288 union nf_inet_addr ip; 326 union nf_inet_addr ip;
289 __be16 port; 327 __be16 port;
290 u8 proto; 328 u8 proto;
291 u8 cidr; 329 u8 cidr:7;
330 u8 nomatch:1;
292}; 331};
293 332
294struct hash_netport6_telem { 333struct hash_netport6_telem {
295 union nf_inet_addr ip; 334 union nf_inet_addr ip;
296 __be16 port; 335 __be16 port;
297 u8 proto; 336 u8 proto;
298 u8 cidr; 337 u8 cidr:7;
338 u8 nomatch:1;
299 unsigned long timeout; 339 unsigned long timeout;
300}; 340};
301 341
@@ -324,6 +364,18 @@ hash_netport6_data_copy(struct hash_netport6_elem *dst,
324} 364}
325 365
326static inline void 366static inline void
367hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
368{
369 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
370}
371
372static inline bool
373hash_netport6_data_match(const struct hash_netport6_elem *elem)
374{
375 return !elem->nomatch;
376}
377
378static inline void
327hash_netport6_data_zero_out(struct hash_netport6_elem *elem) 379hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
328{ 380{
329 elem->proto = 0; 381 elem->proto = 0;
@@ -342,17 +394,22 @@ static inline void
342hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) 394hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
343{ 395{
344 ip6_netmask(&elem->ip, cidr); 396 ip6_netmask(&elem->ip, cidr);
345 elem->cidr = cidr; 397 elem->cidr = cidr - 1;
346} 398}
347 399
348static bool 400static bool
349hash_netport6_data_list(struct sk_buff *skb, 401hash_netport6_data_list(struct sk_buff *skb,
350 const struct hash_netport6_elem *data) 402 const struct hash_netport6_elem *data)
351{ 403{
352 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 404 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
353 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 405
354 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 406 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
355 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 407 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
408 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
409 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
410 (flags &&
411 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
412 goto nla_put_failure;
356 return 0; 413 return 0;
357 414
358nla_put_failure: 415nla_put_failure:
@@ -365,13 +422,17 @@ hash_netport6_data_tlist(struct sk_buff *skb,
365{ 422{
366 const struct hash_netport6_telem *e = 423 const struct hash_netport6_telem *e =
367 (const struct hash_netport6_telem *)data; 424 (const struct hash_netport6_telem *)data;
368 425 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
369 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 426
370 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 427 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
371 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 428 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
372 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 429 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
373 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 430 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
374 htonl(ip_set_timeout_get(e->timeout))); 431 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
432 htonl(ip_set_timeout_get(e->timeout))) ||
433 (flags &&
434 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
435 goto nla_put_failure;
375 return 0; 436 return 0;
376 437
377nla_put_failure: 438nla_put_failure:
@@ -400,20 +461,18 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
400 const struct ip_set_hash *h = set->data; 461 const struct ip_set_hash *h = set->data;
401 ipset_adtfn adtfn = set->variant->adt[adt]; 462 ipset_adtfn adtfn = set->variant->adt[adt];
402 struct hash_netport6_elem data = { 463 struct hash_netport6_elem data = {
403 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 464 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
404 }; 465 };
405 466
406 if (data.cidr == 0)
407 return -EINVAL;
408 if (adt == IPSET_TEST) 467 if (adt == IPSET_TEST)
409 data.cidr = HOST_MASK; 468 data.cidr = HOST_MASK - 1;
410 469
411 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 470 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
412 &data.port, &data.proto)) 471 &data.port, &data.proto))
413 return -EINVAL; 472 return -EINVAL;
414 473
415 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); 474 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
416 ip6_netmask(&data.ip, data.cidr); 475 ip6_netmask(&data.ip, data.cidr + 1);
417 476
418 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 477 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
419} 478}
@@ -424,16 +483,18 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
424{ 483{
425 const struct ip_set_hash *h = set->data; 484 const struct ip_set_hash *h = set->data;
426 ipset_adtfn adtfn = set->variant->adt[adt]; 485 ipset_adtfn adtfn = set->variant->adt[adt];
427 struct hash_netport6_elem data = { .cidr = HOST_MASK }; 486 struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 };
428 u32 port, port_to; 487 u32 port, port_to;
429 u32 timeout = h->timeout; 488 u32 timeout = h->timeout;
430 bool with_ports = false; 489 bool with_ports = false;
490 u8 cidr;
431 int ret; 491 int ret;
432 492
433 if (unlikely(!tb[IPSET_ATTR_IP] || 493 if (unlikely(!tb[IPSET_ATTR_IP] ||
434 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 494 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
435 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 495 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
436 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 496 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
497 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
437 return -IPSET_ERR_PROTOCOL; 498 return -IPSET_ERR_PROTOCOL;
438 if (unlikely(tb[IPSET_ATTR_IP_TO])) 499 if (unlikely(tb[IPSET_ATTR_IP_TO]))
439 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; 500 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -445,11 +506,13 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
445 if (ret) 506 if (ret)
446 return ret; 507 return ret;
447 508
448 if (tb[IPSET_ATTR_CIDR]) 509 if (tb[IPSET_ATTR_CIDR]) {
449 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 510 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
450 if (!data.cidr) 511 if (!cidr || cidr > HOST_MASK)
451 return -IPSET_ERR_INVALID_CIDR; 512 return -IPSET_ERR_INVALID_CIDR;
452 ip6_netmask(&data.ip, data.cidr); 513 data.cidr = cidr - 1;
514 }
515 ip6_netmask(&data.ip, data.cidr + 1);
453 516
454 if (tb[IPSET_ATTR_PORT]) 517 if (tb[IPSET_ATTR_PORT])
455 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); 518 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -474,6 +537,12 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
474 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 537 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
475 } 538 }
476 539
540 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
541 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
542 if (cadt_flags & IPSET_FLAG_NOMATCH)
543 flags |= (cadt_flags << 16);
544 }
545
477 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 546 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
478 ret = adtfn(set, &data, timeout, flags); 547 ret = adtfn(set, &data, timeout, flags);
479 return ip_set_eexist(ret, flags) ? 0 : ret; 548 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -506,8 +575,9 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
506 struct ip_set_hash *h; 575 struct ip_set_hash *h;
507 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 576 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
508 u8 hbits; 577 u8 hbits;
578 size_t hsize;
509 579
510 if (!(set->family == AF_INET || set->family == AF_INET6)) 580 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
511 return -IPSET_ERR_INVALID_FAMILY; 581 return -IPSET_ERR_INVALID_FAMILY;
512 582
513 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 583 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -526,7 +596,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
526 596
527 h = kzalloc(sizeof(*h) 597 h = kzalloc(sizeof(*h)
528 + sizeof(struct ip_set_hash_nets) 598 + sizeof(struct ip_set_hash_nets)
529 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 599 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
530 if (!h) 600 if (!h)
531 return -ENOMEM; 601 return -ENOMEM;
532 602
@@ -535,9 +605,12 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
535 h->timeout = IPSET_NO_TIMEOUT; 605 h->timeout = IPSET_NO_TIMEOUT;
536 606
537 hbits = htable_bits(hashsize); 607 hbits = htable_bits(hashsize);
538 h->table = ip_set_alloc( 608 hsize = htable_size(hbits);
539 sizeof(struct htable) 609 if (hsize == 0) {
540 + jhash_size(hbits) * sizeof(struct hbucket)); 610 kfree(h);
611 return -ENOMEM;
612 }
613 h->table = ip_set_alloc(hsize);
541 if (!h->table) { 614 if (!h->table) {
542 kfree(h); 615 kfree(h);
543 return -ENOMEM; 616 return -ENOMEM;
@@ -549,15 +622,15 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
549 if (tb[IPSET_ATTR_TIMEOUT]) { 622 if (tb[IPSET_ATTR_TIMEOUT]) {
550 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 623 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
551 624
552 set->variant = set->family == AF_INET 625 set->variant = set->family == NFPROTO_IPV4
553 ? &hash_netport4_tvariant : &hash_netport6_tvariant; 626 ? &hash_netport4_tvariant : &hash_netport6_tvariant;
554 627
555 if (set->family == AF_INET) 628 if (set->family == NFPROTO_IPV4)
556 hash_netport4_gc_init(set); 629 hash_netport4_gc_init(set);
557 else 630 else
558 hash_netport6_gc_init(set); 631 hash_netport6_gc_init(set);
559 } else { 632 } else {
560 set->variant = set->family == AF_INET 633 set->variant = set->family == NFPROTO_IPV4
561 ? &hash_netport4_variant : &hash_netport6_variant; 634 ? &hash_netport4_variant : &hash_netport6_variant;
562 } 635 }
563 636
@@ -573,10 +646,11 @@ static struct ip_set_type hash_netport_type __read_mostly = {
573 .protocol = IPSET_PROTOCOL, 646 .protocol = IPSET_PROTOCOL,
574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 647 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
575 .dimension = IPSET_DIM_TWO, 648 .dimension = IPSET_DIM_TWO,
576 .family = AF_UNSPEC, 649 .family = NFPROTO_UNSPEC,
577 .revision_min = 0, 650 .revision_min = 0,
578 /* 1 SCTP and UDPLITE support added */ 651 /* 1 SCTP and UDPLITE support added */
579 .revision_max = 2, /* Range as input support for IPv4 added */ 652 /* 2, Range as input support for IPv4 added */
653 .revision_max = 3, /* nomatch flag support added */
580 .create = hash_netport_create, 654 .create = hash_netport_create,
581 .create_policy = { 655 .create_policy = {
582 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 656 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -595,6 +669,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
595 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 669 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
596 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 670 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
597 [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, 671 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
672 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
598 }, 673 },
599 .me = THIS_MODULE, 674 .me = THIS_MODULE,
600}; 675};
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4d10819d462e..6cb1225765f9 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
402 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 402 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
403 if (!nested) 403 if (!nested)
404 goto nla_put_failure; 404 goto nla_put_failure;
405 NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); 405 if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
406 if (with_timeout(map->timeout)) 406 (with_timeout(map->timeout) &&
407 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 407 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
408 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 408 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
409 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 409 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
410 htonl(sizeof(*map) + map->size * map->dsize)); 410 htonl(sizeof(*map) + map->size * map->dsize)))
411 goto nla_put_failure;
411 ipset_nest_end(skb, nested); 412 ipset_nest_end(skb, nested);
412 413
413 return 0; 414 return 0;
@@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set,
442 } else 443 } else
443 goto nla_put_failure; 444 goto nla_put_failure;
444 } 445 }
445 NLA_PUT_STRING(skb, IPSET_ATTR_NAME, 446 if (nla_put_string(skb, IPSET_ATTR_NAME,
446 ip_set_name_byindex(e->id)); 447 ip_set_name_byindex(e->id)))
448 goto nla_put_failure;
447 if (with_timeout(map->timeout)) { 449 if (with_timeout(map->timeout)) {
448 const struct set_telem *te = 450 const struct set_telem *te =
449 (const struct set_telem *) e; 451 (const struct set_telem *) e;
450 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 452 __be32 to = htonl(ip_set_timeout_get(te->timeout));
451 htonl(ip_set_timeout_get(te->timeout))); 453 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
454 goto nla_put_failure;
452 } 455 }
453 ipset_nest_end(skb, nested); 456 ipset_nest_end(skb, nested);
454 } 457 }
@@ -575,7 +578,7 @@ static struct ip_set_type list_set_type __read_mostly = {
575 .protocol = IPSET_PROTOCOL, 578 .protocol = IPSET_PROTOCOL,
576 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, 579 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
577 .dimension = IPSET_DIM_ONE, 580 .dimension = IPSET_DIM_ONE,
578 .family = AF_UNSPEC, 581 .family = NFPROTO_UNSPEC,
579 .revision_min = 0, 582 .revision_min = 0,
580 .revision_max = 0, 583 .revision_max = 0,
581 .create = list_set_create, 584 .create = list_set_create,
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index fe6cb4304d72..64f9e8f13207 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -31,7 +31,6 @@
31#include <net/net_namespace.h> 31#include <net/net_namespace.h>
32#include <net/protocol.h> 32#include <net/protocol.h>
33#include <net/tcp.h> 33#include <net/tcp.h>
34#include <asm/system.h>
35#include <linux/stat.h> 34#include <linux/stat.h>
36#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
37#include <linux/seq_file.h> 36#include <linux/seq_file.h>
@@ -314,7 +313,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
314 * Assumes already checked proto==IPPROTO_TCP and diff!=0. 313 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
315 */ 314 */
316static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, 315static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
317 unsigned flag, __u32 seq, int diff) 316 unsigned int flag, __u32 seq, int diff)
318{ 317{
319 /* spinlock is to keep updating cp->flags atomic */ 318 /* spinlock is to keep updating cp->flags atomic */
320 spin_lock(&cp->lock); 319 spin_lock(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 29fa5badde75..1548df9a7524 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -86,42 +86,42 @@ struct ip_vs_aligned_lock
86static struct ip_vs_aligned_lock 86static struct ip_vs_aligned_lock
87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned; 87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
88 88
89static inline void ct_read_lock(unsigned key) 89static inline void ct_read_lock(unsigned int key)
90{ 90{
91 read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 91 read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
92} 92}
93 93
94static inline void ct_read_unlock(unsigned key) 94static inline void ct_read_unlock(unsigned int key)
95{ 95{
96 read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 96 read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
97} 97}
98 98
99static inline void ct_write_lock(unsigned key) 99static inline void ct_write_lock(unsigned int key)
100{ 100{
101 write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 101 write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
102} 102}
103 103
104static inline void ct_write_unlock(unsigned key) 104static inline void ct_write_unlock(unsigned int key)
105{ 105{
106 write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 106 write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
107} 107}
108 108
109static inline void ct_read_lock_bh(unsigned key) 109static inline void ct_read_lock_bh(unsigned int key)
110{ 110{
111 read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 111 read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
112} 112}
113 113
114static inline void ct_read_unlock_bh(unsigned key) 114static inline void ct_read_unlock_bh(unsigned int key)
115{ 115{
116 read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 116 read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
117} 117}
118 118
119static inline void ct_write_lock_bh(unsigned key) 119static inline void ct_write_lock_bh(unsigned int key)
120{ 120{
121 write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 121 write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
122} 122}
123 123
124static inline void ct_write_unlock_bh(unsigned key) 124static inline void ct_write_unlock_bh(unsigned int key)
125{ 125{
126 write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 126 write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
127} 127}
@@ -130,7 +130,7 @@ static inline void ct_write_unlock_bh(unsigned key)
130/* 130/*
131 * Returns hash value for IPVS connection entry 131 * Returns hash value for IPVS connection entry
132 */ 132 */
133static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto, 133static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
134 const union nf_inet_addr *addr, 134 const union nf_inet_addr *addr,
135 __be16 port) 135 __be16 port)
136{ 136{
@@ -188,7 +188,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
188 */ 188 */
189static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) 189static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
190{ 190{
191 unsigned hash; 191 unsigned int hash;
192 int ret; 192 int ret;
193 193
194 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) 194 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
@@ -224,7 +224,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
224 */ 224 */
225static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) 225static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
226{ 226{
227 unsigned hash; 227 unsigned int hash;
228 int ret; 228 int ret;
229 229
230 /* unhash it and decrease its reference counter */ 230 /* unhash it and decrease its reference counter */
@@ -257,7 +257,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
257static inline struct ip_vs_conn * 257static inline struct ip_vs_conn *
258__ip_vs_conn_in_get(const struct ip_vs_conn_param *p) 258__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
259{ 259{
260 unsigned hash; 260 unsigned int hash;
261 struct ip_vs_conn *cp; 261 struct ip_vs_conn *cp;
262 struct hlist_node *n; 262 struct hlist_node *n;
263 263
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
344/* Get reference to connection template */ 344/* Get reference to connection template */
345struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) 345struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
346{ 346{
347 unsigned hash; 347 unsigned int hash;
348 struct ip_vs_conn *cp; 348 struct ip_vs_conn *cp;
349 struct hlist_node *n; 349 struct hlist_node *n;
350 350
@@ -394,7 +394,7 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
394 * p->vaddr, p->vport: pkt dest address (foreign host) */ 394 * p->vaddr, p->vport: pkt dest address (foreign host) */
395struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) 395struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
396{ 396{
397 unsigned hash; 397 unsigned int hash;
398 struct ip_vs_conn *cp, *ret=NULL; 398 struct ip_vs_conn *cp, *ret=NULL;
399 struct hlist_node *n; 399 struct hlist_node *n;
400 400
@@ -548,6 +548,7 @@ static inline void
548ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) 548ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
549{ 549{
550 unsigned int conn_flags; 550 unsigned int conn_flags;
551 __u32 flags;
551 552
552 /* if dest is NULL, then return directly */ 553 /* if dest is NULL, then return directly */
553 if (!dest) 554 if (!dest)
@@ -559,17 +560,19 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
559 conn_flags = atomic_read(&dest->conn_flags); 560 conn_flags = atomic_read(&dest->conn_flags);
560 if (cp->protocol != IPPROTO_UDP) 561 if (cp->protocol != IPPROTO_UDP)
561 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; 562 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
563 flags = cp->flags;
562 /* Bind with the destination and its corresponding transmitter */ 564 /* Bind with the destination and its corresponding transmitter */
563 if (cp->flags & IP_VS_CONN_F_SYNC) { 565 if (flags & IP_VS_CONN_F_SYNC) {
564 /* if the connection is not template and is created 566 /* if the connection is not template and is created
565 * by sync, preserve the activity flag. 567 * by sync, preserve the activity flag.
566 */ 568 */
567 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) 569 if (!(flags & IP_VS_CONN_F_TEMPLATE))
568 conn_flags &= ~IP_VS_CONN_F_INACTIVE; 570 conn_flags &= ~IP_VS_CONN_F_INACTIVE;
569 /* connections inherit forwarding method from dest */ 571 /* connections inherit forwarding method from dest */
570 cp->flags &= ~IP_VS_CONN_F_FWD_MASK; 572 flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
571 } 573 }
572 cp->flags |= conn_flags; 574 flags |= conn_flags;
575 cp->flags = flags;
573 cp->dest = dest; 576 cp->dest = dest;
574 577
575 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " 578 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
@@ -584,12 +587,12 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
584 atomic_read(&dest->refcnt)); 587 atomic_read(&dest->refcnt));
585 588
586 /* Update the connection counters */ 589 /* Update the connection counters */
587 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 590 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
588 /* It is a normal connection, so increase the inactive 591 /* It is a normal connection, so modify the counters
589 connection counter because it is in TCP SYNRECV 592 * according to the flags, later the protocol can
590 state (inactive) or other protocol inacive state */ 593 * update them on state change
591 if ((cp->flags & IP_VS_CONN_F_SYNC) && 594 */
592 (!(cp->flags & IP_VS_CONN_F_INACTIVE))) 595 if (!(flags & IP_VS_CONN_F_INACTIVE))
593 atomic_inc(&dest->activeconns); 596 atomic_inc(&dest->activeconns);
594 else 597 else
595 atomic_inc(&dest->inactconns); 598 atomic_inc(&dest->inactconns);
@@ -613,14 +616,40 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
613{ 616{
614 struct ip_vs_dest *dest; 617 struct ip_vs_dest *dest;
615 618
616 if ((cp) && (!cp->dest)) { 619 dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
617 dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, 620 cp->dport, &cp->vaddr, cp->vport,
618 cp->dport, &cp->vaddr, cp->vport, 621 cp->protocol, cp->fwmark, cp->flags);
619 cp->protocol, cp->fwmark, cp->flags); 622 if (dest) {
623 struct ip_vs_proto_data *pd;
624
625 spin_lock(&cp->lock);
626 if (cp->dest) {
627 spin_unlock(&cp->lock);
628 return dest;
629 }
630
631 /* Applications work depending on the forwarding method
632 * but better to reassign them always when binding dest */
633 if (cp->app)
634 ip_vs_unbind_app(cp);
635
620 ip_vs_bind_dest(cp, dest); 636 ip_vs_bind_dest(cp, dest);
621 return dest; 637 spin_unlock(&cp->lock);
622 } else 638
623 return NULL; 639 /* Update its packet transmitter */
640 cp->packet_xmit = NULL;
641#ifdef CONFIG_IP_VS_IPV6
642 if (cp->af == AF_INET6)
643 ip_vs_bind_xmit_v6(cp);
644 else
645#endif
646 ip_vs_bind_xmit(cp);
647
648 pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol);
649 if (pd && atomic_read(&pd->appcnt))
650 ip_vs_bind_app(cp, pd->pp);
651 }
652 return dest;
624} 653}
625 654
626 655
@@ -743,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
743static void ip_vs_conn_expire(unsigned long data) 772static void ip_vs_conn_expire(unsigned long data)
744{ 773{
745 struct ip_vs_conn *cp = (struct ip_vs_conn *)data; 774 struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
746 struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); 775 struct net *net = ip_vs_conn_net(cp);
776 struct netns_ipvs *ipvs = net_ipvs(net);
747 777
748 cp->timeout = 60*HZ; 778 cp->timeout = 60*HZ;
749 779
@@ -808,6 +838,9 @@ static void ip_vs_conn_expire(unsigned long data)
808 atomic_read(&cp->refcnt)-1, 838 atomic_read(&cp->refcnt)-1,
809 atomic_read(&cp->n_control)); 839 atomic_read(&cp->n_control));
810 840
841 if (ipvs->sync_state & IP_VS_STATE_MASTER)
842 ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
843
811 ip_vs_conn_put(cp); 844 ip_vs_conn_put(cp);
812} 845}
813 846
@@ -824,7 +857,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
824 */ 857 */
825struct ip_vs_conn * 858struct ip_vs_conn *
826ip_vs_conn_new(const struct ip_vs_conn_param *p, 859ip_vs_conn_new(const struct ip_vs_conn_param *p,
827 const union nf_inet_addr *daddr, __be16 dport, unsigned flags, 860 const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
828 struct ip_vs_dest *dest, __u32 fwmark) 861 struct ip_vs_dest *dest, __u32 fwmark)
829{ 862{
830 struct ip_vs_conn *cp; 863 struct ip_vs_conn *cp;
@@ -881,6 +914,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
881 /* Set its state and timeout */ 914 /* Set its state and timeout */
882 cp->state = 0; 915 cp->state = 0;
883 cp->timeout = 3*HZ; 916 cp->timeout = 3*HZ;
917 cp->sync_endtime = jiffies & ~3UL;
884 918
885 /* Bind its packet transmitter */ 919 /* Bind its packet transmitter */
886#ifdef CONFIG_IP_VS_IPV6 920#ifdef CONFIG_IP_VS_IPV6
@@ -1057,7 +1091,7 @@ static const struct file_operations ip_vs_conn_fops = {
1057 .release = seq_release_net, 1091 .release = seq_release_net,
1058}; 1092};
1059 1093
1060static const char *ip_vs_origin_name(unsigned flags) 1094static const char *ip_vs_origin_name(unsigned int flags)
1061{ 1095{
1062 if (flags & IP_VS_CONN_F_SYNC) 1096 if (flags & IP_VS_CONN_F_SYNC)
1063 return "SYNC"; 1097 return "SYNC";
@@ -1169,7 +1203,7 @@ void ip_vs_random_dropentry(struct net *net)
1169 * Randomly scan 1/32 of the whole table every second 1203 * Randomly scan 1/32 of the whole table every second
1170 */ 1204 */
1171 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { 1205 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1172 unsigned hash = net_random() & ip_vs_conn_tab_mask; 1206 unsigned int hash = net_random() & ip_vs_conn_tab_mask;
1173 struct hlist_node *n; 1207 struct hlist_node *n;
1174 1208
1175 /* 1209 /*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..a54b018c6eea 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -80,7 +80,7 @@ static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
80#define icmp_id(icmph) (((icmph)->un).echo.id) 80#define icmp_id(icmph) (((icmph)->un).echo.id)
81#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) 81#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
82 82
83const char *ip_vs_proto_name(unsigned proto) 83const char *ip_vs_proto_name(unsigned int proto)
84{ 84{
85 static char buf[20]; 85 static char buf[20];
86 86
@@ -1613,34 +1613,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1613 else 1613 else
1614 pkts = atomic_add_return(1, &cp->in_pkts); 1614 pkts = atomic_add_return(1, &cp->in_pkts);
1615 1615
1616 if ((ipvs->sync_state & IP_VS_STATE_MASTER) && 1616 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1617 cp->protocol == IPPROTO_SCTP) { 1617 ip_vs_sync_conn(net, cp, pkts);
1618 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1619 (pkts % sysctl_sync_period(ipvs)
1620 == sysctl_sync_threshold(ipvs))) ||
1621 (cp->old_state != cp->state &&
1622 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1623 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1624 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1625 ip_vs_sync_conn(net, cp);
1626 goto out;
1627 }
1628 }
1629
1630 /* Keep this block last: TCP and others with pp->num_states <= 1 */
1631 else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
1632 (((cp->protocol != IPPROTO_TCP ||
1633 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
1634 (pkts % sysctl_sync_period(ipvs)
1635 == sysctl_sync_threshold(ipvs))) ||
1636 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1637 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1638 (cp->state == IP_VS_TCP_S_CLOSE) ||
1639 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1640 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1641 ip_vs_sync_conn(net, cp);
1642out:
1643 cp->old_state = cp->state;
1644 1618
1645 ip_vs_conn_put(cp); 1619 ip_vs_conn_put(cp);
1646 return ret; 1620 return ret;
@@ -1924,6 +1898,7 @@ protocol_fail:
1924control_fail: 1898control_fail:
1925 ip_vs_estimator_net_cleanup(net); 1899 ip_vs_estimator_net_cleanup(net);
1926estimator_fail: 1900estimator_fail:
1901 net->ipvs = NULL;
1927 return -ENOMEM; 1902 return -ENOMEM;
1928} 1903}
1929 1904
@@ -1936,6 +1911,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
1936 ip_vs_control_net_cleanup(net); 1911 ip_vs_control_net_cleanup(net);
1937 ip_vs_estimator_net_cleanup(net); 1912 ip_vs_estimator_net_cleanup(net);
1938 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); 1913 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1914 net->ipvs = NULL;
1939} 1915}
1940 1916
1941static void __net_exit __ip_vs_dev_cleanup(struct net *net) 1917static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1969,18 @@ static int __init ip_vs_init(void)
1993 goto cleanup_dev; 1969 goto cleanup_dev;
1994 } 1970 }
1995 1971
1972 ret = ip_vs_register_nl_ioctl();
1973 if (ret < 0) {
1974 pr_err("can't register netlink/ioctl.\n");
1975 goto cleanup_hooks;
1976 }
1977
1996 pr_info("ipvs loaded.\n"); 1978 pr_info("ipvs loaded.\n");
1997 1979
1998 return ret; 1980 return ret;
1999 1981
1982cleanup_hooks:
1983 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2000cleanup_dev: 1984cleanup_dev:
2001 unregister_pernet_device(&ipvs_core_dev_ops); 1985 unregister_pernet_device(&ipvs_core_dev_ops);
2002cleanup_sub: 1986cleanup_sub:
@@ -2012,6 +1996,7 @@ exit:
2012 1996
2013static void __exit ip_vs_cleanup(void) 1997static void __exit ip_vs_cleanup(void)
2014{ 1998{
1999 ip_vs_unregister_nl_ioctl();
2015 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2000 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2016 unregister_pernet_device(&ipvs_core_dev_ops); 2001 unregister_pernet_device(&ipvs_core_dev_ops);
2017 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2002 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..dd811b8dd97c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -265,11 +265,11 @@ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
265/* 265/*
266 * Returns hash value for virtual service 266 * Returns hash value for virtual service
267 */ 267 */
268static inline unsigned 268static inline unsigned int
269ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, 269ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
270 const union nf_inet_addr *addr, __be16 port) 270 const union nf_inet_addr *addr, __be16 port)
271{ 271{
272 register unsigned porth = ntohs(port); 272 register unsigned int porth = ntohs(port);
273 __be32 addr_fold = addr->ip; 273 __be32 addr_fold = addr->ip;
274 274
275#ifdef CONFIG_IP_VS_IPV6 275#ifdef CONFIG_IP_VS_IPV6
@@ -286,7 +286,7 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
286/* 286/*
287 * Returns hash value of fwmark for virtual service lookup 287 * Returns hash value of fwmark for virtual service lookup
288 */ 288 */
289static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) 289static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
290{ 290{
291 return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; 291 return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
292} 292}
@@ -298,7 +298,7 @@ static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
298 */ 298 */
299static int ip_vs_svc_hash(struct ip_vs_service *svc) 299static int ip_vs_svc_hash(struct ip_vs_service *svc)
300{ 300{
301 unsigned hash; 301 unsigned int hash;
302 302
303 if (svc->flags & IP_VS_SVC_F_HASHED) { 303 if (svc->flags & IP_VS_SVC_F_HASHED) {
304 pr_err("%s(): request for already hashed, called from %pF\n", 304 pr_err("%s(): request for already hashed, called from %pF\n",
@@ -361,7 +361,7 @@ static inline struct ip_vs_service *
361__ip_vs_service_find(struct net *net, int af, __u16 protocol, 361__ip_vs_service_find(struct net *net, int af, __u16 protocol,
362 const union nf_inet_addr *vaddr, __be16 vport) 362 const union nf_inet_addr *vaddr, __be16 vport)
363{ 363{
364 unsigned hash; 364 unsigned int hash;
365 struct ip_vs_service *svc; 365 struct ip_vs_service *svc;
366 366
367 /* Check for "full" addressed entries */ 367 /* Check for "full" addressed entries */
@@ -388,7 +388,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol,
388static inline struct ip_vs_service * 388static inline struct ip_vs_service *
389__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) 389__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
390{ 390{
391 unsigned hash; 391 unsigned int hash;
392 struct ip_vs_service *svc; 392 struct ip_vs_service *svc;
393 393
394 /* Check for fwmark addressed entries */ 394 /* Check for fwmark addressed entries */
@@ -489,11 +489,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
489/* 489/*
490 * Returns hash value for real service 490 * Returns hash value for real service
491 */ 491 */
492static inline unsigned ip_vs_rs_hashkey(int af, 492static inline unsigned int ip_vs_rs_hashkey(int af,
493 const union nf_inet_addr *addr, 493 const union nf_inet_addr *addr,
494 __be16 port) 494 __be16 port)
495{ 495{
496 register unsigned porth = ntohs(port); 496 register unsigned int porth = ntohs(port);
497 __be32 addr_fold = addr->ip; 497 __be32 addr_fold = addr->ip;
498 498
499#ifdef CONFIG_IP_VS_IPV6 499#ifdef CONFIG_IP_VS_IPV6
@@ -512,7 +512,7 @@ static inline unsigned ip_vs_rs_hashkey(int af,
512 */ 512 */
513static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) 513static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
514{ 514{
515 unsigned hash; 515 unsigned int hash;
516 516
517 if (!list_empty(&dest->d_list)) { 517 if (!list_empty(&dest->d_list)) {
518 return 0; 518 return 0;
@@ -555,7 +555,7 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
555 __be16 dport) 555 __be16 dport)
556{ 556{
557 struct netns_ipvs *ipvs = net_ipvs(net); 557 struct netns_ipvs *ipvs = net_ipvs(net);
558 unsigned hash; 558 unsigned int hash;
559 struct ip_vs_dest *dest; 559 struct ip_vs_dest *dest;
560 560
561 /* 561 /*
@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
842 struct ip_vs_dest **dest_p) 842 struct ip_vs_dest **dest_p)
843{ 843{
844 struct ip_vs_dest *dest; 844 struct ip_vs_dest *dest;
845 unsigned atype; 845 unsigned int atype;
846 846
847 EnterFunction(2); 847 EnterFunction(2);
848 848
@@ -1599,6 +1599,10 @@ static int ip_vs_zero_all(struct net *net)
1599} 1599}
1600 1600
1601#ifdef CONFIG_SYSCTL 1601#ifdef CONFIG_SYSCTL
1602
1603static int zero;
1604static int three = 3;
1605
1602static int 1606static int
1603proc_do_defense_mode(ctl_table *table, int write, 1607proc_do_defense_mode(ctl_table *table, int write,
1604 void __user *buffer, size_t *lenp, loff_t *ppos) 1608 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1632,7 +1636,8 @@ proc_do_sync_threshold(ctl_table *table, int write,
1632 memcpy(val, valp, sizeof(val)); 1636 memcpy(val, valp, sizeof(val));
1633 1637
1634 rc = proc_dointvec(table, write, buffer, lenp, ppos); 1638 rc = proc_dointvec(table, write, buffer, lenp, ppos);
1635 if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) { 1639 if (write && (valp[0] < 0 || valp[1] < 0 ||
1640 (valp[0] >= valp[1] && valp[1]))) {
1636 /* Restore the correct value */ 1641 /* Restore the correct value */
1637 memcpy(valp, val, sizeof(val)); 1642 memcpy(valp, val, sizeof(val));
1638 } 1643 }
@@ -1652,9 +1657,24 @@ proc_do_sync_mode(ctl_table *table, int write,
1652 if ((*valp < 0) || (*valp > 1)) { 1657 if ((*valp < 0) || (*valp > 1)) {
1653 /* Restore the correct value */ 1658 /* Restore the correct value */
1654 *valp = val; 1659 *valp = val;
1655 } else { 1660 }
1656 struct net *net = current->nsproxy->net_ns; 1661 }
1657 ip_vs_sync_switch_mode(net, val); 1662 return rc;
1663}
1664
1665static int
1666proc_do_sync_ports(ctl_table *table, int write,
1667 void __user *buffer, size_t *lenp, loff_t *ppos)
1668{
1669 int *valp = table->data;
1670 int val = *valp;
1671 int rc;
1672
1673 rc = proc_dointvec(table, write, buffer, lenp, ppos);
1674 if (write && (*valp != val)) {
1675 if (*valp < 1 || !is_power_of_2(*valp)) {
1676 /* Restore the correct value */
1677 *valp = val;
1658 } 1678 }
1659 } 1679 }
1660 return rc; 1680 return rc;
@@ -1718,6 +1738,24 @@ static struct ctl_table vs_vars[] = {
1718 .proc_handler = &proc_do_sync_mode, 1738 .proc_handler = &proc_do_sync_mode,
1719 }, 1739 },
1720 { 1740 {
1741 .procname = "sync_ports",
1742 .maxlen = sizeof(int),
1743 .mode = 0644,
1744 .proc_handler = &proc_do_sync_ports,
1745 },
1746 {
1747 .procname = "sync_qlen_max",
1748 .maxlen = sizeof(int),
1749 .mode = 0644,
1750 .proc_handler = proc_dointvec,
1751 },
1752 {
1753 .procname = "sync_sock_size",
1754 .maxlen = sizeof(int),
1755 .mode = 0644,
1756 .proc_handler = proc_dointvec,
1757 },
1758 {
1721 .procname = "cache_bypass", 1759 .procname = "cache_bypass",
1722 .maxlen = sizeof(int), 1760 .maxlen = sizeof(int),
1723 .mode = 0644, 1761 .mode = 0644,
@@ -1743,6 +1781,20 @@ static struct ctl_table vs_vars[] = {
1743 .proc_handler = proc_do_sync_threshold, 1781 .proc_handler = proc_do_sync_threshold,
1744 }, 1782 },
1745 { 1783 {
1784 .procname = "sync_refresh_period",
1785 .maxlen = sizeof(int),
1786 .mode = 0644,
1787 .proc_handler = proc_dointvec_jiffies,
1788 },
1789 {
1790 .procname = "sync_retries",
1791 .maxlen = sizeof(int),
1792 .mode = 0644,
1793 .proc_handler = proc_dointvec_minmax,
1794 .extra1 = &zero,
1795 .extra2 = &three,
1796 },
1797 {
1746 .procname = "nat_icmp_send", 1798 .procname = "nat_icmp_send",
1747 .maxlen = sizeof(int), 1799 .maxlen = sizeof(int),
1748 .mode = 0644, 1800 .mode = 0644,
@@ -1846,13 +1898,6 @@ static struct ctl_table vs_vars[] = {
1846 { } 1898 { }
1847}; 1899};
1848 1900
1849const struct ctl_path net_vs_ctl_path[] = {
1850 { .procname = "net", },
1851 { .procname = "ipv4", },
1852 { .procname = "vs", },
1853 { }
1854};
1855EXPORT_SYMBOL_GPL(net_vs_ctl_path);
1856#endif 1901#endif
1857 1902
1858#ifdef CONFIG_PROC_FS 1903#ifdef CONFIG_PROC_FS
@@ -1867,7 +1912,7 @@ struct ip_vs_iter {
1867 * Write the contents of the VS rule table to a PROCfs file. 1912 * Write the contents of the VS rule table to a PROCfs file.
1868 * (It is kept just for backward compatibility) 1913 * (It is kept just for backward compatibility)
1869 */ 1914 */
1870static inline const char *ip_vs_fwd_name(unsigned flags) 1915static inline const char *ip_vs_fwd_name(unsigned int flags)
1871{ 1916{
1872 switch (flags & IP_VS_CONN_F_FWD_MASK) { 1917 switch (flags & IP_VS_CONN_F_FWD_MASK) {
1873 case IP_VS_CONN_F_LOCALNODE: 1918 case IP_VS_CONN_F_LOCALNODE:
@@ -2816,17 +2861,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
2816 2861
2817 ip_vs_copy_stats(&ustats, stats); 2862 ip_vs_copy_stats(&ustats, stats);
2818 2863
2819 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns); 2864 if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
2820 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts); 2865 nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
2821 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts); 2866 nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
2822 NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes); 2867 nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
2823 NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes); 2868 nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
2824 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps); 2869 nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
2825 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps); 2870 nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
2826 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps); 2871 nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
2827 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps); 2872 nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
2828 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps); 2873 nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
2829 2874 goto nla_put_failure;
2830 nla_nest_end(skb, nl_stats); 2875 nla_nest_end(skb, nl_stats);
2831 2876
2832 return 0; 2877 return 0;
@@ -2847,23 +2892,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2847 if (!nl_service) 2892 if (!nl_service)
2848 return -EMSGSIZE; 2893 return -EMSGSIZE;
2849 2894
2850 NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); 2895 if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
2851 2896 goto nla_put_failure;
2852 if (svc->fwmark) { 2897 if (svc->fwmark) {
2853 NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); 2898 if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
2899 goto nla_put_failure;
2854 } else { 2900 } else {
2855 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); 2901 if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
2856 NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); 2902 nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
2857 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); 2903 nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
2904 goto nla_put_failure;
2858 } 2905 }
2859 2906
2860 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); 2907 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
2861 if (svc->pe) 2908 (svc->pe &&
2862 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name); 2909 nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
2863 NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); 2910 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2864 NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); 2911 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
2865 NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); 2912 nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
2866 2913 goto nla_put_failure;
2867 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) 2914 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
2868 goto nla_put_failure; 2915 goto nla_put_failure;
2869 2916
@@ -3038,21 +3085,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
3038 if (!nl_dest) 3085 if (!nl_dest)
3039 return -EMSGSIZE; 3086 return -EMSGSIZE;
3040 3087
3041 NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); 3088 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
3042 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); 3089 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
3043 3090 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
3044 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, 3091 (atomic_read(&dest->conn_flags) &
3045 atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); 3092 IP_VS_CONN_F_FWD_MASK)) ||
3046 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); 3093 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
3047 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); 3094 atomic_read(&dest->weight)) ||
3048 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); 3095 nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
3049 NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, 3096 nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
3050 atomic_read(&dest->activeconns)); 3097 nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
3051 NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, 3098 atomic_read(&dest->activeconns)) ||
3052 atomic_read(&dest->inactconns)); 3099 nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
3053 NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, 3100 atomic_read(&dest->inactconns)) ||
3054 atomic_read(&dest->persistconns)); 3101 nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
3055 3102 atomic_read(&dest->persistconns)))
3103 goto nla_put_failure;
3056 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) 3104 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
3057 goto nla_put_failure; 3105 goto nla_put_failure;
3058 3106
@@ -3181,10 +3229,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
3181 if (!nl_daemon) 3229 if (!nl_daemon)
3182 return -EMSGSIZE; 3230 return -EMSGSIZE;
3183 3231
3184 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); 3232 if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
3185 NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); 3233 nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
3186 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); 3234 nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
3187 3235 goto nla_put_failure;
3188 nla_nest_end(skb, nl_daemon); 3236 nla_nest_end(skb, nl_daemon);
3189 3237
3190 return 0; 3238 return 0;
@@ -3473,21 +3521,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3473 3521
3474 __ip_vs_get_timeouts(net, &t); 3522 __ip_vs_get_timeouts(net, &t);
3475#ifdef CONFIG_IP_VS_PROTO_TCP 3523#ifdef CONFIG_IP_VS_PROTO_TCP
3476 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); 3524 if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
3477 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, 3525 t.tcp_timeout) ||
3478 t.tcp_fin_timeout); 3526 nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
3527 t.tcp_fin_timeout))
3528 goto nla_put_failure;
3479#endif 3529#endif
3480#ifdef CONFIG_IP_VS_PROTO_UDP 3530#ifdef CONFIG_IP_VS_PROTO_UDP
3481 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); 3531 if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
3532 goto nla_put_failure;
3482#endif 3533#endif
3483 3534
3484 break; 3535 break;
3485 } 3536 }
3486 3537
3487 case IPVS_CMD_GET_INFO: 3538 case IPVS_CMD_GET_INFO:
3488 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); 3539 if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
3489 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, 3540 IP_VS_VERSION_CODE) ||
3490 ip_vs_conn_tab_size); 3541 nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3542 ip_vs_conn_tab_size))
3543 goto nla_put_failure;
3491 break; 3544 break;
3492 } 3545 }
3493 3546
@@ -3654,6 +3707,12 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3654 tbl[idx++].data = &ipvs->sysctl_snat_reroute; 3707 tbl[idx++].data = &ipvs->sysctl_snat_reroute;
3655 ipvs->sysctl_sync_ver = 1; 3708 ipvs->sysctl_sync_ver = 1;
3656 tbl[idx++].data = &ipvs->sysctl_sync_ver; 3709 tbl[idx++].data = &ipvs->sysctl_sync_ver;
3710 ipvs->sysctl_sync_ports = 1;
3711 tbl[idx++].data = &ipvs->sysctl_sync_ports;
3712 ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
3713 tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
3714 ipvs->sysctl_sync_sock_size = 0;
3715 tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
3657 tbl[idx++].data = &ipvs->sysctl_cache_bypass; 3716 tbl[idx++].data = &ipvs->sysctl_cache_bypass;
3658 tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; 3717 tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
3659 tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; 3718 tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
@@ -3661,11 +3720,14 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3661 ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; 3720 ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
3662 tbl[idx].data = &ipvs->sysctl_sync_threshold; 3721 tbl[idx].data = &ipvs->sysctl_sync_threshold;
3663 tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); 3722 tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
3723 ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
3724 tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
3725 ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
3726 tbl[idx++].data = &ipvs->sysctl_sync_retries;
3664 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; 3727 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
3665 3728
3666 3729
3667 ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path, 3730 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
3668 tbl);
3669 if (ipvs->sysctl_hdr == NULL) { 3731 if (ipvs->sysctl_hdr == NULL) {
3670 if (!net_eq(net, &init_net)) 3732 if (!net_eq(net, &init_net))
3671 kfree(tbl); 3733 kfree(tbl);
@@ -3680,7 +3742,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3680 return 0; 3742 return 0;
3681} 3743}
3682 3744
3683void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) 3745void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3684{ 3746{
3685 struct netns_ipvs *ipvs = net_ipvs(net); 3747 struct netns_ipvs *ipvs = net_ipvs(net);
3686 3748
@@ -3692,7 +3754,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
3692#else 3754#else
3693 3755
3694int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } 3756int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
3695void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { } 3757void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
3696 3758
3697#endif 3759#endif
3698 3760
@@ -3750,21 +3812,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3750 free_percpu(ipvs->tot_stats.cpustats); 3812 free_percpu(ipvs->tot_stats.cpustats);
3751} 3813}
3752 3814
3753int __init ip_vs_control_init(void) 3815int __init ip_vs_register_nl_ioctl(void)
3754{ 3816{
3755 int idx;
3756 int ret; 3817 int ret;
3757 3818
3758 EnterFunction(2);
3759
3760 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3761 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3762 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3763 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3764 }
3765
3766 smp_wmb(); /* Do we really need it now ? */
3767
3768 ret = nf_register_sockopt(&ip_vs_sockopts); 3819 ret = nf_register_sockopt(&ip_vs_sockopts);
3769 if (ret) { 3820 if (ret) {
3770 pr_err("cannot register sockopt.\n"); 3821 pr_err("cannot register sockopt.\n");
@@ -3776,28 +3827,47 @@ int __init ip_vs_control_init(void)
3776 pr_err("cannot register Generic Netlink interface.\n"); 3827 pr_err("cannot register Generic Netlink interface.\n");
3777 goto err_genl; 3828 goto err_genl;
3778 } 3829 }
3779
3780 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3781 if (ret < 0)
3782 goto err_notf;
3783
3784 LeaveFunction(2);
3785 return 0; 3830 return 0;
3786 3831
3787err_notf:
3788 ip_vs_genl_unregister();
3789err_genl: 3832err_genl:
3790 nf_unregister_sockopt(&ip_vs_sockopts); 3833 nf_unregister_sockopt(&ip_vs_sockopts);
3791err_sock: 3834err_sock:
3792 return ret; 3835 return ret;
3793} 3836}
3794 3837
3838void ip_vs_unregister_nl_ioctl(void)
3839{
3840 ip_vs_genl_unregister();
3841 nf_unregister_sockopt(&ip_vs_sockopts);
3842}
3843
3844int __init ip_vs_control_init(void)
3845{
3846 int idx;
3847 int ret;
3848
3849 EnterFunction(2);
3850
3851 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
3852 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3853 INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
3854 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3855 }
3856
3857 smp_wmb(); /* Do we really need it now ? */
3858
3859 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3860 if (ret < 0)
3861 return ret;
3862
3863 LeaveFunction(2);
3864 return 0;
3865}
3866
3795 3867
3796void ip_vs_control_cleanup(void) 3868void ip_vs_control_cleanup(void)
3797{ 3869{
3798 EnterFunction(2); 3870 EnterFunction(2);
3799 unregister_netdevice_notifier(&ip_vs_dst_notifier); 3871 unregister_netdevice_notifier(&ip_vs_dst_notifier);
3800 ip_vs_genl_unregister();
3801 nf_unregister_sockopt(&ip_vs_sockopts);
3802 LeaveFunction(2); 3872 LeaveFunction(2);
3803} 3873}
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 1c269e56200a..8b7dca9ea422 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -68,7 +68,7 @@ struct ip_vs_dh_bucket {
68/* 68/*
69 * Returns hash value for IPVS DH entry 69 * Returns hash value for IPVS DH entry
70 */ 70 */
71static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) 71static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
72{ 72{
73 __be32 addr_fold = addr->ip; 73 __be32 addr_fold = addr->ip;
74 74
@@ -149,7 +149,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
149 149
150 /* allocate the DH table for this service */ 150 /* allocate the DH table for this service */
151 tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, 151 tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
152 GFP_ATOMIC); 152 GFP_KERNEL);
153 if (tbl == NULL) 153 if (tbl == NULL)
154 return -ENOMEM; 154 return -ENOMEM;
155 155
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..b20b29c903ef 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -177,7 +177,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
177 __be16 port; 177 __be16 port;
178 struct ip_vs_conn *n_cp; 178 struct ip_vs_conn *n_cp;
179 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 179 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
180 unsigned buf_len; 180 unsigned int buf_len;
181 int ret = 0; 181 int ret = 0;
182 enum ip_conntrack_info ctinfo; 182 enum ip_conntrack_info ctinfo;
183 struct nf_conn *ct; 183 struct nf_conn *ct;
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
439 struct ip_vs_app *app; 439 struct ip_vs_app *app;
440 struct netns_ipvs *ipvs = net_ipvs(net); 440 struct netns_ipvs *ipvs = net_ipvs(net);
441 441
442 if (!ipvs)
443 return -ENOENT;
442 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); 444 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
443 if (!app) 445 if (!app)
444 return -ENOMEM; 446 return -ENOMEM;
@@ -483,7 +485,7 @@ static struct pernet_operations ip_vs_ftp_ops = {
483 .exit = __ip_vs_ftp_exit, 485 .exit = __ip_vs_ftp_exit,
484}; 486};
485 487
486int __init ip_vs_ftp_init(void) 488static int __init ip_vs_ftp_init(void)
487{ 489{
488 int rv; 490 int rv;
489 491
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..df646ccf08a7 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -142,7 +142,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
142/* 142/*
143 * Returns hash value for IPVS LBLC entry 143 * Returns hash value for IPVS LBLC entry
144 */ 144 */
145static inline unsigned 145static inline unsigned int
146ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) 146ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
147{ 147{
148 __be32 addr_fold = addr->ip; 148 __be32 addr_fold = addr->ip;
@@ -163,7 +163,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
163static void 163static void
164ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) 164ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
165{ 165{
166 unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr); 166 unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
167 167
168 list_add(&en->list, &tbl->bucket[hash]); 168 list_add(&en->list, &tbl->bucket[hash]);
169 atomic_inc(&tbl->entries); 169 atomic_inc(&tbl->entries);
@@ -178,7 +178,7 @@ static inline struct ip_vs_lblc_entry *
178ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, 178ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
179 const union nf_inet_addr *addr) 179 const union nf_inet_addr *addr)
180{ 180{
181 unsigned hash = ip_vs_lblc_hashkey(af, addr); 181 unsigned int hash = ip_vs_lblc_hashkey(af, addr);
182 struct ip_vs_lblc_entry *en; 182 struct ip_vs_lblc_entry *en;
183 183
184 list_for_each_entry(en, &tbl->bucket[hash], list) 184 list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -342,7 +342,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
342 /* 342 /*
343 * Allocate the ip_vs_lblc_table for this service 343 * Allocate the ip_vs_lblc_table for this service
344 */ 344 */
345 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); 345 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
346 if (tbl == NULL) 346 if (tbl == NULL)
347 return -ENOMEM; 347 return -ENOMEM;
348 348
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
551{ 551{
552 struct netns_ipvs *ipvs = net_ipvs(net); 552 struct netns_ipvs *ipvs = net_ipvs(net);
553 553
554 if (!ipvs)
555 return -ENOENT;
556
554 if (!net_eq(net, &init_net)) { 557 if (!net_eq(net, &init_net)) {
555 ipvs->lblc_ctl_table = kmemdup(vs_vars_table, 558 ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
556 sizeof(vs_vars_table), 559 sizeof(vs_vars_table),
@@ -563,8 +566,7 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
563 ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration; 566 ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
564 567
565 ipvs->lblc_ctl_header = 568 ipvs->lblc_ctl_header =
566 register_net_sysctl_table(net, net_vs_ctl_path, 569 register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
567 ipvs->lblc_ctl_table);
568 if (!ipvs->lblc_ctl_header) { 570 if (!ipvs->lblc_ctl_header) {
569 if (!net_eq(net, &init_net)) 571 if (!net_eq(net, &init_net))
570 kfree(ipvs->lblc_ctl_table); 572 kfree(ipvs->lblc_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..570e31ea427a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -311,7 +311,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
311/* 311/*
312 * Returns hash value for IPVS LBLCR entry 312 * Returns hash value for IPVS LBLCR entry
313 */ 313 */
314static inline unsigned 314static inline unsigned int
315ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) 315ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
316{ 316{
317 __be32 addr_fold = addr->ip; 317 __be32 addr_fold = addr->ip;
@@ -332,7 +332,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
332static void 332static void
333ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) 333ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
334{ 334{
335 unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr); 335 unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
336 336
337 list_add(&en->list, &tbl->bucket[hash]); 337 list_add(&en->list, &tbl->bucket[hash]);
338 atomic_inc(&tbl->entries); 338 atomic_inc(&tbl->entries);
@@ -347,7 +347,7 @@ static inline struct ip_vs_lblcr_entry *
347ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, 347ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
348 const union nf_inet_addr *addr) 348 const union nf_inet_addr *addr)
349{ 349{
350 unsigned hash = ip_vs_lblcr_hashkey(af, addr); 350 unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
351 struct ip_vs_lblcr_entry *en; 351 struct ip_vs_lblcr_entry *en;
352 352
353 list_for_each_entry(en, &tbl->bucket[hash], list) 353 list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -511,7 +511,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
511 /* 511 /*
512 * Allocate the ip_vs_lblcr_table for this service 512 * Allocate the ip_vs_lblcr_table for this service
513 */ 513 */
514 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); 514 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
515 if (tbl == NULL) 515 if (tbl == NULL)
516 return -ENOMEM; 516 return -ENOMEM;
517 517
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
745{ 745{
746 struct netns_ipvs *ipvs = net_ipvs(net); 746 struct netns_ipvs *ipvs = net_ipvs(net);
747 747
748 if (!ipvs)
749 return -ENOENT;
750
748 if (!net_eq(net, &init_net)) { 751 if (!net_eq(net, &init_net)) {
749 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, 752 ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
750 sizeof(vs_vars_table), 753 sizeof(vs_vars_table),
@@ -757,8 +760,7 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
757 ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration; 760 ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
758 761
759 ipvs->lblcr_ctl_header = 762 ipvs->lblcr_ctl_header =
760 register_net_sysctl_table(net, net_vs_ctl_path, 763 register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
761 ipvs->lblcr_ctl_table);
762 if (!ipvs->lblcr_ctl_header) { 764 if (!ipvs->lblcr_ctl_header) {
763 if (!net_eq(net, &init_net)) 765 if (!net_eq(net, &init_net))
764 kfree(ipvs->lblcr_ctl_table); 766 kfree(ipvs->lblcr_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 85312939695f..50d82186da87 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -25,7 +25,6 @@
25#include <net/protocol.h> 25#include <net/protocol.h>
26#include <net/tcp.h> 26#include <net/tcp.h>
27#include <net/udp.h> 27#include <net/udp.h>
28#include <asm/system.h>
29#include <linux/stat.h> 28#include <linux/stat.h>
30#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
31 30
@@ -49,7 +48,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
49 */ 48 */
50static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) 49static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
51{ 50{
52 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 51 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
53 52
54 pp->next = ip_vs_proto_table[hash]; 53 pp->next = ip_vs_proto_table[hash];
55 ip_vs_proto_table[hash] = pp; 54 ip_vs_proto_table[hash] = pp;
@@ -60,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
60 return 0; 59 return 0;
61} 60}
62 61
63#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
64 defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
65 defined(CONFIG_IP_VS_PROTO_ESP)
66/* 62/*
67 * register an ipvs protocols netns related data 63 * register an ipvs protocols netns related data
68 */ 64 */
@@ -70,9 +66,9 @@ static int
70register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) 66register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
71{ 67{
72 struct netns_ipvs *ipvs = net_ipvs(net); 68 struct netns_ipvs *ipvs = net_ipvs(net);
73 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 69 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
74 struct ip_vs_proto_data *pd = 70 struct ip_vs_proto_data *pd =
75 kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); 71 kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
76 72
77 if (!pd) 73 if (!pd)
78 return -ENOMEM; 74 return -ENOMEM;
@@ -82,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
82 ipvs->proto_data_table[hash] = pd; 78 ipvs->proto_data_table[hash] = pd;
83 atomic_set(&pd->appcnt, 0); /* Init app counter */ 79 atomic_set(&pd->appcnt, 0); /* Init app counter */
84 80
85 if (pp->init_netns != NULL) 81 if (pp->init_netns != NULL) {
86 pp->init_netns(net, pd); 82 int ret = pp->init_netns(net, pd);
83 if (ret) {
84 /* unlink an free proto data */
85 ipvs->proto_data_table[hash] = pd->next;
86 kfree(pd);
87 return ret;
88 }
89 }
87 90
88 return 0; 91 return 0;
89} 92}
90#endif
91 93
92/* 94/*
93 * unregister an ipvs protocol 95 * unregister an ipvs protocol
@@ -95,7 +97,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
95static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) 97static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
96{ 98{
97 struct ip_vs_protocol **pp_p; 99 struct ip_vs_protocol **pp_p;
98 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 100 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
99 101
100 pp_p = &ip_vs_proto_table[hash]; 102 pp_p = &ip_vs_proto_table[hash];
101 for (; *pp_p; pp_p = &(*pp_p)->next) { 103 for (; *pp_p; pp_p = &(*pp_p)->next) {
@@ -118,7 +120,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
118{ 120{
119 struct netns_ipvs *ipvs = net_ipvs(net); 121 struct netns_ipvs *ipvs = net_ipvs(net);
120 struct ip_vs_proto_data **pd_p; 122 struct ip_vs_proto_data **pd_p;
121 unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol); 123 unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
122 124
123 pd_p = &ipvs->proto_data_table[hash]; 125 pd_p = &ipvs->proto_data_table[hash];
124 for (; *pd_p; pd_p = &(*pd_p)->next) { 126 for (; *pd_p; pd_p = &(*pd_p)->next) {
@@ -140,7 +142,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
140struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) 142struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
141{ 143{
142 struct ip_vs_protocol *pp; 144 struct ip_vs_protocol *pp;
143 unsigned hash = IP_VS_PROTO_HASH(proto); 145 unsigned int hash = IP_VS_PROTO_HASH(proto);
144 146
145 for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { 147 for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
146 if (pp->protocol == proto) 148 if (pp->protocol == proto)
@@ -154,11 +156,11 @@ EXPORT_SYMBOL(ip_vs_proto_get);
154/* 156/*
155 * get ip_vs_protocol object data by netns and proto 157 * get ip_vs_protocol object data by netns and proto
156 */ 158 */
157struct ip_vs_proto_data * 159static struct ip_vs_proto_data *
158__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) 160__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
159{ 161{
160 struct ip_vs_proto_data *pd; 162 struct ip_vs_proto_data *pd;
161 unsigned hash = IP_VS_PROTO_HASH(proto); 163 unsigned int hash = IP_VS_PROTO_HASH(proto);
162 164
163 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { 165 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
164 if (pd->pp->protocol == proto) 166 if (pd->pp->protocol == proto)
@@ -197,7 +199,7 @@ void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
197int * 199int *
198ip_vs_create_timeout_table(int *table, int size) 200ip_vs_create_timeout_table(int *table, int size)
199{ 201{
200 return kmemdup(table, size, GFP_ATOMIC); 202 return kmemdup(table, size, GFP_KERNEL);
201} 203}
202 204
203 205
@@ -317,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
317 */ 319 */
318int __net_init ip_vs_protocol_net_init(struct net *net) 320int __net_init ip_vs_protocol_net_init(struct net *net)
319{ 321{
322 int i, ret;
323 static struct ip_vs_protocol *protos[] = {
320#ifdef CONFIG_IP_VS_PROTO_TCP 324#ifdef CONFIG_IP_VS_PROTO_TCP
321 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); 325 &ip_vs_protocol_tcp,
322#endif 326#endif
323#ifdef CONFIG_IP_VS_PROTO_UDP 327#ifdef CONFIG_IP_VS_PROTO_UDP
324 register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); 328 &ip_vs_protocol_udp,
325#endif 329#endif
326#ifdef CONFIG_IP_VS_PROTO_SCTP 330#ifdef CONFIG_IP_VS_PROTO_SCTP
327 register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); 331 &ip_vs_protocol_sctp,
328#endif 332#endif
329#ifdef CONFIG_IP_VS_PROTO_AH 333#ifdef CONFIG_IP_VS_PROTO_AH
330 register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); 334 &ip_vs_protocol_ah,
331#endif 335#endif
332#ifdef CONFIG_IP_VS_PROTO_ESP 336#ifdef CONFIG_IP_VS_PROTO_ESP
333 register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); 337 &ip_vs_protocol_esp,
334#endif 338#endif
339 };
340
341 for (i = 0; i < ARRAY_SIZE(protos); i++) {
342 ret = register_ip_vs_proto_netns(net, protos[i]);
343 if (ret < 0)
344 goto cleanup;
345 }
335 return 0; 346 return 0;
347
348cleanup:
349 ip_vs_protocol_net_cleanup(net);
350 return ret;
336} 351}
337 352
338void __net_exit ip_vs_protocol_net_cleanup(struct net *net) 353void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
1090 * timeouts is netns related now. 1090 * timeouts is netns related now.
1091 * --------------------------------------------- 1091 * ---------------------------------------------
1092 */ 1092 */
1093static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) 1093static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1094{ 1094{
1095 struct netns_ipvs *ipvs = net_ipvs(net); 1095 struct netns_ipvs *ipvs = net_ipvs(net);
1096 1096
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1098 spin_lock_init(&ipvs->sctp_app_lock); 1098 spin_lock_init(&ipvs->sctp_app_lock);
1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, 1099 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
1100 sizeof(sctp_timeouts)); 1100 sizeof(sctp_timeouts));
1101 if (!pd->timeout_table)
1102 return -ENOMEM;
1103 return 0;
1101} 1104}
1102 1105
1103static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) 1106static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
677 * timeouts is netns related now. 677 * timeouts is netns related now.
678 * --------------------------------------------- 678 * ---------------------------------------------
679 */ 679 */
680static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) 680static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
681{ 681{
682 struct netns_ipvs *ipvs = net_ipvs(net); 682 struct netns_ipvs *ipvs = net_ipvs(net);
683 683
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
685 spin_lock_init(&ipvs->tcp_app_lock); 685 spin_lock_init(&ipvs->tcp_app_lock);
686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, 686 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
687 sizeof(tcp_timeouts)); 687 sizeof(tcp_timeouts));
688 if (!pd->timeout_table)
689 return -ENOMEM;
688 pd->tcp_state_table = tcp_states; 690 pd->tcp_state_table = tcp_states;
691 return 0;
689} 692}
690 693
691static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) 694static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; 467 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
468} 468}
469 469
470static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) 470static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
471{ 471{
472 struct netns_ipvs *ipvs = net_ipvs(net); 472 struct netns_ipvs *ipvs = net_ipvs(net);
473 473
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
475 spin_lock_init(&ipvs->udp_app_lock); 475 spin_lock_init(&ipvs->udp_app_lock);
476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, 476 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
477 sizeof(udp_timeouts)); 477 sizeof(udp_timeouts));
478 if (!pd->timeout_table)
479 return -ENOMEM;
480 return 0;
478} 481}
479 482
480static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) 483static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 069e8d4d5c01..05126521743e 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -70,7 +70,7 @@ struct ip_vs_sh_bucket {
70/* 70/*
71 * Returns hash value for IPVS SH entry 71 * Returns hash value for IPVS SH entry
72 */ 72 */
73static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) 73static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
74{ 74{
75 __be32 addr_fold = addr->ip; 75 __be32 addr_fold = addr->ip;
76 76
@@ -162,7 +162,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
162 162
163 /* allocate the SH table for this service */ 163 /* allocate the SH table for this service */
164 tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, 164 tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
165 GFP_ATOMIC); 165 GFP_KERNEL);
166 if (tbl == NULL) 166 if (tbl == NULL)
167 return -ENOMEM; 167 return -ENOMEM;
168 168
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6889f0..effa10c9e4e3 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -196,6 +196,7 @@ struct ip_vs_sync_thread_data {
196 struct net *net; 196 struct net *net;
197 struct socket *sock; 197 struct socket *sock;
198 char *buf; 198 char *buf;
199 int id;
199}; 200};
200 201
201/* Version 0 definition of packet sizes */ 202/* Version 0 definition of packet sizes */
@@ -271,13 +272,6 @@ struct ip_vs_sync_buff {
271 unsigned char *end; 272 unsigned char *end;
272}; 273};
273 274
274/* multicast addr */
275static struct sockaddr_in mcast_addr = {
276 .sin_family = AF_INET,
277 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
278 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
279};
280
281/* 275/*
282 * Copy of struct ip_vs_seq 276 * Copy of struct ip_vs_seq
283 * From unaligned network order to aligned host order 277 * From unaligned network order to aligned host order
@@ -300,18 +294,22 @@ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
300 put_unaligned_be32(ho->previous_delta, &no->previous_delta); 294 put_unaligned_be32(ho->previous_delta, &no->previous_delta);
301} 295}
302 296
303static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs) 297static inline struct ip_vs_sync_buff *
298sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
304{ 299{
305 struct ip_vs_sync_buff *sb; 300 struct ip_vs_sync_buff *sb;
306 301
307 spin_lock_bh(&ipvs->sync_lock); 302 spin_lock_bh(&ipvs->sync_lock);
308 if (list_empty(&ipvs->sync_queue)) { 303 if (list_empty(&ms->sync_queue)) {
309 sb = NULL; 304 sb = NULL;
305 __set_current_state(TASK_INTERRUPTIBLE);
310 } else { 306 } else {
311 sb = list_entry(ipvs->sync_queue.next, 307 sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff,
312 struct ip_vs_sync_buff,
313 list); 308 list);
314 list_del(&sb->list); 309 list_del(&sb->list);
310 ms->sync_queue_len--;
311 if (!ms->sync_queue_len)
312 ms->sync_queue_delay = 0;
315 } 313 }
316 spin_unlock_bh(&ipvs->sync_lock); 314 spin_unlock_bh(&ipvs->sync_lock);
317 315
@@ -334,7 +332,7 @@ ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
334 kfree(sb); 332 kfree(sb);
335 return NULL; 333 return NULL;
336 } 334 }
337 sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */ 335 sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */
338 sb->mesg->version = SYNC_PROTO_VER; 336 sb->mesg->version = SYNC_PROTO_VER;
339 sb->mesg->syncid = ipvs->master_syncid; 337 sb->mesg->syncid = ipvs->master_syncid;
340 sb->mesg->size = sizeof(struct ip_vs_sync_mesg); 338 sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
@@ -353,14 +351,22 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
353 kfree(sb); 351 kfree(sb);
354} 352}
355 353
356static inline void sb_queue_tail(struct netns_ipvs *ipvs) 354static inline void sb_queue_tail(struct netns_ipvs *ipvs,
355 struct ipvs_master_sync_state *ms)
357{ 356{
358 struct ip_vs_sync_buff *sb = ipvs->sync_buff; 357 struct ip_vs_sync_buff *sb = ms->sync_buff;
359 358
360 spin_lock(&ipvs->sync_lock); 359 spin_lock(&ipvs->sync_lock);
361 if (ipvs->sync_state & IP_VS_STATE_MASTER) 360 if (ipvs->sync_state & IP_VS_STATE_MASTER &&
362 list_add_tail(&sb->list, &ipvs->sync_queue); 361 ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) {
363 else 362 if (!ms->sync_queue_len)
363 schedule_delayed_work(&ms->master_wakeup_work,
364 max(IPVS_SYNC_SEND_DELAY, 1));
365 ms->sync_queue_len++;
366 list_add_tail(&sb->list, &ms->sync_queue);
367 if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
368 wake_up_process(ms->master_thread);
369 } else
364 ip_vs_sync_buff_release(sb); 370 ip_vs_sync_buff_release(sb);
365 spin_unlock(&ipvs->sync_lock); 371 spin_unlock(&ipvs->sync_lock);
366} 372}
@@ -370,49 +376,26 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs)
370 * than the specified time or the specified time is zero. 376 * than the specified time or the specified time is zero.
371 */ 377 */
372static inline struct ip_vs_sync_buff * 378static inline struct ip_vs_sync_buff *
373get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) 379get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms,
380 unsigned long time)
374{ 381{
375 struct ip_vs_sync_buff *sb; 382 struct ip_vs_sync_buff *sb;
376 383
377 spin_lock_bh(&ipvs->sync_buff_lock); 384 spin_lock_bh(&ipvs->sync_buff_lock);
378 if (ipvs->sync_buff && 385 sb = ms->sync_buff;
379 time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) { 386 if (sb && time_after_eq(jiffies - sb->firstuse, time)) {
380 sb = ipvs->sync_buff; 387 ms->sync_buff = NULL;
381 ipvs->sync_buff = NULL; 388 __set_current_state(TASK_RUNNING);
382 } else 389 } else
383 sb = NULL; 390 sb = NULL;
384 spin_unlock_bh(&ipvs->sync_buff_lock); 391 spin_unlock_bh(&ipvs->sync_buff_lock);
385 return sb; 392 return sb;
386} 393}
387 394
388/* 395static inline int
389 * Switch mode from sending version 0 or 1 396select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp)
390 * - must handle sync_buf
391 */
392void ip_vs_sync_switch_mode(struct net *net, int mode)
393{ 397{
394 struct netns_ipvs *ipvs = net_ipvs(net); 398 return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask;
395
396 if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
397 return;
398 if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff)
399 return;
400
401 spin_lock_bh(&ipvs->sync_buff_lock);
402 /* Buffer empty ? then let buf_create do the job */
403 if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
404 kfree(ipvs->sync_buff);
405 ipvs->sync_buff = NULL;
406 } else {
407 spin_lock_bh(&ipvs->sync_lock);
408 if (ipvs->sync_state & IP_VS_STATE_MASTER)
409 list_add_tail(&ipvs->sync_buff->list,
410 &ipvs->sync_queue);
411 else
412 ip_vs_sync_buff_release(ipvs->sync_buff);
413 spin_unlock_bh(&ipvs->sync_lock);
414 }
415 spin_unlock_bh(&ipvs->sync_buff_lock);
416} 399}
417 400
418/* 401/*
@@ -442,15 +425,101 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
442 return sb; 425 return sb;
443} 426}
444 427
428/* Check if conn should be synced.
429 * pkts: conn packets, use sysctl_sync_threshold to avoid packet check
430 * - (1) sync_refresh_period: reduce sync rate. Additionally, retry
431 * sync_retries times with period of sync_refresh_period/8
432 * - (2) if both sync_refresh_period and sync_period are 0 send sync only
433 * for state changes or only once when pkts matches sync_threshold
434 * - (3) templates: rate can be reduced only with sync_refresh_period or
435 * with (2)
436 */
437static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
438 struct ip_vs_conn *cp, int pkts)
439{
440 unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
441 unsigned long now = jiffies;
442 unsigned long n = (now + cp->timeout) & ~3UL;
443 unsigned int sync_refresh_period;
444 int sync_period;
445 int force;
446
447 /* Check if we sync in current state */
448 if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE))
449 force = 0;
450 else if (likely(cp->protocol == IPPROTO_TCP)) {
451 if (!((1 << cp->state) &
452 ((1 << IP_VS_TCP_S_ESTABLISHED) |
453 (1 << IP_VS_TCP_S_FIN_WAIT) |
454 (1 << IP_VS_TCP_S_CLOSE) |
455 (1 << IP_VS_TCP_S_CLOSE_WAIT) |
456 (1 << IP_VS_TCP_S_TIME_WAIT))))
457 return 0;
458 force = cp->state != cp->old_state;
459 if (force && cp->state != IP_VS_TCP_S_ESTABLISHED)
460 goto set;
461 } else if (unlikely(cp->protocol == IPPROTO_SCTP)) {
462 if (!((1 << cp->state) &
463 ((1 << IP_VS_SCTP_S_ESTABLISHED) |
464 (1 << IP_VS_SCTP_S_CLOSED) |
465 (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) |
466 (1 << IP_VS_SCTP_S_SHUT_ACK_SER))))
467 return 0;
468 force = cp->state != cp->old_state;
469 if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED)
470 goto set;
471 } else {
472 /* UDP or another protocol with single state */
473 force = 0;
474 }
475
476 sync_refresh_period = sysctl_sync_refresh_period(ipvs);
477 if (sync_refresh_period > 0) {
478 long diff = n - orig;
479 long min_diff = max(cp->timeout >> 1, 10UL * HZ);
480
481 /* Avoid sync if difference is below sync_refresh_period
482 * and below the half timeout.
483 */
484 if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) {
485 int retries = orig & 3;
486
487 if (retries >= sysctl_sync_retries(ipvs))
488 return 0;
489 if (time_before(now, orig - cp->timeout +
490 (sync_refresh_period >> 3)))
491 return 0;
492 n |= retries + 1;
493 }
494 }
495 sync_period = sysctl_sync_period(ipvs);
496 if (sync_period > 0) {
497 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) &&
498 pkts % sync_period != sysctl_sync_threshold(ipvs))
499 return 0;
500 } else if (sync_refresh_period <= 0 &&
501 pkts != sysctl_sync_threshold(ipvs))
502 return 0;
503
504set:
505 cp->old_state = cp->state;
506 n = cmpxchg(&cp->sync_endtime, orig, n);
507 return n == orig || force;
508}
509
445/* 510/*
446 * Version 0 , could be switched in by sys_ctl. 511 * Version 0 , could be switched in by sys_ctl.
447 * Add an ip_vs_conn information into the current sync_buff. 512 * Add an ip_vs_conn information into the current sync_buff.
448 */ 513 */
449void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) 514static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
515 int pkts)
450{ 516{
451 struct netns_ipvs *ipvs = net_ipvs(net); 517 struct netns_ipvs *ipvs = net_ipvs(net);
452 struct ip_vs_sync_mesg_v0 *m; 518 struct ip_vs_sync_mesg_v0 *m;
453 struct ip_vs_sync_conn_v0 *s; 519 struct ip_vs_sync_conn_v0 *s;
520 struct ip_vs_sync_buff *buff;
521 struct ipvs_master_sync_state *ms;
522 int id;
454 int len; 523 int len;
455 524
456 if (unlikely(cp->af != AF_INET)) 525 if (unlikely(cp->af != AF_INET))
@@ -459,21 +528,41 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
459 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) 528 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
460 return; 529 return;
461 530
531 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
532 return;
533
462 spin_lock(&ipvs->sync_buff_lock); 534 spin_lock(&ipvs->sync_buff_lock);
463 if (!ipvs->sync_buff) { 535 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
464 ipvs->sync_buff = 536 spin_unlock(&ipvs->sync_buff_lock);
465 ip_vs_sync_buff_create_v0(ipvs); 537 return;
466 if (!ipvs->sync_buff) { 538 }
539
540 id = select_master_thread_id(ipvs, cp);
541 ms = &ipvs->ms[id];
542 buff = ms->sync_buff;
543 if (buff) {
544 m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
545 /* Send buffer if it is for v1 */
546 if (!m->nr_conns) {
547 sb_queue_tail(ipvs, ms);
548 ms->sync_buff = NULL;
549 buff = NULL;
550 }
551 }
552 if (!buff) {
553 buff = ip_vs_sync_buff_create_v0(ipvs);
554 if (!buff) {
467 spin_unlock(&ipvs->sync_buff_lock); 555 spin_unlock(&ipvs->sync_buff_lock);
468 pr_err("ip_vs_sync_buff_create failed.\n"); 556 pr_err("ip_vs_sync_buff_create failed.\n");
469 return; 557 return;
470 } 558 }
559 ms->sync_buff = buff;
471 } 560 }
472 561
473 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : 562 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
474 SIMPLE_CONN_SIZE; 563 SIMPLE_CONN_SIZE;
475 m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg; 564 m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
476 s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head; 565 s = (struct ip_vs_sync_conn_v0 *) buff->head;
477 566
478 /* copy members */ 567 /* copy members */
479 s->reserved = 0; 568 s->reserved = 0;
@@ -494,18 +583,24 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
494 583
495 m->nr_conns++; 584 m->nr_conns++;
496 m->size += len; 585 m->size += len;
497 ipvs->sync_buff->head += len; 586 buff->head += len;
498 587
499 /* check if there is a space for next one */ 588 /* check if there is a space for next one */
500 if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) { 589 if (buff->head + FULL_CONN_SIZE > buff->end) {
501 sb_queue_tail(ipvs); 590 sb_queue_tail(ipvs, ms);
502 ipvs->sync_buff = NULL; 591 ms->sync_buff = NULL;
503 } 592 }
504 spin_unlock(&ipvs->sync_buff_lock); 593 spin_unlock(&ipvs->sync_buff_lock);
505 594
506 /* synchronize its controller if it has */ 595 /* synchronize its controller if it has */
507 if (cp->control) 596 cp = cp->control;
508 ip_vs_sync_conn(net, cp->control); 597 if (cp) {
598 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
599 pkts = atomic_add_return(1, &cp->in_pkts);
600 else
601 pkts = sysctl_sync_threshold(ipvs);
602 ip_vs_sync_conn(net, cp->control, pkts);
603 }
509} 604}
510 605
511/* 606/*
@@ -513,23 +608,29 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
513 * Called by ip_vs_in. 608 * Called by ip_vs_in.
514 * Sending Version 1 messages 609 * Sending Version 1 messages
515 */ 610 */
516void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) 611void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts)
517{ 612{
518 struct netns_ipvs *ipvs = net_ipvs(net); 613 struct netns_ipvs *ipvs = net_ipvs(net);
519 struct ip_vs_sync_mesg *m; 614 struct ip_vs_sync_mesg *m;
520 union ip_vs_sync_conn *s; 615 union ip_vs_sync_conn *s;
616 struct ip_vs_sync_buff *buff;
617 struct ipvs_master_sync_state *ms;
618 int id;
521 __u8 *p; 619 __u8 *p;
522 unsigned int len, pe_name_len, pad; 620 unsigned int len, pe_name_len, pad;
523 621
524 /* Handle old version of the protocol */ 622 /* Handle old version of the protocol */
525 if (sysctl_sync_ver(ipvs) == 0) { 623 if (sysctl_sync_ver(ipvs) == 0) {
526 ip_vs_sync_conn_v0(net, cp); 624 ip_vs_sync_conn_v0(net, cp, pkts);
527 return; 625 return;
528 } 626 }
529 /* Do not sync ONE PACKET */ 627 /* Do not sync ONE PACKET */
530 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) 628 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
531 goto control; 629 goto control;
532sloop: 630sloop:
631 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
632 goto control;
633
533 /* Sanity checks */ 634 /* Sanity checks */
534 pe_name_len = 0; 635 pe_name_len = 0;
535 if (cp->pe_data_len) { 636 if (cp->pe_data_len) {
@@ -541,6 +642,13 @@ sloop:
541 } 642 }
542 643
543 spin_lock(&ipvs->sync_buff_lock); 644 spin_lock(&ipvs->sync_buff_lock);
645 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
646 spin_unlock(&ipvs->sync_buff_lock);
647 return;
648 }
649
650 id = select_master_thread_id(ipvs, cp);
651 ms = &ipvs->ms[id];
544 652
545#ifdef CONFIG_IP_VS_IPV6 653#ifdef CONFIG_IP_VS_IPV6
546 if (cp->af == AF_INET6) 654 if (cp->af == AF_INET6)
@@ -559,27 +667,32 @@ sloop:
559 667
560 /* check if there is a space for this one */ 668 /* check if there is a space for this one */
561 pad = 0; 669 pad = 0;
562 if (ipvs->sync_buff) { 670 buff = ms->sync_buff;
563 pad = (4 - (size_t)ipvs->sync_buff->head) & 3; 671 if (buff) {
564 if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) { 672 m = buff->mesg;
565 sb_queue_tail(ipvs); 673 pad = (4 - (size_t) buff->head) & 3;
566 ipvs->sync_buff = NULL; 674 /* Send buffer if it is for v0 */
675 if (buff->head + len + pad > buff->end || m->reserved) {
676 sb_queue_tail(ipvs, ms);
677 ms->sync_buff = NULL;
678 buff = NULL;
567 pad = 0; 679 pad = 0;
568 } 680 }
569 } 681 }
570 682
571 if (!ipvs->sync_buff) { 683 if (!buff) {
572 ipvs->sync_buff = ip_vs_sync_buff_create(ipvs); 684 buff = ip_vs_sync_buff_create(ipvs);
573 if (!ipvs->sync_buff) { 685 if (!buff) {
574 spin_unlock(&ipvs->sync_buff_lock); 686 spin_unlock(&ipvs->sync_buff_lock);
575 pr_err("ip_vs_sync_buff_create failed.\n"); 687 pr_err("ip_vs_sync_buff_create failed.\n");
576 return; 688 return;
577 } 689 }
690 ms->sync_buff = buff;
691 m = buff->mesg;
578 } 692 }
579 693
580 m = ipvs->sync_buff->mesg; 694 p = buff->head;
581 p = ipvs->sync_buff->head; 695 buff->head += pad + len;
582 ipvs->sync_buff->head += pad + len;
583 m->size += pad + len; 696 m->size += pad + len;
584 /* Add ev. padding from prev. sync_conn */ 697 /* Add ev. padding from prev. sync_conn */
585 while (pad--) 698 while (pad--)
@@ -644,16 +757,10 @@ control:
644 cp = cp->control; 757 cp = cp->control;
645 if (!cp) 758 if (!cp)
646 return; 759 return;
647 /* 760 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
648 * Reduce sync rate for templates 761 pkts = atomic_add_return(1, &cp->in_pkts);
649 * i.e only increment in_pkts for Templates. 762 else
650 */ 763 pkts = sysctl_sync_threshold(ipvs);
651 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
652 int pkts = atomic_add_return(1, &cp->in_pkts);
653
654 if (pkts % sysctl_sync_period(ipvs) != 1)
655 return;
656 }
657 goto sloop; 764 goto sloop;
658} 765}
659 766
@@ -731,9 +838,32 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
731 else 838 else
732 cp = ip_vs_ct_in_get(param); 839 cp = ip_vs_ct_in_get(param);
733 840
734 if (cp && param->pe_data) /* Free pe_data */ 841 if (cp) {
842 /* Free pe_data */
735 kfree(param->pe_data); 843 kfree(param->pe_data);
736 if (!cp) { 844
845 dest = cp->dest;
846 spin_lock(&cp->lock);
847 if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
848 !(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
849 if (flags & IP_VS_CONN_F_INACTIVE) {
850 atomic_dec(&dest->activeconns);
851 atomic_inc(&dest->inactconns);
852 } else {
853 atomic_inc(&dest->activeconns);
854 atomic_dec(&dest->inactconns);
855 }
856 }
857 flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
858 flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
859 cp->flags = flags;
860 spin_unlock(&cp->lock);
861 if (!dest) {
862 dest = ip_vs_try_bind_dest(cp);
863 if (dest)
864 atomic_dec(&dest->refcnt);
865 }
866 } else {
737 /* 867 /*
738 * Find the appropriate destination for the connection. 868 * Find the appropriate destination for the connection.
739 * If it is not found the connection will remain unbound 869 * If it is not found the connection will remain unbound
@@ -742,18 +872,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
742 dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, 872 dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
743 param->vport, protocol, fwmark, flags); 873 param->vport, protocol, fwmark, flags);
744 874
745 /* Set the approprite ativity flag */
746 if (protocol == IPPROTO_TCP) {
747 if (state != IP_VS_TCP_S_ESTABLISHED)
748 flags |= IP_VS_CONN_F_INACTIVE;
749 else
750 flags &= ~IP_VS_CONN_F_INACTIVE;
751 } else if (protocol == IPPROTO_SCTP) {
752 if (state != IP_VS_SCTP_S_ESTABLISHED)
753 flags |= IP_VS_CONN_F_INACTIVE;
754 else
755 flags &= ~IP_VS_CONN_F_INACTIVE;
756 }
757 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); 875 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
758 if (dest) 876 if (dest)
759 atomic_dec(&dest->refcnt); 877 atomic_dec(&dest->refcnt);
@@ -763,34 +881,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
763 IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); 881 IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
764 return; 882 return;
765 } 883 }
766 } else if (!cp->dest) {
767 dest = ip_vs_try_bind_dest(cp);
768 if (dest)
769 atomic_dec(&dest->refcnt);
770 } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
771 (cp->state != state)) {
772 /* update active/inactive flag for the connection */
773 dest = cp->dest;
774 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
775 (state != IP_VS_TCP_S_ESTABLISHED)) {
776 atomic_dec(&dest->activeconns);
777 atomic_inc(&dest->inactconns);
778 cp->flags |= IP_VS_CONN_F_INACTIVE;
779 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
780 (state == IP_VS_TCP_S_ESTABLISHED)) {
781 atomic_inc(&dest->activeconns);
782 atomic_dec(&dest->inactconns);
783 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
784 }
785 } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
786 (cp->state != state)) {
787 dest = cp->dest;
788 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
789 (state != IP_VS_SCTP_S_ESTABLISHED)) {
790 atomic_dec(&dest->activeconns);
791 atomic_inc(&dest->inactconns);
792 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
793 }
794 } 884 }
795 885
796 if (opt) 886 if (opt)
@@ -839,7 +929,7 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer,
839 929
840 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); 930 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
841 for (i=0; i<m->nr_conns; i++) { 931 for (i=0; i<m->nr_conns; i++) {
842 unsigned flags, state; 932 unsigned int flags, state;
843 933
844 if (p + SIMPLE_CONN_SIZE > buffer+buflen) { 934 if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
845 IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); 935 IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
@@ -1109,7 +1199,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
1109 1199
1110 for (i=0; i<nr_conns; i++) { 1200 for (i=0; i<nr_conns; i++) {
1111 union ip_vs_sync_conn *s; 1201 union ip_vs_sync_conn *s;
1112 unsigned size; 1202 unsigned int size;
1113 int retc; 1203 int retc;
1114 1204
1115 p = msg_end; 1205 p = msg_end;
@@ -1149,6 +1239,28 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
1149 1239
1150 1240
1151/* 1241/*
1242 * Setup sndbuf (mode=1) or rcvbuf (mode=0)
1243 */
1244static void set_sock_size(struct sock *sk, int mode, int val)
1245{
1246 /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */
1247 /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */
1248 lock_sock(sk);
1249 if (mode) {
1250 val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
1251 sysctl_wmem_max);
1252 sk->sk_sndbuf = val * 2;
1253 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1254 } else {
1255 val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
1256 sysctl_rmem_max);
1257 sk->sk_rcvbuf = val * 2;
1258 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1259 }
1260 release_sock(sk);
1261}
1262
1263/*
1152 * Setup loopback of outgoing multicasts on a sending socket 1264 * Setup loopback of outgoing multicasts on a sending socket
1153 */ 1265 */
1154static void set_mcast_loop(struct sock *sk, u_char loop) 1266static void set_mcast_loop(struct sock *sk, u_char loop)
@@ -1298,9 +1410,15 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
1298/* 1410/*
1299 * Set up sending multicast socket over UDP 1411 * Set up sending multicast socket over UDP
1300 */ 1412 */
1301static struct socket *make_send_sock(struct net *net) 1413static struct socket *make_send_sock(struct net *net, int id)
1302{ 1414{
1303 struct netns_ipvs *ipvs = net_ipvs(net); 1415 struct netns_ipvs *ipvs = net_ipvs(net);
1416 /* multicast addr */
1417 struct sockaddr_in mcast_addr = {
1418 .sin_family = AF_INET,
1419 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
1420 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
1421 };
1304 struct socket *sock; 1422 struct socket *sock;
1305 int result; 1423 int result;
1306 1424
@@ -1324,6 +1442,9 @@ static struct socket *make_send_sock(struct net *net)
1324 1442
1325 set_mcast_loop(sock->sk, 0); 1443 set_mcast_loop(sock->sk, 0);
1326 set_mcast_ttl(sock->sk, 1); 1444 set_mcast_ttl(sock->sk, 1);
1445 result = sysctl_sync_sock_size(ipvs);
1446 if (result > 0)
1447 set_sock_size(sock->sk, 1, result);
1327 1448
1328 result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); 1449 result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
1329 if (result < 0) { 1450 if (result < 0) {
@@ -1349,9 +1470,15 @@ error:
1349/* 1470/*
1350 * Set up receiving multicast socket over UDP 1471 * Set up receiving multicast socket over UDP
1351 */ 1472 */
1352static struct socket *make_receive_sock(struct net *net) 1473static struct socket *make_receive_sock(struct net *net, int id)
1353{ 1474{
1354 struct netns_ipvs *ipvs = net_ipvs(net); 1475 struct netns_ipvs *ipvs = net_ipvs(net);
1476 /* multicast addr */
1477 struct sockaddr_in mcast_addr = {
1478 .sin_family = AF_INET,
1479 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
1480 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
1481 };
1355 struct socket *sock; 1482 struct socket *sock;
1356 int result; 1483 int result;
1357 1484
@@ -1368,7 +1495,10 @@ static struct socket *make_receive_sock(struct net *net)
1368 */ 1495 */
1369 sk_change_net(sock->sk, net); 1496 sk_change_net(sock->sk, net);
1370 /* it is equivalent to the REUSEADDR option in user-space */ 1497 /* it is equivalent to the REUSEADDR option in user-space */
1371 sock->sk->sk_reuse = 1; 1498 sock->sk->sk_reuse = SK_CAN_REUSE;
1499 result = sysctl_sync_sock_size(ipvs);
1500 if (result > 0)
1501 set_sock_size(sock->sk, 0, result);
1372 1502
1373 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, 1503 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
1374 sizeof(struct sockaddr)); 1504 sizeof(struct sockaddr));
@@ -1411,18 +1541,22 @@ ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
1411 return len; 1541 return len;
1412} 1542}
1413 1543
1414static void 1544static int
1415ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) 1545ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
1416{ 1546{
1417 int msize; 1547 int msize;
1548 int ret;
1418 1549
1419 msize = msg->size; 1550 msize = msg->size;
1420 1551
1421 /* Put size in network byte order */ 1552 /* Put size in network byte order */
1422 msg->size = htons(msg->size); 1553 msg->size = htons(msg->size);
1423 1554
1424 if (ip_vs_send_async(sock, (char *)msg, msize) != msize) 1555 ret = ip_vs_send_async(sock, (char *)msg, msize);
1425 pr_err("ip_vs_send_async error\n"); 1556 if (ret >= 0 || ret == -EAGAIN)
1557 return ret;
1558 pr_err("ip_vs_send_async error %d\n", ret);
1559 return 0;
1426} 1560}
1427 1561
1428static int 1562static int
@@ -1438,48 +1572,90 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
1438 iov.iov_base = buffer; 1572 iov.iov_base = buffer;
1439 iov.iov_len = (size_t)buflen; 1573 iov.iov_len = (size_t)buflen;
1440 1574
1441 len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0); 1575 len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);
1442 1576
1443 if (len < 0) 1577 if (len < 0)
1444 return -1; 1578 return len;
1445 1579
1446 LeaveFunction(7); 1580 LeaveFunction(7);
1447 return len; 1581 return len;
1448} 1582}
1449 1583
1584/* Wakeup the master thread for sending */
1585static void master_wakeup_work_handler(struct work_struct *work)
1586{
1587 struct ipvs_master_sync_state *ms =
1588 container_of(work, struct ipvs_master_sync_state,
1589 master_wakeup_work.work);
1590 struct netns_ipvs *ipvs = ms->ipvs;
1591
1592 spin_lock_bh(&ipvs->sync_lock);
1593 if (ms->sync_queue_len &&
1594 ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
1595 ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
1596 wake_up_process(ms->master_thread);
1597 }
1598 spin_unlock_bh(&ipvs->sync_lock);
1599}
1600
1601/* Get next buffer to send */
1602static inline struct ip_vs_sync_buff *
1603next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
1604{
1605 struct ip_vs_sync_buff *sb;
1606
1607 sb = sb_dequeue(ipvs, ms);
1608 if (sb)
1609 return sb;
1610 /* Do not delay entries in buffer for more than 2 seconds */
1611 return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME);
1612}
1450 1613
1451static int sync_thread_master(void *data) 1614static int sync_thread_master(void *data)
1452{ 1615{
1453 struct ip_vs_sync_thread_data *tinfo = data; 1616 struct ip_vs_sync_thread_data *tinfo = data;
1454 struct netns_ipvs *ipvs = net_ipvs(tinfo->net); 1617 struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
1618 struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
1619 struct sock *sk = tinfo->sock->sk;
1455 struct ip_vs_sync_buff *sb; 1620 struct ip_vs_sync_buff *sb;
1456 1621
1457 pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " 1622 pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
1458 "syncid = %d\n", 1623 "syncid = %d, id = %d\n",
1459 ipvs->master_mcast_ifn, ipvs->master_syncid); 1624 ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
1460 1625
1461 while (!kthread_should_stop()) { 1626 for (;;) {
1462 while ((sb = sb_dequeue(ipvs))) { 1627 sb = next_sync_buff(ipvs, ms);
1463 ip_vs_send_sync_msg(tinfo->sock, sb->mesg); 1628 if (unlikely(kthread_should_stop()))
1464 ip_vs_sync_buff_release(sb); 1629 break;
1630 if (!sb) {
1631 schedule_timeout(IPVS_SYNC_CHECK_PERIOD);
1632 continue;
1465 } 1633 }
1466 1634 while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
1467 /* check if entries stay in ipvs->sync_buff for 2 seconds */ 1635 int ret = 0;
1468 sb = get_curr_sync_buff(ipvs, 2 * HZ); 1636
1469 if (sb) { 1637 __wait_event_interruptible(*sk_sleep(sk),
1470 ip_vs_send_sync_msg(tinfo->sock, sb->mesg); 1638 sock_writeable(sk) ||
1471 ip_vs_sync_buff_release(sb); 1639 kthread_should_stop(),
1640 ret);
1641 if (unlikely(kthread_should_stop()))
1642 goto done;
1472 } 1643 }
1473 1644 ip_vs_sync_buff_release(sb);
1474 schedule_timeout_interruptible(HZ);
1475 } 1645 }
1476 1646
1647done:
1648 __set_current_state(TASK_RUNNING);
1649 if (sb)
1650 ip_vs_sync_buff_release(sb);
1651
1477 /* clean up the sync_buff queue */ 1652 /* clean up the sync_buff queue */
1478 while ((sb = sb_dequeue(ipvs))) 1653 while ((sb = sb_dequeue(ipvs, ms)))
1479 ip_vs_sync_buff_release(sb); 1654 ip_vs_sync_buff_release(sb);
1655 __set_current_state(TASK_RUNNING);
1480 1656
1481 /* clean up the current sync_buff */ 1657 /* clean up the current sync_buff */
1482 sb = get_curr_sync_buff(ipvs, 0); 1658 sb = get_curr_sync_buff(ipvs, ms, 0);
1483 if (sb) 1659 if (sb)
1484 ip_vs_sync_buff_release(sb); 1660 ip_vs_sync_buff_release(sb);
1485 1661
@@ -1498,8 +1674,8 @@ static int sync_thread_backup(void *data)
1498 int len; 1674 int len;
1499 1675
1500 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " 1676 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
1501 "syncid = %d\n", 1677 "syncid = %d, id = %d\n",
1502 ipvs->backup_mcast_ifn, ipvs->backup_syncid); 1678 ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
1503 1679
1504 while (!kthread_should_stop()) { 1680 while (!kthread_should_stop()) {
1505 wait_event_interruptible(*sk_sleep(tinfo->sock->sk), 1681 wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1511,7 +1687,8 @@ static int sync_thread_backup(void *data)
1511 len = ip_vs_receive(tinfo->sock, tinfo->buf, 1687 len = ip_vs_receive(tinfo->sock, tinfo->buf,
1512 ipvs->recv_mesg_maxlen); 1688 ipvs->recv_mesg_maxlen);
1513 if (len <= 0) { 1689 if (len <= 0) {
1514 pr_err("receiving message error\n"); 1690 if (len != -EAGAIN)
1691 pr_err("receiving message error\n");
1515 break; 1692 break;
1516 } 1693 }
1517 1694
@@ -1535,86 +1712,140 @@ static int sync_thread_backup(void *data)
1535int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) 1712int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1536{ 1713{
1537 struct ip_vs_sync_thread_data *tinfo; 1714 struct ip_vs_sync_thread_data *tinfo;
1538 struct task_struct **realtask, *task; 1715 struct task_struct **array = NULL, *task;
1539 struct socket *sock; 1716 struct socket *sock;
1540 struct netns_ipvs *ipvs = net_ipvs(net); 1717 struct netns_ipvs *ipvs = net_ipvs(net);
1541 char *name, *buf = NULL; 1718 char *name;
1542 int (*threadfn)(void *data); 1719 int (*threadfn)(void *data);
1720 int id, count;
1543 int result = -ENOMEM; 1721 int result = -ENOMEM;
1544 1722
1545 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); 1723 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1546 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", 1724 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1547 sizeof(struct ip_vs_sync_conn_v0)); 1725 sizeof(struct ip_vs_sync_conn_v0));
1548 1726
1727 if (!ipvs->sync_state) {
1728 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
1729 ipvs->threads_mask = count - 1;
1730 } else
1731 count = ipvs->threads_mask + 1;
1549 1732
1550 if (state == IP_VS_STATE_MASTER) { 1733 if (state == IP_VS_STATE_MASTER) {
1551 if (ipvs->master_thread) 1734 if (ipvs->ms)
1552 return -EEXIST; 1735 return -EEXIST;
1553 1736
1554 strlcpy(ipvs->master_mcast_ifn, mcast_ifn, 1737 strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
1555 sizeof(ipvs->master_mcast_ifn)); 1738 sizeof(ipvs->master_mcast_ifn));
1556 ipvs->master_syncid = syncid; 1739 ipvs->master_syncid = syncid;
1557 realtask = &ipvs->master_thread; 1740 name = "ipvs-m:%d:%d";
1558 name = "ipvs_master:%d";
1559 threadfn = sync_thread_master; 1741 threadfn = sync_thread_master;
1560 sock = make_send_sock(net);
1561 } else if (state == IP_VS_STATE_BACKUP) { 1742 } else if (state == IP_VS_STATE_BACKUP) {
1562 if (ipvs->backup_thread) 1743 if (ipvs->backup_threads)
1563 return -EEXIST; 1744 return -EEXIST;
1564 1745
1565 strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, 1746 strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
1566 sizeof(ipvs->backup_mcast_ifn)); 1747 sizeof(ipvs->backup_mcast_ifn));
1567 ipvs->backup_syncid = syncid; 1748 ipvs->backup_syncid = syncid;
1568 realtask = &ipvs->backup_thread; 1749 name = "ipvs-b:%d:%d";
1569 name = "ipvs_backup:%d";
1570 threadfn = sync_thread_backup; 1750 threadfn = sync_thread_backup;
1571 sock = make_receive_sock(net);
1572 } else { 1751 } else {
1573 return -EINVAL; 1752 return -EINVAL;
1574 } 1753 }
1575 1754
1576 if (IS_ERR(sock)) { 1755 if (state == IP_VS_STATE_MASTER) {
1577 result = PTR_ERR(sock); 1756 struct ipvs_master_sync_state *ms;
1578 goto out;
1579 }
1580 1757
1581 set_sync_mesg_maxlen(net, state); 1758 ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
1582 if (state == IP_VS_STATE_BACKUP) { 1759 if (!ipvs->ms)
1583 buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); 1760 goto out;
1584 if (!buf) 1761 ms = ipvs->ms;
1585 goto outsocket; 1762 for (id = 0; id < count; id++, ms++) {
1763 INIT_LIST_HEAD(&ms->sync_queue);
1764 ms->sync_queue_len = 0;
1765 ms->sync_queue_delay = 0;
1766 INIT_DELAYED_WORK(&ms->master_wakeup_work,
1767 master_wakeup_work_handler);
1768 ms->ipvs = ipvs;
1769 }
1770 } else {
1771 array = kzalloc(count * sizeof(struct task_struct *),
1772 GFP_KERNEL);
1773 if (!array)
1774 goto out;
1586 } 1775 }
1776 set_sync_mesg_maxlen(net, state);
1587 1777
1588 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); 1778 tinfo = NULL;
1589 if (!tinfo) 1779 for (id = 0; id < count; id++) {
1590 goto outbuf; 1780 if (state == IP_VS_STATE_MASTER)
1591 1781 sock = make_send_sock(net, id);
1592 tinfo->net = net; 1782 else
1593 tinfo->sock = sock; 1783 sock = make_receive_sock(net, id);
1594 tinfo->buf = buf; 1784 if (IS_ERR(sock)) {
1785 result = PTR_ERR(sock);
1786 goto outtinfo;
1787 }
1788 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1789 if (!tinfo)
1790 goto outsocket;
1791 tinfo->net = net;
1792 tinfo->sock = sock;
1793 if (state == IP_VS_STATE_BACKUP) {
1794 tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
1795 GFP_KERNEL);
1796 if (!tinfo->buf)
1797 goto outtinfo;
1798 }
1799 tinfo->id = id;
1595 1800
1596 task = kthread_run(threadfn, tinfo, name, ipvs->gen); 1801 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
1597 if (IS_ERR(task)) { 1802 if (IS_ERR(task)) {
1598 result = PTR_ERR(task); 1803 result = PTR_ERR(task);
1599 goto outtinfo; 1804 goto outtinfo;
1805 }
1806 tinfo = NULL;
1807 if (state == IP_VS_STATE_MASTER)
1808 ipvs->ms[id].master_thread = task;
1809 else
1810 array[id] = task;
1600 } 1811 }
1601 1812
1602 /* mark as active */ 1813 /* mark as active */
1603 *realtask = task; 1814
1815 if (state == IP_VS_STATE_BACKUP)
1816 ipvs->backup_threads = array;
1817 spin_lock_bh(&ipvs->sync_buff_lock);
1604 ipvs->sync_state |= state; 1818 ipvs->sync_state |= state;
1819 spin_unlock_bh(&ipvs->sync_buff_lock);
1605 1820
1606 /* increase the module use count */ 1821 /* increase the module use count */
1607 ip_vs_use_count_inc(); 1822 ip_vs_use_count_inc();
1608 1823
1609 return 0; 1824 return 0;
1610 1825
1611outtinfo:
1612 kfree(tinfo);
1613outbuf:
1614 kfree(buf);
1615outsocket: 1826outsocket:
1616 sk_release_kernel(sock->sk); 1827 sk_release_kernel(sock->sk);
1828
1829outtinfo:
1830 if (tinfo) {
1831 sk_release_kernel(tinfo->sock->sk);
1832 kfree(tinfo->buf);
1833 kfree(tinfo);
1834 }
1835 count = id;
1836 while (count-- > 0) {
1837 if (state == IP_VS_STATE_MASTER)
1838 kthread_stop(ipvs->ms[count].master_thread);
1839 else
1840 kthread_stop(array[count]);
1841 }
1842 kfree(array);
1843
1617out: 1844out:
1845 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
1846 kfree(ipvs->ms);
1847 ipvs->ms = NULL;
1848 }
1618 return result; 1849 return result;
1619} 1850}
1620 1851
@@ -1622,38 +1853,60 @@ out:
1622int stop_sync_thread(struct net *net, int state) 1853int stop_sync_thread(struct net *net, int state)
1623{ 1854{
1624 struct netns_ipvs *ipvs = net_ipvs(net); 1855 struct netns_ipvs *ipvs = net_ipvs(net);
1856 struct task_struct **array;
1857 int id;
1625 int retc = -EINVAL; 1858 int retc = -EINVAL;
1626 1859
1627 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); 1860 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1628 1861
1629 if (state == IP_VS_STATE_MASTER) { 1862 if (state == IP_VS_STATE_MASTER) {
1630 if (!ipvs->master_thread) 1863 if (!ipvs->ms)
1631 return -ESRCH; 1864 return -ESRCH;
1632 1865
1633 pr_info("stopping master sync thread %d ...\n",
1634 task_pid_nr(ipvs->master_thread));
1635
1636 /* 1866 /*
1637 * The lock synchronizes with sb_queue_tail(), so that we don't 1867 * The lock synchronizes with sb_queue_tail(), so that we don't
1638 * add sync buffers to the queue, when we are already in 1868 * add sync buffers to the queue, when we are already in
1639 * progress of stopping the master sync daemon. 1869 * progress of stopping the master sync daemon.
1640 */ 1870 */
1641 1871
1642 spin_lock_bh(&ipvs->sync_lock); 1872 spin_lock_bh(&ipvs->sync_buff_lock);
1873 spin_lock(&ipvs->sync_lock);
1643 ipvs->sync_state &= ~IP_VS_STATE_MASTER; 1874 ipvs->sync_state &= ~IP_VS_STATE_MASTER;
1644 spin_unlock_bh(&ipvs->sync_lock); 1875 spin_unlock(&ipvs->sync_lock);
1645 retc = kthread_stop(ipvs->master_thread); 1876 spin_unlock_bh(&ipvs->sync_buff_lock);
1646 ipvs->master_thread = NULL; 1877
1878 retc = 0;
1879 for (id = ipvs->threads_mask; id >= 0; id--) {
1880 struct ipvs_master_sync_state *ms = &ipvs->ms[id];
1881 int ret;
1882
1883 pr_info("stopping master sync thread %d ...\n",
1884 task_pid_nr(ms->master_thread));
1885 cancel_delayed_work_sync(&ms->master_wakeup_work);
1886 ret = kthread_stop(ms->master_thread);
1887 if (retc >= 0)
1888 retc = ret;
1889 }
1890 kfree(ipvs->ms);
1891 ipvs->ms = NULL;
1647 } else if (state == IP_VS_STATE_BACKUP) { 1892 } else if (state == IP_VS_STATE_BACKUP) {
1648 if (!ipvs->backup_thread) 1893 if (!ipvs->backup_threads)
1649 return -ESRCH; 1894 return -ESRCH;
1650 1895
1651 pr_info("stopping backup sync thread %d ...\n",
1652 task_pid_nr(ipvs->backup_thread));
1653
1654 ipvs->sync_state &= ~IP_VS_STATE_BACKUP; 1896 ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
1655 retc = kthread_stop(ipvs->backup_thread); 1897 array = ipvs->backup_threads;
1656 ipvs->backup_thread = NULL; 1898 retc = 0;
1899 for (id = ipvs->threads_mask; id >= 0; id--) {
1900 int ret;
1901
1902 pr_info("stopping backup sync thread %d ...\n",
1903 task_pid_nr(array[id]));
1904 ret = kthread_stop(array[id]);
1905 if (retc >= 0)
1906 retc = ret;
1907 }
1908 kfree(array);
1909 ipvs->backup_threads = NULL;
1657 } 1910 }
1658 1911
1659 /* decrease the module use count */ 1912 /* decrease the module use count */
@@ -1670,13 +1923,8 @@ int __net_init ip_vs_sync_net_init(struct net *net)
1670 struct netns_ipvs *ipvs = net_ipvs(net); 1923 struct netns_ipvs *ipvs = net_ipvs(net);
1671 1924
1672 __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); 1925 __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
1673 INIT_LIST_HEAD(&ipvs->sync_queue);
1674 spin_lock_init(&ipvs->sync_lock); 1926 spin_lock_init(&ipvs->sync_lock);
1675 spin_lock_init(&ipvs->sync_buff_lock); 1927 spin_lock_init(&ipvs->sync_buff_lock);
1676
1677 ipvs->sync_mcast_addr.sin_family = AF_INET;
1678 ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
1679 ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
1680 return 0; 1928 return 0;
1681} 1929}
1682 1930
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index fd0d4e09876a..231be7dd547a 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -84,7 +84,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
84 /* 84 /*
85 * Allocate the mark variable for WRR scheduling 85 * Allocate the mark variable for WRR scheduling
86 */ 86 */
87 mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); 87 mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL);
88 if (mark == NULL) 88 if (mark == NULL)
89 return -ENOMEM; 89 return -ENOMEM;
90 90
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda05986..d61e0782a797 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,8 +69,8 @@ static int nf_conntrack_acct_init_sysctl(struct net *net)
69 69
70 table[0].data = &net->ct.sysctl_acct; 70 table[0].data = &net->ct.sysctl_acct;
71 71
72 net->ct.acct_sysctl_header = register_net_sysctl_table(net, 72 net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
73 nf_net_netfilter_sysctl_path, table); 73 table);
74 if (!net->ct.acct_sysctl_header) { 74 if (!net->ct.acct_sysctl_header) {
75 printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); 75 printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
76 goto out_register; 76 goto out_register;
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 13fd2c55e329..f2de8c55ac50 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -107,8 +107,7 @@ static int amanda_help(struct sk_buff *skb,
107 /* No data? */ 107 /* No data? */
108 dataoff = protoff + sizeof(struct udphdr); 108 dataoff = protoff + sizeof(struct udphdr);
109 if (dataoff >= skb->len) { 109 if (dataoff >= skb->len) {
110 if (net_ratelimit()) 110 net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
111 printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len);
112 return NF_ACCEPT; 111 return NF_ACCEPT;
113 } 112 }
114 113
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index fa4b82c8ae80..ac3af97cc468 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -44,6 +44,7 @@
44#include <net/netfilter/nf_conntrack_ecache.h> 44#include <net/netfilter/nf_conntrack_ecache.h>
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_timeout.h>
47#include <net/netfilter/nf_nat.h> 48#include <net/netfilter/nf_nat.h>
48#include <net/netfilter/nf_nat_core.h> 49#include <net/netfilter/nf_nat_core.h>
49 50
@@ -682,10 +683,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
682 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 683 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
683 if (!early_drop(net, hash_bucket(hash, net))) { 684 if (!early_drop(net, hash_bucket(hash, net))) {
684 atomic_dec(&net->ct.count); 685 atomic_dec(&net->ct.count);
685 if (net_ratelimit()) 686 net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
686 printk(KERN_WARNING
687 "nf_conntrack: table full, dropping"
688 " packet.\n");
689 return ERR_PTR(-ENOMEM); 687 return ERR_PTR(-ENOMEM);
690 } 688 }
691 } 689 }
@@ -734,6 +732,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
734 732
735#ifdef CONFIG_NF_CONNTRACK_ZONES 733#ifdef CONFIG_NF_CONNTRACK_ZONES
736out_free: 734out_free:
735 atomic_dec(&net->ct.count);
737 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 736 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
738 return ERR_PTR(-ENOMEM); 737 return ERR_PTR(-ENOMEM);
739#endif 738#endif
@@ -775,6 +774,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
775 struct nf_conntrack_ecache *ecache; 774 struct nf_conntrack_ecache *ecache;
776 struct nf_conntrack_expect *exp; 775 struct nf_conntrack_expect *exp;
777 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; 776 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
777 struct nf_conn_timeout *timeout_ext;
778 unsigned int *timeouts;
778 779
779 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 780 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
780 pr_debug("Can't invert tuple.\n"); 781 pr_debug("Can't invert tuple.\n");
@@ -786,12 +787,21 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
786 if (IS_ERR(ct)) 787 if (IS_ERR(ct))
787 return (struct nf_conntrack_tuple_hash *)ct; 788 return (struct nf_conntrack_tuple_hash *)ct;
788 789
789 if (!l4proto->new(ct, skb, dataoff)) { 790 timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
791 if (timeout_ext)
792 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
793 else
794 timeouts = l4proto->get_timeouts(net);
795
796 if (!l4proto->new(ct, skb, dataoff, timeouts)) {
790 nf_conntrack_free(ct); 797 nf_conntrack_free(ct);
791 pr_debug("init conntrack: can't track with proto module\n"); 798 pr_debug("init conntrack: can't track with proto module\n");
792 return NULL; 799 return NULL;
793 } 800 }
794 801
802 if (timeout_ext)
803 nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
804
795 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 805 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
796 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 806 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
797 807
@@ -913,6 +923,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
913 enum ip_conntrack_info ctinfo; 923 enum ip_conntrack_info ctinfo;
914 struct nf_conntrack_l3proto *l3proto; 924 struct nf_conntrack_l3proto *l3proto;
915 struct nf_conntrack_l4proto *l4proto; 925 struct nf_conntrack_l4proto *l4proto;
926 struct nf_conn_timeout *timeout_ext;
927 unsigned int *timeouts;
916 unsigned int dataoff; 928 unsigned int dataoff;
917 u_int8_t protonum; 929 u_int8_t protonum;
918 int set_reply = 0; 930 int set_reply = 0;
@@ -977,7 +989,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
977 989
978 NF_CT_ASSERT(skb->nfct); 990 NF_CT_ASSERT(skb->nfct);
979 991
980 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); 992 /* Decide what timeout policy we want to apply to this flow. */
993 timeout_ext = nf_ct_timeout_find(ct);
994 if (timeout_ext)
995 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
996 else
997 timeouts = l4proto->get_timeouts(net);
998
999 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
981 if (ret <= 0) { 1000 if (ret <= 0) {
982 /* Invalid: inverse of the return code tells 1001 /* Invalid: inverse of the return code tells
983 * the netfilter core what to do */ 1002 * the netfilter core what to do */
@@ -1130,8 +1149,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1130int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 1149int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1131 const struct nf_conntrack_tuple *tuple) 1150 const struct nf_conntrack_tuple *tuple)
1132{ 1151{
1133 NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); 1152 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1134 NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); 1153 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1154 goto nla_put_failure;
1135 return 0; 1155 return 0;
1136 1156
1137nla_put_failure: 1157nla_put_failure:
@@ -1313,7 +1333,6 @@ static void nf_conntrack_cleanup_init_net(void)
1313 while (untrack_refs() > 0) 1333 while (untrack_refs() > 0)
1314 schedule(); 1334 schedule();
1315 1335
1316 nf_conntrack_helper_fini();
1317 nf_conntrack_proto_fini(); 1336 nf_conntrack_proto_fini();
1318#ifdef CONFIG_NF_CONNTRACK_ZONES 1337#ifdef CONFIG_NF_CONNTRACK_ZONES
1319 nf_ct_extend_unregister(&nf_ct_zone_extend); 1338 nf_ct_extend_unregister(&nf_ct_zone_extend);
@@ -1331,6 +1350,8 @@ static void nf_conntrack_cleanup_net(struct net *net)
1331 } 1350 }
1332 1351
1333 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1352 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1353 nf_conntrack_helper_fini(net);
1354 nf_conntrack_timeout_fini(net);
1334 nf_conntrack_ecache_fini(net); 1355 nf_conntrack_ecache_fini(net);
1335 nf_conntrack_tstamp_fini(net); 1356 nf_conntrack_tstamp_fini(net);
1336 nf_conntrack_acct_fini(net); 1357 nf_conntrack_acct_fini(net);
@@ -1480,10 +1501,6 @@ static int nf_conntrack_init_init_net(void)
1480 if (ret < 0) 1501 if (ret < 0)
1481 goto err_proto; 1502 goto err_proto;
1482 1503
1483 ret = nf_conntrack_helper_init();
1484 if (ret < 0)
1485 goto err_helper;
1486
1487#ifdef CONFIG_NF_CONNTRACK_ZONES 1504#ifdef CONFIG_NF_CONNTRACK_ZONES
1488 ret = nf_ct_extend_register(&nf_ct_zone_extend); 1505 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1489 if (ret < 0) 1506 if (ret < 0)
@@ -1501,10 +1518,8 @@ static int nf_conntrack_init_init_net(void)
1501 1518
1502#ifdef CONFIG_NF_CONNTRACK_ZONES 1519#ifdef CONFIG_NF_CONNTRACK_ZONES
1503err_extend: 1520err_extend:
1504 nf_conntrack_helper_fini();
1505#endif
1506err_helper:
1507 nf_conntrack_proto_fini(); 1521 nf_conntrack_proto_fini();
1522#endif
1508err_proto: 1523err_proto:
1509 return ret; 1524 return ret;
1510} 1525}
@@ -1562,9 +1577,19 @@ static int nf_conntrack_init_net(struct net *net)
1562 ret = nf_conntrack_ecache_init(net); 1577 ret = nf_conntrack_ecache_init(net);
1563 if (ret < 0) 1578 if (ret < 0)
1564 goto err_ecache; 1579 goto err_ecache;
1580 ret = nf_conntrack_timeout_init(net);
1581 if (ret < 0)
1582 goto err_timeout;
1583 ret = nf_conntrack_helper_init(net);
1584 if (ret < 0)
1585 goto err_helper;
1565 1586
1566 return 0; 1587 return 0;
1567 1588
1589err_helper:
1590 nf_conntrack_timeout_fini(net);
1591err_timeout:
1592 nf_conntrack_ecache_fini(net);
1568err_ecache: 1593err_ecache:
1569 nf_conntrack_tstamp_fini(net); 1594 nf_conntrack_tstamp_fini(net);
1570err_tstamp: 1595err_tstamp:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 14af6329bdda..e7be79e640de 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -32,9 +32,11 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex);
32void nf_ct_deliver_cached_events(struct nf_conn *ct) 32void nf_ct_deliver_cached_events(struct nf_conn *ct)
33{ 33{
34 struct net *net = nf_ct_net(ct); 34 struct net *net = nf_ct_net(ct);
35 unsigned long events; 35 unsigned long events, missed;
36 struct nf_ct_event_notifier *notify; 36 struct nf_ct_event_notifier *notify;
37 struct nf_conntrack_ecache *e; 37 struct nf_conntrack_ecache *e;
38 struct nf_ct_event item;
39 int ret;
38 40
39 rcu_read_lock(); 41 rcu_read_lock();
40 notify = rcu_dereference(net->ct.nf_conntrack_event_cb); 42 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
@@ -47,31 +49,32 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
47 49
48 events = xchg(&e->cache, 0); 50 events = xchg(&e->cache, 0);
49 51
50 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) { 52 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
51 struct nf_ct_event item = { 53 goto out_unlock;
52 .ct = ct, 54
53 .pid = 0, 55 /* We make a copy of the missed event cache without taking
54 .report = 0 56 * the lock, thus we may send missed events twice. However,
55 }; 57 * this does not harm and it happens very rarely. */
56 int ret; 58 missed = e->missed;
57 /* We make a copy of the missed event cache without taking 59
58 * the lock, thus we may send missed events twice. However, 60 if (!((events | missed) & e->ctmask))
59 * this does not harm and it happens very rarely. */ 61 goto out_unlock;
60 unsigned long missed = e->missed; 62
61 63 item.ct = ct;
62 if (!((events | missed) & e->ctmask)) 64 item.pid = 0;
63 goto out_unlock; 65 item.report = 0;
64 66
65 ret = notify->fcn(events | missed, &item); 67 ret = notify->fcn(events | missed, &item);
66 if (unlikely(ret < 0 || missed)) { 68
67 spin_lock_bh(&ct->lock); 69 if (likely(ret >= 0 && !missed))
68 if (ret < 0) 70 goto out_unlock;
69 e->missed |= events; 71
70 else 72 spin_lock_bh(&ct->lock);
71 e->missed &= ~missed; 73 if (ret < 0)
72 spin_unlock_bh(&ct->lock); 74 e->missed |= events;
73 } 75 else
74 } 76 e->missed &= ~missed;
77 spin_unlock_bh(&ct->lock);
75 78
76out_unlock: 79out_unlock:
77 rcu_read_unlock(); 80 rcu_read_unlock();
@@ -81,7 +84,7 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
81int nf_conntrack_register_notifier(struct net *net, 84int nf_conntrack_register_notifier(struct net *net,
82 struct nf_ct_event_notifier *new) 85 struct nf_ct_event_notifier *new)
83{ 86{
84 int ret = 0; 87 int ret;
85 struct nf_ct_event_notifier *notify; 88 struct nf_ct_event_notifier *notify;
86 89
87 mutex_lock(&nf_ct_ecache_mutex); 90 mutex_lock(&nf_ct_ecache_mutex);
@@ -92,8 +95,7 @@ int nf_conntrack_register_notifier(struct net *net,
92 goto out_unlock; 95 goto out_unlock;
93 } 96 }
94 rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); 97 rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
95 mutex_unlock(&nf_ct_ecache_mutex); 98 ret = 0;
96 return ret;
97 99
98out_unlock: 100out_unlock:
99 mutex_unlock(&nf_ct_ecache_mutex); 101 mutex_unlock(&nf_ct_ecache_mutex);
@@ -118,7 +120,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
118int nf_ct_expect_register_notifier(struct net *net, 120int nf_ct_expect_register_notifier(struct net *net,
119 struct nf_exp_event_notifier *new) 121 struct nf_exp_event_notifier *new)
120{ 122{
121 int ret = 0; 123 int ret;
122 struct nf_exp_event_notifier *notify; 124 struct nf_exp_event_notifier *notify;
123 125
124 mutex_lock(&nf_ct_ecache_mutex); 126 mutex_lock(&nf_ct_ecache_mutex);
@@ -129,8 +131,7 @@ int nf_ct_expect_register_notifier(struct net *net,
129 goto out_unlock; 131 goto out_unlock;
130 } 132 }
131 rcu_assign_pointer(net->ct.nf_expect_event_cb, new); 133 rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
132 mutex_unlock(&nf_ct_ecache_mutex); 134 ret = 0;
133 return ret;
134 135
135out_unlock: 136out_unlock:
136 mutex_unlock(&nf_ct_ecache_mutex); 137 mutex_unlock(&nf_ct_ecache_mutex);
@@ -196,8 +197,7 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
196 table[1].data = &net->ct.sysctl_events_retry_timeout; 197 table[1].data = &net->ct.sysctl_events_retry_timeout;
197 198
198 net->ct.event_sysctl_header = 199 net->ct.event_sysctl_header =
199 register_net_sysctl_table(net, 200 register_net_sysctl(net, "net/netfilter", table);
200 nf_net_netfilter_sysctl_path, table);
201 if (!net->ct.event_sysctl_header) { 201 if (!net->ct.event_sysctl_header) {
202 printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n"); 202 printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
203 goto out_register; 203 goto out_register;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4147ba3f653c..45cf602a76bc 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -424,9 +424,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
424 } 424 }
425 425
426 if (net->ct.expect_count >= nf_ct_expect_max) { 426 if (net->ct.expect_count >= nf_ct_expect_max) {
427 if (net_ratelimit()) 427 net_warn_ratelimited("nf_conntrack: expectation table full\n");
428 printk(KERN_WARNING
429 "nf_conntrack: expectation table full\n");
430 ret = -EMFILE; 428 ret = -EMFILE;
431 } 429 }
432out: 430out:
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f8af72..46d69d7f1bb4 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -605,8 +605,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
605 605
606 drop: 606 drop:
607 spin_unlock_bh(&nf_h323_lock); 607 spin_unlock_bh(&nf_h323_lock);
608 if (net_ratelimit()) 608 net_info_ratelimited("nf_ct_h245: packet dropped\n");
609 pr_info("nf_ct_h245: packet dropped\n");
610 return NF_DROP; 609 return NF_DROP;
611} 610}
612 611
@@ -1156,8 +1155,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1156 1155
1157 drop: 1156 drop:
1158 spin_unlock_bh(&nf_h323_lock); 1157 spin_unlock_bh(&nf_h323_lock);
1159 if (net_ratelimit()) 1158 net_info_ratelimited("nf_ct_q931: packet dropped\n");
1160 pr_info("nf_ct_q931: packet dropped\n");
1161 return NF_DROP; 1159 return NF_DROP;
1162} 1160}
1163 1161
@@ -1230,7 +1228,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1230 1228
1231/****************************************************************************/ 1229/****************************************************************************/
1232static int set_expect_timeout(struct nf_conntrack_expect *exp, 1230static int set_expect_timeout(struct nf_conntrack_expect *exp,
1233 unsigned timeout) 1231 unsigned int timeout)
1234{ 1232{
1235 if (!exp || !del_timer(&exp->timeout)) 1233 if (!exp || !del_timer(&exp->timeout))
1236 return 0; 1234 return 0;
@@ -1731,8 +1729,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
1731 1729
1732 drop: 1730 drop:
1733 spin_unlock_bh(&nf_h323_lock); 1731 spin_unlock_bh(&nf_h323_lock);
1734 if (net_ratelimit()) 1732 net_info_ratelimited("nf_ct_ras: packet dropped\n");
1735 pr_info("nf_ct_ras: packet dropped\n");
1736 return NF_DROP; 1733 return NF_DROP;
1737} 1734}
1738 1735
@@ -1833,4 +1830,6 @@ MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
1833MODULE_DESCRIPTION("H.323 connection tracking helper"); 1830MODULE_DESCRIPTION("H.323 connection tracking helper");
1834MODULE_LICENSE("GPL"); 1831MODULE_LICENSE("GPL");
1835MODULE_ALIAS("ip_conntrack_h323"); 1832MODULE_ALIAS("ip_conntrack_h323");
1836MODULE_ALIAS_NFCT_HELPER("h323"); 1833MODULE_ALIAS_NFCT_HELPER("RAS");
1834MODULE_ALIAS_NFCT_HELPER("Q.931");
1835MODULE_ALIAS_NFCT_HELPER("H.245");
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index bbe23baa19b6..4fa2ff961f5a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -34,6 +34,67 @@ static struct hlist_head *nf_ct_helper_hash __read_mostly;
34static unsigned int nf_ct_helper_hsize __read_mostly; 34static unsigned int nf_ct_helper_hsize __read_mostly;
35static unsigned int nf_ct_helper_count __read_mostly; 35static unsigned int nf_ct_helper_count __read_mostly;
36 36
37static bool nf_ct_auto_assign_helper __read_mostly = true;
38module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
39MODULE_PARM_DESC(nf_conntrack_helper,
40 "Enable automatic conntrack helper assignment (default 1)");
41
42#ifdef CONFIG_SYSCTL
43static struct ctl_table helper_sysctl_table[] = {
44 {
45 .procname = "nf_conntrack_helper",
46 .data = &init_net.ct.sysctl_auto_assign_helper,
47 .maxlen = sizeof(unsigned int),
48 .mode = 0644,
49 .proc_handler = proc_dointvec,
50 },
51 {}
52};
53
54static int nf_conntrack_helper_init_sysctl(struct net *net)
55{
56 struct ctl_table *table;
57
58 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
59 GFP_KERNEL);
60 if (!table)
61 goto out;
62
63 table[0].data = &net->ct.sysctl_auto_assign_helper;
64
65 net->ct.helper_sysctl_header =
66 register_net_sysctl(net, "net/netfilter", table);
67
68 if (!net->ct.helper_sysctl_header) {
69 pr_err("nf_conntrack_helper: can't register to sysctl.\n");
70 goto out_register;
71 }
72 return 0;
73
74out_register:
75 kfree(table);
76out:
77 return -ENOMEM;
78}
79
80static void nf_conntrack_helper_fini_sysctl(struct net *net)
81{
82 struct ctl_table *table;
83
84 table = net->ct.helper_sysctl_header->ctl_table_arg;
85 unregister_net_sysctl_table(net->ct.helper_sysctl_header);
86 kfree(table);
87}
88#else
89static int nf_conntrack_helper_init_sysctl(struct net *net)
90{
91 return 0;
92}
93
94static void nf_conntrack_helper_fini_sysctl(struct net *net)
95{
96}
97#endif /* CONFIG_SYSCTL */
37 98
38/* Stupid hash, but collision free for the default registrations of the 99/* Stupid hash, but collision free for the default registrations of the
39 * helpers currently in the kernel. */ 100 * helpers currently in the kernel. */
@@ -118,17 +179,38 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
118{ 179{
119 struct nf_conntrack_helper *helper = NULL; 180 struct nf_conntrack_helper *helper = NULL;
120 struct nf_conn_help *help; 181 struct nf_conn_help *help;
182 struct net *net = nf_ct_net(ct);
121 int ret = 0; 183 int ret = 0;
122 184
185 /* We already got a helper explicitly attached. The function
186 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking
187 * the helper up again. Since now the user is in full control of
188 * making consistent helper configurations, skip this automatic
189 * re-lookup, otherwise we'll lose the helper.
190 */
191 if (test_bit(IPS_HELPER_BIT, &ct->status))
192 return 0;
193
123 if (tmpl != NULL) { 194 if (tmpl != NULL) {
124 help = nfct_help(tmpl); 195 help = nfct_help(tmpl);
125 if (help != NULL) 196 if (help != NULL) {
126 helper = help->helper; 197 helper = help->helper;
198 set_bit(IPS_HELPER_BIT, &ct->status);
199 }
127 } 200 }
128 201
129 help = nfct_help(ct); 202 help = nfct_help(ct);
130 if (helper == NULL) 203 if (net->ct.sysctl_auto_assign_helper && helper == NULL) {
131 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 204 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
205 if (unlikely(!net->ct.auto_assign_helper_warned && helper)) {
206 pr_info("nf_conntrack: automatic helper "
207 "assignment is deprecated and it will "
208 "be removed soon. Use the iptables CT target "
209 "to attach helpers instead.\n");
210 net->ct.auto_assign_helper_warned = true;
211 }
212 }
213
132 if (helper == NULL) { 214 if (helper == NULL) {
133 if (help) 215 if (help)
134 RCU_INIT_POINTER(help->helper, NULL); 216 RCU_INIT_POINTER(help->helper, NULL);
@@ -181,6 +263,60 @@ void nf_ct_helper_destroy(struct nf_conn *ct)
181 } 263 }
182} 264}
183 265
266static LIST_HEAD(nf_ct_helper_expectfn_list);
267
268void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
269{
270 spin_lock_bh(&nf_conntrack_lock);
271 list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
272 spin_unlock_bh(&nf_conntrack_lock);
273}
274EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
275
276void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
277{
278 spin_lock_bh(&nf_conntrack_lock);
279 list_del_rcu(&n->head);
280 spin_unlock_bh(&nf_conntrack_lock);
281}
282EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
283
284struct nf_ct_helper_expectfn *
285nf_ct_helper_expectfn_find_by_name(const char *name)
286{
287 struct nf_ct_helper_expectfn *cur;
288 bool found = false;
289
290 rcu_read_lock();
291 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
292 if (!strcmp(cur->name, name)) {
293 found = true;
294 break;
295 }
296 }
297 rcu_read_unlock();
298 return found ? cur : NULL;
299}
300EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
301
302struct nf_ct_helper_expectfn *
303nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
304{
305 struct nf_ct_helper_expectfn *cur;
306 bool found = false;
307
308 rcu_read_lock();
309 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
310 if (cur->expectfn == symbol) {
311 found = true;
312 break;
313 }
314 }
315 rcu_read_unlock();
316 return found ? cur : NULL;
317}
318EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
319
184int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 320int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
185{ 321{
186 unsigned int h = helper_hash(&me->tuple); 322 unsigned int h = helper_hash(&me->tuple);
@@ -261,28 +397,44 @@ static struct nf_ct_ext_type helper_extend __read_mostly = {
261 .id = NF_CT_EXT_HELPER, 397 .id = NF_CT_EXT_HELPER,
262}; 398};
263 399
264int nf_conntrack_helper_init(void) 400int nf_conntrack_helper_init(struct net *net)
265{ 401{
266 int err; 402 int err;
267 403
268 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ 404 net->ct.auto_assign_helper_warned = false;
269 nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); 405 net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
270 if (!nf_ct_helper_hash) 406
271 return -ENOMEM; 407 if (net_eq(net, &init_net)) {
408 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
409 nf_ct_helper_hash =
410 nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
411 if (!nf_ct_helper_hash)
412 return -ENOMEM;
413
414 err = nf_ct_extend_register(&helper_extend);
415 if (err < 0)
416 goto err1;
417 }
272 418
273 err = nf_ct_extend_register(&helper_extend); 419 err = nf_conntrack_helper_init_sysctl(net);
274 if (err < 0) 420 if (err < 0)
275 goto err1; 421 goto out_sysctl;
276 422
277 return 0; 423 return 0;
278 424
425out_sysctl:
426 if (net_eq(net, &init_net))
427 nf_ct_extend_unregister(&helper_extend);
279err1: 428err1:
280 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); 429 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
281 return err; 430 return err;
282} 431}
283 432
284void nf_conntrack_helper_fini(void) 433void nf_conntrack_helper_fini(struct net *net)
285{ 434{
286 nf_ct_extend_unregister(&helper_extend); 435 nf_conntrack_helper_fini_sysctl(net);
287 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); 436 if (net_eq(net, &init_net)) {
437 nf_ct_extend_unregister(&helper_extend);
438 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
439 }
288} 440}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 4f9390b98697..81366c118271 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -185,11 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
185 tuple = &ct->tuplehash[dir].tuple; 185 tuple = &ct->tuplehash[dir].tuple;
186 if (tuple->src.u3.ip != dcc_ip && 186 if (tuple->src.u3.ip != dcc_ip &&
187 tuple->dst.u3.ip != dcc_ip) { 187 tuple->dst.u3.ip != dcc_ip) {
188 if (net_ratelimit()) 188 net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
189 printk(KERN_WARNING 189 &tuple->src.u3.ip,
190 "Forged DCC command from %pI4: %pI4:%u\n", 190 &dcc_ip, dcc_port);
191 &tuple->src.u3.ip,
192 &dcc_ip, dcc_port);
193 continue; 191 continue;
194 } 192 }
195 193
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b49da6c925b3..6f4b00a8fc73 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,
66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); 66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
67 if (!nest_parms) 67 if (!nest_parms)
68 goto nla_put_failure; 68 goto nla_put_failure;
69 NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum); 69 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
70 goto nla_put_failure;
70 71
71 if (likely(l4proto->tuple_to_nlattr)) 72 if (likely(l4proto->tuple_to_nlattr))
72 ret = l4proto->tuple_to_nlattr(skb, tuple); 73 ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -110,22 +111,24 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
110 struct nf_conntrack_l3proto *l3proto; 111 struct nf_conntrack_l3proto *l3proto;
111 struct nf_conntrack_l4proto *l4proto; 112 struct nf_conntrack_l4proto *l4proto;
112 113
114 rcu_read_lock();
113 l3proto = __nf_ct_l3proto_find(tuple->src.l3num); 115 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
114 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); 116 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
115 117
116 if (unlikely(ret < 0)) 118 if (ret >= 0) {
117 return ret; 119 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
118 120 tuple->dst.protonum);
119 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); 121 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
120 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); 122 }
121 123 rcu_read_unlock();
122 return ret; 124 return ret;
123} 125}
124 126
125static inline int 127static inline int
126ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) 128ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
127{ 129{
128 NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status)); 130 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
131 goto nla_put_failure;
129 return 0; 132 return 0;
130 133
131nla_put_failure: 134nla_put_failure:
@@ -140,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
140 if (timeout < 0) 143 if (timeout < 0)
141 timeout = 0; 144 timeout = 0;
142 145
143 NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout)); 146 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
147 goto nla_put_failure;
144 return 0; 148 return 0;
145 149
146nla_put_failure: 150nla_put_failure:
@@ -189,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
189 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); 193 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
190 if (!nest_helper) 194 if (!nest_helper)
191 goto nla_put_failure; 195 goto nla_put_failure;
192 NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name); 196 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
197 goto nla_put_failure;
193 198
194 if (helper->to_nlattr) 199 if (helper->to_nlattr)
195 helper->to_nlattr(skb, ct); 200 helper->to_nlattr(skb, ct);
@@ -213,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
213 if (!nest_count) 218 if (!nest_count)
214 goto nla_put_failure; 219 goto nla_put_failure;
215 220
216 NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); 221 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
217 NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); 222 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
223 goto nla_put_failure;
218 224
219 nla_nest_end(skb, nest_count); 225 nla_nest_end(skb, nest_count);
220 226
@@ -259,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
259 if (!nest_count) 265 if (!nest_count)
260 goto nla_put_failure; 266 goto nla_put_failure;
261 267
262 NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)); 268 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
263 if (tstamp->stop != 0) { 269 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
264 NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, 270 cpu_to_be64(tstamp->stop))))
265 cpu_to_be64(tstamp->stop)); 271 goto nla_put_failure;
266 }
267 nla_nest_end(skb, nest_count); 272 nla_nest_end(skb, nest_count);
268 273
269 return 0; 274 return 0;
@@ -276,7 +281,8 @@ nla_put_failure:
276static inline int 281static inline int
277ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) 282ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
278{ 283{
279 NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark)); 284 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
285 goto nla_put_failure;
280 return 0; 286 return 0;
281 287
282nla_put_failure: 288nla_put_failure:
@@ -303,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
303 if (!nest_secctx) 309 if (!nest_secctx)
304 goto nla_put_failure; 310 goto nla_put_failure;
305 311
306 NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); 312 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
313 goto nla_put_failure;
307 nla_nest_end(skb, nest_secctx); 314 nla_nest_end(skb, nest_secctx);
308 315
309 ret = 0; 316 ret = 0;
@@ -348,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
348 if (!nest_parms) 355 if (!nest_parms)
349 goto nla_put_failure; 356 goto nla_put_failure;
350 357
351 NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, 358 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
352 htonl(natseq->correction_pos)); 359 htonl(natseq->correction_pos)) ||
353 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, 360 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
354 htonl(natseq->offset_before)); 361 htonl(natseq->offset_before)) ||
355 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, 362 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
356 htonl(natseq->offset_after)); 363 htonl(natseq->offset_after)))
364 goto nla_put_failure;
357 365
358 nla_nest_end(skb, nest_parms); 366 nla_nest_end(skb, nest_parms);
359 367
@@ -389,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
389static inline int 397static inline int
390ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) 398ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
391{ 399{
392 NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct)); 400 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
401 goto nla_put_failure;
393 return 0; 402 return 0;
394 403
395nla_put_failure: 404nla_put_failure:
@@ -399,7 +408,8 @@ nla_put_failure:
399static inline int 408static inline int
400ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) 409ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
401{ 410{
402 NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))); 411 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
412 goto nla_put_failure;
403 return 0; 413 return 0;
404 414
405nla_put_failure: 415nla_put_failure:
@@ -439,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
439 goto nla_put_failure; 449 goto nla_put_failure;
440 nla_nest_end(skb, nest_parms); 450 nla_nest_end(skb, nest_parms);
441 451
442 if (nf_ct_zone(ct)) 452 if (nf_ct_zone(ct) &&
443 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); 453 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
454 goto nla_put_failure;
444 455
445 if (ctnetlink_dump_status(skb, ct) < 0 || 456 if (ctnetlink_dump_status(skb, ct) < 0 ||
446 ctnetlink_dump_timeout(skb, ct) < 0 || 457 ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -616,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
616 goto nla_put_failure; 627 goto nla_put_failure;
617 nla_nest_end(skb, nest_parms); 628 nla_nest_end(skb, nest_parms);
618 629
619 if (nf_ct_zone(ct)) 630 if (nf_ct_zone(ct) &&
620 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); 631 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
632 goto nla_put_failure;
621 633
622 if (ctnetlink_dump_id(skb, ct) < 0) 634 if (ctnetlink_dump_id(skb, ct) < 0)
623 goto nla_put_failure; 635 goto nla_put_failure;
@@ -691,9 +703,18 @@ static int ctnetlink_done(struct netlink_callback *cb)
691{ 703{
692 if (cb->args[1]) 704 if (cb->args[1])
693 nf_ct_put((struct nf_conn *)cb->args[1]); 705 nf_ct_put((struct nf_conn *)cb->args[1]);
706 if (cb->data)
707 kfree(cb->data);
694 return 0; 708 return 0;
695} 709}
696 710
711struct ctnetlink_dump_filter {
712 struct {
713 u_int32_t val;
714 u_int32_t mask;
715 } mark;
716};
717
697static int 718static int
698ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 719ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
699{ 720{
@@ -703,6 +724,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
703 struct hlist_nulls_node *n; 724 struct hlist_nulls_node *n;
704 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 725 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
705 u_int8_t l3proto = nfmsg->nfgen_family; 726 u_int8_t l3proto = nfmsg->nfgen_family;
727 int res;
728#ifdef CONFIG_NF_CONNTRACK_MARK
729 const struct ctnetlink_dump_filter *filter = cb->data;
730#endif
706 731
707 spin_lock_bh(&nf_conntrack_lock); 732 spin_lock_bh(&nf_conntrack_lock);
708 last = (struct nf_conn *)cb->args[1]; 733 last = (struct nf_conn *)cb->args[1];
@@ -723,11 +748,20 @@ restart:
723 continue; 748 continue;
724 cb->args[1] = 0; 749 cb->args[1] = 0;
725 } 750 }
726 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 751#ifdef CONFIG_NF_CONNTRACK_MARK
727 cb->nlh->nlmsg_seq, 752 if (filter && !((ct->mark & filter->mark.mask) ==
728 NFNL_MSG_TYPE( 753 filter->mark.val)) {
729 cb->nlh->nlmsg_type), 754 continue;
730 ct) < 0) { 755 }
756#endif
757 rcu_read_lock();
758 res =
759 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
760 cb->nlh->nlmsg_seq,
761 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
762 ct);
763 rcu_read_unlock();
764 if (res < 0) {
731 nf_conntrack_get(&ct->ct_general); 765 nf_conntrack_get(&ct->ct_general);
732 cb->args[1] = (unsigned long)ct; 766 cb->args[1] = (unsigned long)ct;
733 goto out; 767 goto out;
@@ -894,6 +928,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
894 [CTA_NAT_DST] = { .type = NLA_NESTED }, 928 [CTA_NAT_DST] = { .type = NLA_NESTED },
895 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED }, 929 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
896 [CTA_ZONE] = { .type = NLA_U16 }, 930 [CTA_ZONE] = { .type = NLA_U16 },
931 [CTA_MARK_MASK] = { .type = NLA_U32 },
897}; 932};
898 933
899static int 934static int
@@ -978,9 +1013,28 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
978 u16 zone; 1013 u16 zone;
979 int err; 1014 int err;
980 1015
981 if (nlh->nlmsg_flags & NLM_F_DUMP) 1016 if (nlh->nlmsg_flags & NLM_F_DUMP) {
982 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 1017 struct netlink_dump_control c = {
983 ctnetlink_done, 0); 1018 .dump = ctnetlink_dump_table,
1019 .done = ctnetlink_done,
1020 };
1021#ifdef CONFIG_NF_CONNTRACK_MARK
1022 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1023 struct ctnetlink_dump_filter *filter;
1024
1025 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1026 GFP_ATOMIC);
1027 if (filter == NULL)
1028 return -ENOMEM;
1029
1030 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1031 filter->mark.mask =
1032 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1033 c.data = filter;
1034 }
1035#endif
1036 return netlink_dump_start(ctnl, skb, nlh, &c);
1037 }
984 1038
985 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 1039 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
986 if (err < 0) 1040 if (err < 0)
@@ -1610,14 +1664,16 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
1610 if (!nest_parms) 1664 if (!nest_parms)
1611 goto nla_put_failure; 1665 goto nla_put_failure;
1612 1666
1667 rcu_read_lock();
1613 l3proto = __nf_ct_l3proto_find(tuple->src.l3num); 1668 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
1614 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto); 1669 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
1615 1670 if (ret >= 0) {
1616 if (unlikely(ret < 0)) 1671 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
1617 goto nla_put_failure; 1672 tuple->dst.protonum);
1618
1619 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
1620 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 1673 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
1674 }
1675 rcu_read_unlock();
1676
1621 if (unlikely(ret < 0)) 1677 if (unlikely(ret < 0))
1622 goto nla_put_failure; 1678 goto nla_put_failure;
1623 1679
@@ -1636,6 +1692,11 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1636 struct nf_conn *master = exp->master; 1692 struct nf_conn *master = exp->master;
1637 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; 1693 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
1638 struct nf_conn_help *help; 1694 struct nf_conn_help *help;
1695#ifdef CONFIG_NF_NAT_NEEDED
1696 struct nlattr *nest_parms;
1697 struct nf_conntrack_tuple nat_tuple = {};
1698#endif
1699 struct nf_ct_helper_expectfn *expfn;
1639 1700
1640 if (timeout < 0) 1701 if (timeout < 0)
1641 timeout = 0; 1702 timeout = 0;
@@ -1649,17 +1710,44 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1649 CTA_EXPECT_MASTER) < 0) 1710 CTA_EXPECT_MASTER) < 0)
1650 goto nla_put_failure; 1711 goto nla_put_failure;
1651 1712
1652 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1713#ifdef CONFIG_NF_NAT_NEEDED
1653 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1714 if (exp->saved_ip || exp->saved_proto.all) {
1654 NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)); 1715 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
1716 if (!nest_parms)
1717 goto nla_put_failure;
1718
1719 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
1720 goto nla_put_failure;
1721
1722 nat_tuple.src.l3num = nf_ct_l3num(master);
1723 nat_tuple.src.u3.ip = exp->saved_ip;
1724 nat_tuple.dst.protonum = nf_ct_protonum(master);
1725 nat_tuple.src.u = exp->saved_proto;
1726
1727 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
1728 CTA_EXPECT_NAT_TUPLE) < 0)
1729 goto nla_put_failure;
1730 nla_nest_end(skb, nest_parms);
1731 }
1732#endif
1733 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
1734 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
1735 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
1736 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
1737 goto nla_put_failure;
1655 help = nfct_help(master); 1738 help = nfct_help(master);
1656 if (help) { 1739 if (help) {
1657 struct nf_conntrack_helper *helper; 1740 struct nf_conntrack_helper *helper;
1658 1741
1659 helper = rcu_dereference(help->helper); 1742 helper = rcu_dereference(help->helper);
1660 if (helper) 1743 if (helper &&
1661 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name); 1744 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
1745 goto nla_put_failure;
1662 } 1746 }
1747 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
1748 if (expfn != NULL &&
1749 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
1750 goto nla_put_failure;
1663 1751
1664 return 0; 1752 return 0;
1665 1753
@@ -1817,6 +1905,9 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1817 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING }, 1905 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1818 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 1906 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
1819 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 1907 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
1908 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
1909 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
1910 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
1820}; 1911};
1821 1912
1822static int 1913static int
@@ -1834,9 +1925,11 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1834 int err; 1925 int err;
1835 1926
1836 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1927 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1837 return netlink_dump_start(ctnl, skb, nlh, 1928 struct netlink_dump_control c = {
1838 ctnetlink_exp_dump_table, 1929 .dump = ctnetlink_exp_dump_table,
1839 ctnetlink_exp_done, 0); 1930 .done = ctnetlink_exp_done,
1931 };
1932 return netlink_dump_start(ctnl, skb, nlh, &c);
1840 } 1933 }
1841 1934
1842 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 1935 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
@@ -1987,7 +2080,50 @@ static int
1987ctnetlink_change_expect(struct nf_conntrack_expect *x, 2080ctnetlink_change_expect(struct nf_conntrack_expect *x,
1988 const struct nlattr * const cda[]) 2081 const struct nlattr * const cda[])
1989{ 2082{
2083 if (cda[CTA_EXPECT_TIMEOUT]) {
2084 if (!del_timer(&x->timeout))
2085 return -ETIME;
2086
2087 x->timeout.expires = jiffies +
2088 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2089 add_timer(&x->timeout);
2090 }
2091 return 0;
2092}
2093
2094static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2095 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2096 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
2097};
2098
2099static int
2100ctnetlink_parse_expect_nat(const struct nlattr *attr,
2101 struct nf_conntrack_expect *exp,
2102 u_int8_t u3)
2103{
2104#ifdef CONFIG_NF_NAT_NEEDED
2105 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2106 struct nf_conntrack_tuple nat_tuple = {};
2107 int err;
2108
2109 nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2110
2111 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2112 return -EINVAL;
2113
2114 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2115 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2116 if (err < 0)
2117 return err;
2118
2119 exp->saved_ip = nat_tuple.src.u3.ip;
2120 exp->saved_proto = nat_tuple.src.u;
2121 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2122
2123 return 0;
2124#else
1990 return -EOPNOTSUPP; 2125 return -EOPNOTSUPP;
2126#endif
1991} 2127}
1992 2128
1993static int 2129static int
@@ -2001,6 +2137,8 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2001 struct nf_conntrack_expect *exp; 2137 struct nf_conntrack_expect *exp;
2002 struct nf_conn *ct; 2138 struct nf_conn *ct;
2003 struct nf_conn_help *help; 2139 struct nf_conn_help *help;
2140 struct nf_conntrack_helper *helper = NULL;
2141 u_int32_t class = 0;
2004 int err = 0; 2142 int err = 0;
2005 2143
2006 /* caller guarantees that those three CTA_EXPECT_* exist */ 2144 /* caller guarantees that those three CTA_EXPECT_* exist */
@@ -2019,6 +2157,40 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2019 if (!h) 2157 if (!h)
2020 return -ENOENT; 2158 return -ENOENT;
2021 ct = nf_ct_tuplehash_to_ctrack(h); 2159 ct = nf_ct_tuplehash_to_ctrack(h);
2160
2161 /* Look for helper of this expectation */
2162 if (cda[CTA_EXPECT_HELP_NAME]) {
2163 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2164
2165 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2166 nf_ct_protonum(ct));
2167 if (helper == NULL) {
2168#ifdef CONFIG_MODULES
2169 if (request_module("nfct-helper-%s", helpname) < 0) {
2170 err = -EOPNOTSUPP;
2171 goto out;
2172 }
2173
2174 helper = __nf_conntrack_helper_find(helpname,
2175 nf_ct_l3num(ct),
2176 nf_ct_protonum(ct));
2177 if (helper) {
2178 err = -EAGAIN;
2179 goto out;
2180 }
2181#endif
2182 err = -EOPNOTSUPP;
2183 goto out;
2184 }
2185 }
2186
2187 if (cda[CTA_EXPECT_CLASS] && helper) {
2188 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2189 if (class > helper->expect_class_max) {
2190 err = -EINVAL;
2191 goto out;
2192 }
2193 }
2022 exp = nf_ct_expect_alloc(ct); 2194 exp = nf_ct_expect_alloc(ct);
2023 if (!exp) { 2195 if (!exp) {
2024 err = -ENOMEM; 2196 err = -ENOMEM;
@@ -2045,18 +2217,35 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2045 } else 2217 } else
2046 exp->flags = 0; 2218 exp->flags = 0;
2047 } 2219 }
2220 if (cda[CTA_EXPECT_FN]) {
2221 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2222 struct nf_ct_helper_expectfn *expfn;
2223
2224 expfn = nf_ct_helper_expectfn_find_by_name(name);
2225 if (expfn == NULL) {
2226 err = -EINVAL;
2227 goto err_out;
2228 }
2229 exp->expectfn = expfn->expectfn;
2230 } else
2231 exp->expectfn = NULL;
2048 2232
2049 exp->class = 0; 2233 exp->class = class;
2050 exp->expectfn = NULL;
2051 exp->master = ct; 2234 exp->master = ct;
2052 exp->helper = NULL; 2235 exp->helper = helper;
2053 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); 2236 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2054 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); 2237 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2055 exp->mask.src.u.all = mask.src.u.all; 2238 exp->mask.src.u.all = mask.src.u.all;
2056 2239
2240 if (cda[CTA_EXPECT_NAT]) {
2241 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2242 exp, u3);
2243 if (err < 0)
2244 goto err_out;
2245 }
2057 err = nf_ct_expect_related_report(exp, pid, report); 2246 err = nf_ct_expect_related_report(exp, pid, report);
2247err_out:
2058 nf_ct_expect_put(exp); 2248 nf_ct_expect_put(exp);
2059
2060out: 2249out:
2061 nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); 2250 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2062 return err; 2251 return err;
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 5701c8dd783c..8b631b07a645 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,11 +36,11 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
36 36
37#ifdef CONFIG_SYSCTL 37#ifdef CONFIG_SYSCTL
38static int 38static int
39nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, 39nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
40 struct ctl_table *table, unsigned int *users) 40 struct ctl_table *table, unsigned int *users)
41{ 41{
42 if (*header == NULL) { 42 if (*header == NULL) {
43 *header = register_sysctl_paths(path, table); 43 *header = register_net_sysctl(&init_net, path, table);
44 if (*header == NULL) 44 if (*header == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 } 46 }
@@ -56,7 +56,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header,
56 if (users != NULL && --*users > 0) 56 if (users != NULL && --*users > 0)
57 return; 57 return;
58 58
59 unregister_sysctl_table(*header); 59 unregister_net_sysctl_table(*header);
60 *header = NULL; 60 *header = NULL;
61} 61}
62#endif 62#endif
@@ -127,6 +127,27 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
127} 127}
128EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); 128EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
129 129
130struct nf_conntrack_l4proto *
131nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
132{
133 struct nf_conntrack_l4proto *p;
134
135 rcu_read_lock();
136 p = __nf_ct_l4proto_find(l3num, l4num);
137 if (!try_module_get(p->me))
138 p = &nf_conntrack_l4proto_generic;
139 rcu_read_unlock();
140
141 return p;
142}
143EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
144
145void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
146{
147 module_put(p->me);
148}
149EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
150
130static int kill_l3proto(struct nf_conn *i, void *data) 151static int kill_l3proto(struct nf_conn *i, void *data)
131{ 152{
132 return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto; 153 return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;
@@ -229,7 +250,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
229#ifdef CONFIG_SYSCTL 250#ifdef CONFIG_SYSCTL
230 if (l4proto->ctl_table != NULL) { 251 if (l4proto->ctl_table != NULL) {
231 err = nf_ct_register_sysctl(l4proto->ctl_table_header, 252 err = nf_ct_register_sysctl(l4proto->ctl_table_header,
232 nf_net_netfilter_sysctl_path, 253 "net/netfilter",
233 l4proto->ctl_table, 254 l4proto->ctl_table,
234 l4proto->ctl_table_users); 255 l4proto->ctl_table_users);
235 if (err < 0) 256 if (err < 0)
@@ -238,7 +259,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
238#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
239 if (l4proto->ctl_compat_table != NULL) { 260 if (l4proto->ctl_compat_table != NULL) {
240 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, 261 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
241 nf_net_ipv4_netfilter_sysctl_path, 262 "net/ipv4/netfilter",
242 l4proto->ctl_compat_table, NULL); 263 l4proto->ctl_compat_table, NULL);
243 if (err == 0) 264 if (err == 0)
244 goto out; 265 goto out;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d6dde6dc09e6..ef706a485be1 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -423,7 +423,7 @@ static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
423} 423}
424 424
425static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, 425static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
426 unsigned int dataoff) 426 unsigned int dataoff, unsigned int *timeouts)
427{ 427{
428 struct net *net = nf_ct_net(ct); 428 struct net *net = nf_ct_net(ct);
429 struct dccp_net *dn; 429 struct dccp_net *dn;
@@ -472,12 +472,17 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
472 ntohl(dhack->dccph_ack_nr_low); 472 ntohl(dhack->dccph_ack_nr_low);
473} 473}
474 474
475static unsigned int *dccp_get_timeouts(struct net *net)
476{
477 return dccp_pernet(net)->dccp_timeout;
478}
479
475static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, 480static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
476 unsigned int dataoff, enum ip_conntrack_info ctinfo, 481 unsigned int dataoff, enum ip_conntrack_info ctinfo,
477 u_int8_t pf, unsigned int hooknum) 482 u_int8_t pf, unsigned int hooknum,
483 unsigned int *timeouts)
478{ 484{
479 struct net *net = nf_ct_net(ct); 485 struct net *net = nf_ct_net(ct);
480 struct dccp_net *dn;
481 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 486 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
482 struct dccp_hdr _dh, *dh; 487 struct dccp_hdr _dh, *dh;
483 u_int8_t type, old_state, new_state; 488 u_int8_t type, old_state, new_state;
@@ -559,8 +564,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
559 if (new_state != old_state) 564 if (new_state != old_state)
560 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 565 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
561 566
562 dn = dccp_pernet(net); 567 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
563 nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
564 568
565 return NF_ACCEPT; 569 return NF_ACCEPT;
566} 570}
@@ -639,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
639 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); 643 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
640 if (!nest_parms) 644 if (!nest_parms)
641 goto nla_put_failure; 645 goto nla_put_failure;
642 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); 646 if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
643 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, 647 nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
644 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); 648 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
645 NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, 649 nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
646 cpu_to_be64(ct->proto.dccp.handshake_seq)); 650 cpu_to_be64(ct->proto.dccp.handshake_seq)))
651 goto nla_put_failure;
647 nla_nest_end(skb, nest_parms); 652 nla_nest_end(skb, nest_parms);
648 spin_unlock_bh(&ct->lock); 653 spin_unlock_bh(&ct->lock);
649 return 0; 654 return 0;
@@ -702,8 +707,61 @@ static int dccp_nlattr_size(void)
702 return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ 707 return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
703 + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); 708 + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
704} 709}
710
705#endif 711#endif
706 712
713#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
714
715#include <linux/netfilter/nfnetlink.h>
716#include <linux/netfilter/nfnetlink_cttimeout.h>
717
718static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
719{
720 struct dccp_net *dn = dccp_pernet(&init_net);
721 unsigned int *timeouts = data;
722 int i;
723
724 /* set default DCCP timeouts. */
725 for (i=0; i<CT_DCCP_MAX; i++)
726 timeouts[i] = dn->dccp_timeout[i];
727
728 /* there's a 1:1 mapping between attributes and protocol states. */
729 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
730 if (tb[i]) {
731 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
732 }
733 }
734 return 0;
735}
736
737static int
738dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
739{
740 const unsigned int *timeouts = data;
741 int i;
742
743 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
744 if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
745 goto nla_put_failure;
746 }
747 return 0;
748
749nla_put_failure:
750 return -ENOSPC;
751}
752
753static const struct nla_policy
754dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
755 [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
756 [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
757 [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
758 [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
759 [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
760 [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
761 [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
762};
763#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
764
707#ifdef CONFIG_SYSCTL 765#ifdef CONFIG_SYSCTL
708/* template, data assigned later */ 766/* template, data assigned later */
709static struct ctl_table dccp_sysctl_table[] = { 767static struct ctl_table dccp_sysctl_table[] = {
@@ -767,6 +825,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
767 .invert_tuple = dccp_invert_tuple, 825 .invert_tuple = dccp_invert_tuple,
768 .new = dccp_new, 826 .new = dccp_new,
769 .packet = dccp_packet, 827 .packet = dccp_packet,
828 .get_timeouts = dccp_get_timeouts,
770 .error = dccp_error, 829 .error = dccp_error,
771 .print_tuple = dccp_print_tuple, 830 .print_tuple = dccp_print_tuple,
772 .print_conntrack = dccp_print_conntrack, 831 .print_conntrack = dccp_print_conntrack,
@@ -779,6 +838,15 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
779 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 838 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
780 .nla_policy = nf_ct_port_nla_policy, 839 .nla_policy = nf_ct_port_nla_policy,
781#endif 840#endif
841#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
842 .ctnl_timeout = {
843 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
844 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
845 .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
846 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
847 .nla_policy = dccp_timeout_nla_policy,
848 },
849#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
782}; 850};
783 851
784static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { 852static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -789,6 +857,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
789 .invert_tuple = dccp_invert_tuple, 857 .invert_tuple = dccp_invert_tuple,
790 .new = dccp_new, 858 .new = dccp_new,
791 .packet = dccp_packet, 859 .packet = dccp_packet,
860 .get_timeouts = dccp_get_timeouts,
792 .error = dccp_error, 861 .error = dccp_error,
793 .print_tuple = dccp_print_tuple, 862 .print_tuple = dccp_print_tuple,
794 .print_conntrack = dccp_print_conntrack, 863 .print_conntrack = dccp_print_conntrack,
@@ -801,6 +870,15 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
801 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 870 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
802 .nla_policy = nf_ct_port_nla_policy, 871 .nla_policy = nf_ct_port_nla_policy,
803#endif 872#endif
873#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
874 .ctnl_timeout = {
875 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
876 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
877 .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
878 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
879 .nla_policy = dccp_timeout_nla_policy,
880 },
881#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
804}; 882};
805 883
806static __net_init int dccp_net_init(struct net *net) 884static __net_init int dccp_net_init(struct net *net)
@@ -832,8 +910,8 @@ static __net_init int dccp_net_init(struct net *net)
832 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; 910 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
833 dn->sysctl_table[7].data = &dn->dccp_loose; 911 dn->sysctl_table[7].data = &dn->dccp_loose;
834 912
835 dn->sysctl_header = register_net_sysctl_table(net, 913 dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
836 nf_net_netfilter_sysctl_path, dn->sysctl_table); 914 dn->sysctl_table);
837 if (!dn->sysctl_header) { 915 if (!dn->sysctl_header) {
838 kfree(dn->sysctl_table); 916 kfree(dn->sysctl_table);
839 return -ENOMEM; 917 return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e2091d0c7a2f..d8923d54b358 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -40,25 +40,71 @@ static int generic_print_tuple(struct seq_file *s,
40 return 0; 40 return 0;
41} 41}
42 42
43static unsigned int *generic_get_timeouts(struct net *net)
44{
45 return &nf_ct_generic_timeout;
46}
47
43/* Returns verdict for packet, or -1 for invalid. */ 48/* Returns verdict for packet, or -1 for invalid. */
44static int packet(struct nf_conn *ct, 49static int generic_packet(struct nf_conn *ct,
45 const struct sk_buff *skb, 50 const struct sk_buff *skb,
46 unsigned int dataoff, 51 unsigned int dataoff,
47 enum ip_conntrack_info ctinfo, 52 enum ip_conntrack_info ctinfo,
48 u_int8_t pf, 53 u_int8_t pf,
49 unsigned int hooknum) 54 unsigned int hooknum,
55 unsigned int *timeout)
50{ 56{
51 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout); 57 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
52 return NF_ACCEPT; 58 return NF_ACCEPT;
53} 59}
54 60
55/* Called when a new connection for this protocol found. */ 61/* Called when a new connection for this protocol found. */
56static bool new(struct nf_conn *ct, const struct sk_buff *skb, 62static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
57 unsigned int dataoff) 63 unsigned int dataoff, unsigned int *timeouts)
58{ 64{
59 return true; 65 return true;
60} 66}
61 67
68#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
69
70#include <linux/netfilter/nfnetlink.h>
71#include <linux/netfilter/nfnetlink_cttimeout.h>
72
73static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
74{
75 unsigned int *timeout = data;
76
77 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
78 *timeout =
79 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
80 else {
81 /* Set default generic timeout. */
82 *timeout = nf_ct_generic_timeout;
83 }
84
85 return 0;
86}
87
88static int
89generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
90{
91 const unsigned int *timeout = data;
92
93 if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
94 goto nla_put_failure;
95
96 return 0;
97
98nla_put_failure:
99 return -ENOSPC;
100}
101
102static const struct nla_policy
103generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
104 [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
105};
106#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
107
62#ifdef CONFIG_SYSCTL 108#ifdef CONFIG_SYSCTL
63static struct ctl_table_header *generic_sysctl_header; 109static struct ctl_table_header *generic_sysctl_header;
64static struct ctl_table generic_sysctl_table[] = { 110static struct ctl_table generic_sysctl_table[] = {
@@ -93,8 +139,18 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
93 .pkt_to_tuple = generic_pkt_to_tuple, 139 .pkt_to_tuple = generic_pkt_to_tuple,
94 .invert_tuple = generic_invert_tuple, 140 .invert_tuple = generic_invert_tuple,
95 .print_tuple = generic_print_tuple, 141 .print_tuple = generic_print_tuple,
96 .packet = packet, 142 .packet = generic_packet,
97 .new = new, 143 .get_timeouts = generic_get_timeouts,
144 .new = generic_new,
145#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
146 .ctnl_timeout = {
147 .nlattr_to_obj = generic_timeout_nlattr_to_obj,
148 .obj_to_nlattr = generic_timeout_obj_to_nlattr,
149 .nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
150 .obj_size = sizeof(unsigned int),
151 .nla_policy = generic_timeout_nla_policy,
152 },
153#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
98#ifdef CONFIG_SYSCTL 154#ifdef CONFIG_SYSCTL
99 .ctl_table_header = &generic_sysctl_header, 155 .ctl_table_header = &generic_sysctl_header,
100 .ctl_table = generic_sysctl_table, 156 .ctl_table = generic_sysctl_table,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index f0338791b822..4bf6b4e4b776 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -41,8 +41,16 @@
41#include <linux/netfilter/nf_conntrack_proto_gre.h> 41#include <linux/netfilter/nf_conntrack_proto_gre.h>
42#include <linux/netfilter/nf_conntrack_pptp.h> 42#include <linux/netfilter/nf_conntrack_pptp.h>
43 43
44#define GRE_TIMEOUT (30 * HZ) 44enum grep_conntrack {
45#define GRE_STREAM_TIMEOUT (180 * HZ) 45 GRE_CT_UNREPLIED,
46 GRE_CT_REPLIED,
47 GRE_CT_MAX
48};
49
50static unsigned int gre_timeouts[GRE_CT_MAX] = {
51 [GRE_CT_UNREPLIED] = 30*HZ,
52 [GRE_CT_REPLIED] = 180*HZ,
53};
46 54
47static int proto_gre_net_id __read_mostly; 55static int proto_gre_net_id __read_mostly;
48struct netns_proto_gre { 56struct netns_proto_gre {
@@ -227,13 +235,19 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
227 (ct->proto.gre.stream_timeout / HZ)); 235 (ct->proto.gre.stream_timeout / HZ));
228} 236}
229 237
238static unsigned int *gre_get_timeouts(struct net *net)
239{
240 return gre_timeouts;
241}
242
230/* Returns verdict for packet, and may modify conntrack */ 243/* Returns verdict for packet, and may modify conntrack */
231static int gre_packet(struct nf_conn *ct, 244static int gre_packet(struct nf_conn *ct,
232 const struct sk_buff *skb, 245 const struct sk_buff *skb,
233 unsigned int dataoff, 246 unsigned int dataoff,
234 enum ip_conntrack_info ctinfo, 247 enum ip_conntrack_info ctinfo,
235 u_int8_t pf, 248 u_int8_t pf,
236 unsigned int hooknum) 249 unsigned int hooknum,
250 unsigned int *timeouts)
237{ 251{
238 /* If we've seen traffic both ways, this is a GRE connection. 252 /* If we've seen traffic both ways, this is a GRE connection.
239 * Extend timeout. */ 253 * Extend timeout. */
@@ -252,15 +266,15 @@ static int gre_packet(struct nf_conn *ct,
252 266
253/* Called when a new connection for this protocol found. */ 267/* Called when a new connection for this protocol found. */
254static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, 268static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
255 unsigned int dataoff) 269 unsigned int dataoff, unsigned int *timeouts)
256{ 270{
257 pr_debug(": "); 271 pr_debug(": ");
258 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 272 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
259 273
260 /* initialize to sane value. Ideally a conntrack helper 274 /* initialize to sane value. Ideally a conntrack helper
261 * (e.g. in case of pptp) is increasing them */ 275 * (e.g. in case of pptp) is increasing them */
262 ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; 276 ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
263 ct->proto.gre.timeout = GRE_TIMEOUT; 277 ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
264 278
265 return true; 279 return true;
266} 280}
@@ -278,6 +292,53 @@ static void gre_destroy(struct nf_conn *ct)
278 nf_ct_gre_keymap_destroy(master); 292 nf_ct_gre_keymap_destroy(master);
279} 293}
280 294
295#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
296
297#include <linux/netfilter/nfnetlink.h>
298#include <linux/netfilter/nfnetlink_cttimeout.h>
299
300static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
301{
302 unsigned int *timeouts = data;
303
304 /* set default timeouts for GRE. */
305 timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
306 timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
307
308 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
309 timeouts[GRE_CT_UNREPLIED] =
310 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
311 }
312 if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
313 timeouts[GRE_CT_REPLIED] =
314 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
315 }
316 return 0;
317}
318
319static int
320gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
321{
322 const unsigned int *timeouts = data;
323
324 if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
325 htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
326 nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
327 htonl(timeouts[GRE_CT_REPLIED] / HZ)))
328 goto nla_put_failure;
329 return 0;
330
331nla_put_failure:
332 return -ENOSPC;
333}
334
335static const struct nla_policy
336gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
337 [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
338 [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
339};
340#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
341
281/* protocol helper struct */ 342/* protocol helper struct */
282static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { 343static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
283 .l3proto = AF_INET, 344 .l3proto = AF_INET,
@@ -287,6 +348,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
287 .invert_tuple = gre_invert_tuple, 348 .invert_tuple = gre_invert_tuple,
288 .print_tuple = gre_print_tuple, 349 .print_tuple = gre_print_tuple,
289 .print_conntrack = gre_print_conntrack, 350 .print_conntrack = gre_print_conntrack,
351 .get_timeouts = gre_get_timeouts,
290 .packet = gre_packet, 352 .packet = gre_packet,
291 .new = gre_new, 353 .new = gre_new,
292 .destroy = gre_destroy, 354 .destroy = gre_destroy,
@@ -297,6 +359,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
297 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 359 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
298 .nla_policy = nf_ct_port_nla_policy, 360 .nla_policy = nf_ct_port_nla_policy,
299#endif 361#endif
362#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
363 .ctnl_timeout = {
364 .nlattr_to_obj = gre_timeout_nlattr_to_obj,
365 .obj_to_nlattr = gre_timeout_obj_to_nlattr,
366 .nlattr_max = CTA_TIMEOUT_GRE_MAX,
367 .obj_size = sizeof(unsigned int) * GRE_CT_MAX,
368 .nla_policy = gre_timeout_nla_policy,
369 },
370#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
300}; 371};
301 372
302static int proto_gre_net_init(struct net *net) 373static int proto_gre_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index afa69136061a..996db2fa21f7 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -279,13 +279,19 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
279 return sctp_conntracks[dir][i][cur_state]; 279 return sctp_conntracks[dir][i][cur_state];
280} 280}
281 281
282static unsigned int *sctp_get_timeouts(struct net *net)
283{
284 return sctp_timeouts;
285}
286
282/* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 287/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
283static int sctp_packet(struct nf_conn *ct, 288static int sctp_packet(struct nf_conn *ct,
284 const struct sk_buff *skb, 289 const struct sk_buff *skb,
285 unsigned int dataoff, 290 unsigned int dataoff,
286 enum ip_conntrack_info ctinfo, 291 enum ip_conntrack_info ctinfo,
287 u_int8_t pf, 292 u_int8_t pf,
288 unsigned int hooknum) 293 unsigned int hooknum,
294 unsigned int *timeouts)
289{ 295{
290 enum sctp_conntrack new_state, old_state; 296 enum sctp_conntrack new_state, old_state;
291 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 297 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -370,7 +376,7 @@ static int sctp_packet(struct nf_conn *ct,
370 } 376 }
371 spin_unlock_bh(&ct->lock); 377 spin_unlock_bh(&ct->lock);
372 378
373 nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]); 379 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
374 380
375 if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && 381 if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
376 dir == IP_CT_DIR_REPLY && 382 dir == IP_CT_DIR_REPLY &&
@@ -390,7 +396,7 @@ out:
390 396
391/* Called when a new connection for this protocol found. */ 397/* Called when a new connection for this protocol found. */
392static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, 398static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
393 unsigned int dataoff) 399 unsigned int dataoff, unsigned int *timeouts)
394{ 400{
395 enum sctp_conntrack new_state; 401 enum sctp_conntrack new_state;
396 const struct sctphdr *sh; 402 const struct sctphdr *sh;
@@ -476,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
476 if (!nest_parms) 482 if (!nest_parms)
477 goto nla_put_failure; 483 goto nla_put_failure;
478 484
479 NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); 485 if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
480 486 nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
481 NLA_PUT_BE32(skb, 487 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
482 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, 488 nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
483 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]); 489 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
484 490 goto nla_put_failure;
485 NLA_PUT_BE32(skb,
486 CTA_PROTOINFO_SCTP_VTAG_REPLY,
487 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
488 491
489 spin_unlock_bh(&ct->lock); 492 spin_unlock_bh(&ct->lock);
490 493
@@ -543,6 +546,58 @@ static int sctp_nlattr_size(void)
543} 546}
544#endif 547#endif
545 548
549#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
550
551#include <linux/netfilter/nfnetlink.h>
552#include <linux/netfilter/nfnetlink_cttimeout.h>
553
554static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
555{
556 unsigned int *timeouts = data;
557 int i;
558
559 /* set default SCTP timeouts. */
560 for (i=0; i<SCTP_CONNTRACK_MAX; i++)
561 timeouts[i] = sctp_timeouts[i];
562
563 /* there's a 1:1 mapping between attributes and protocol states. */
564 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
565 if (tb[i]) {
566 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
567 }
568 }
569 return 0;
570}
571
572static int
573sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
574{
575 const unsigned int *timeouts = data;
576 int i;
577
578 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
579 if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
580 goto nla_put_failure;
581 }
582 return 0;
583
584nla_put_failure:
585 return -ENOSPC;
586}
587
588static const struct nla_policy
589sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
590 [CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
591 [CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
592 [CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
593 [CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
594 [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
595 [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
596 [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
597};
598#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
599
600
546#ifdef CONFIG_SYSCTL 601#ifdef CONFIG_SYSCTL
547static unsigned int sctp_sysctl_table_users; 602static unsigned int sctp_sysctl_table_users;
548static struct ctl_table_header *sctp_sysctl_header; 603static struct ctl_table_header *sctp_sysctl_header;
@@ -664,6 +719,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
664 .print_tuple = sctp_print_tuple, 719 .print_tuple = sctp_print_tuple,
665 .print_conntrack = sctp_print_conntrack, 720 .print_conntrack = sctp_print_conntrack,
666 .packet = sctp_packet, 721 .packet = sctp_packet,
722 .get_timeouts = sctp_get_timeouts,
667 .new = sctp_new, 723 .new = sctp_new,
668 .me = THIS_MODULE, 724 .me = THIS_MODULE,
669#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 725#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -675,6 +731,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
675 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 731 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
676 .nla_policy = nf_ct_port_nla_policy, 732 .nla_policy = nf_ct_port_nla_policy,
677#endif 733#endif
734#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
735 .ctnl_timeout = {
736 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
737 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
738 .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
739 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
740 .nla_policy = sctp_timeout_nla_policy,
741 },
742#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
678#ifdef CONFIG_SYSCTL 743#ifdef CONFIG_SYSCTL
679 .ctl_table_users = &sctp_sysctl_table_users, 744 .ctl_table_users = &sctp_sysctl_table_users,
680 .ctl_table_header = &sctp_sysctl_header, 745 .ctl_table_header = &sctp_sysctl_header,
@@ -694,6 +759,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
694 .print_tuple = sctp_print_tuple, 759 .print_tuple = sctp_print_tuple,
695 .print_conntrack = sctp_print_conntrack, 760 .print_conntrack = sctp_print_conntrack,
696 .packet = sctp_packet, 761 .packet = sctp_packet,
762 .get_timeouts = sctp_get_timeouts,
697 .new = sctp_new, 763 .new = sctp_new,
698 .me = THIS_MODULE, 764 .me = THIS_MODULE,
699#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 765#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -704,6 +770,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
704 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 770 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
705 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 771 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
706 .nla_policy = nf_ct_port_nla_policy, 772 .nla_policy = nf_ct_port_nla_policy,
773#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
774 .ctnl_timeout = {
775 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
776 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
777 .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
778 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
779 .nla_policy = sctp_timeout_nla_policy,
780 },
781#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
707#endif 782#endif
708#ifdef CONFIG_SYSCTL 783#ifdef CONFIG_SYSCTL
709 .ctl_table_users = &sctp_sysctl_table_users, 784 .ctl_table_users = &sctp_sysctl_table_users,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97b9f3ebf28c..21ff1a99f534 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -64,13 +64,7 @@ static const char *const tcp_conntrack_names[] = {
64#define HOURS * 60 MINS 64#define HOURS * 60 MINS
65#define DAYS * 24 HOURS 65#define DAYS * 24 HOURS
66 66
67/* RFC1122 says the R2 limit should be at least 100 seconds. 67static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
68 Linux uses 15 packets as limit, which corresponds
69 to ~13-30min depending on RTO. */
70static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
71static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
72
73static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
74 [TCP_CONNTRACK_SYN_SENT] = 2 MINS, 68 [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
75 [TCP_CONNTRACK_SYN_RECV] = 60 SECS, 69 [TCP_CONNTRACK_SYN_RECV] = 60 SECS,
76 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS, 70 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
@@ -80,6 +74,11 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
80 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS, 74 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
81 [TCP_CONNTRACK_CLOSE] = 10 SECS, 75 [TCP_CONNTRACK_CLOSE] = 10 SECS,
82 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS, 76 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
77/* RFC1122 says the R2 limit should be at least 100 seconds.
78 Linux uses 15 packets as limit, which corresponds
79 to ~13-30min depending on RTO. */
80 [TCP_CONNTRACK_RETRANS] = 5 MINS,
81 [TCP_CONNTRACK_UNACK] = 5 MINS,
83}; 82};
84 83
85#define sNO TCP_CONNTRACK_NONE 84#define sNO TCP_CONNTRACK_NONE
@@ -585,8 +584,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
585 * Let's try to use the data from the packet. 584 * Let's try to use the data from the packet.
586 */ 585 */
587 sender->td_end = end; 586 sender->td_end = end;
588 win <<= sender->td_scale; 587 swin = win << sender->td_scale;
589 sender->td_maxwin = (win == 0 ? 1 : win); 588 sender->td_maxwin = (swin == 0 ? 1 : swin);
590 sender->td_maxend = end + sender->td_maxwin; 589 sender->td_maxend = end + sender->td_maxwin;
591 /* 590 /*
592 * We haven't seen traffic in the other direction yet 591 * We haven't seen traffic in the other direction yet
@@ -814,13 +813,19 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
814 return NF_ACCEPT; 813 return NF_ACCEPT;
815} 814}
816 815
816static unsigned int *tcp_get_timeouts(struct net *net)
817{
818 return tcp_timeouts;
819}
820
817/* Returns verdict for packet, or -1 for invalid. */ 821/* Returns verdict for packet, or -1 for invalid. */
818static int tcp_packet(struct nf_conn *ct, 822static int tcp_packet(struct nf_conn *ct,
819 const struct sk_buff *skb, 823 const struct sk_buff *skb,
820 unsigned int dataoff, 824 unsigned int dataoff,
821 enum ip_conntrack_info ctinfo, 825 enum ip_conntrack_info ctinfo,
822 u_int8_t pf, 826 u_int8_t pf,
823 unsigned int hooknum) 827 unsigned int hooknum,
828 unsigned int *timeouts)
824{ 829{
825 struct net *net = nf_ct_net(ct); 830 struct net *net = nf_ct_net(ct);
826 struct nf_conntrack_tuple *tuple; 831 struct nf_conntrack_tuple *tuple;
@@ -947,7 +952,8 @@ static int tcp_packet(struct nf_conn *ct,
947 spin_unlock_bh(&ct->lock); 952 spin_unlock_bh(&ct->lock);
948 if (LOG_INVALID(net, IPPROTO_TCP)) 953 if (LOG_INVALID(net, IPPROTO_TCP))
949 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 954 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
950 "nf_ct_tcp: invalid packet ignored "); 955 "nf_ct_tcp: invalid packet ignored in "
956 "state %s ", tcp_conntrack_names[old_state]);
951 return NF_ACCEPT; 957 return NF_ACCEPT;
952 case TCP_CONNTRACK_MAX: 958 case TCP_CONNTRACK_MAX:
953 /* Invalid packet */ 959 /* Invalid packet */
@@ -1015,14 +1021,14 @@ static int tcp_packet(struct nf_conn *ct,
1015 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1021 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1016 1022
1017 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans && 1023 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
1018 tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans) 1024 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1019 timeout = nf_ct_tcp_timeout_max_retrans; 1025 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1020 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & 1026 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1021 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1027 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1022 tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged) 1028 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1023 timeout = nf_ct_tcp_timeout_unacknowledged; 1029 timeout = timeouts[TCP_CONNTRACK_UNACK];
1024 else 1030 else
1025 timeout = tcp_timeouts[new_state]; 1031 timeout = timeouts[new_state];
1026 spin_unlock_bh(&ct->lock); 1032 spin_unlock_bh(&ct->lock);
1027 1033
1028 if (new_state != old_state) 1034 if (new_state != old_state)
@@ -1054,7 +1060,7 @@ static int tcp_packet(struct nf_conn *ct,
1054 1060
1055/* Called when a new connection for this protocol found. */ 1061/* Called when a new connection for this protocol found. */
1056static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, 1062static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1057 unsigned int dataoff) 1063 unsigned int dataoff, unsigned int *timeouts)
1058{ 1064{
1059 enum tcp_conntrack new_state; 1065 enum tcp_conntrack new_state;
1060 const struct tcphdr *th; 1066 const struct tcphdr *th;
@@ -1142,21 +1148,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1142 if (!nest_parms) 1148 if (!nest_parms)
1143 goto nla_put_failure; 1149 goto nla_put_failure;
1144 1150
1145 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state); 1151 if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1146 1152 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1147 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, 1153 ct->proto.tcp.seen[0].td_scale) ||
1148 ct->proto.tcp.seen[0].td_scale); 1154 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1149 1155 ct->proto.tcp.seen[1].td_scale))
1150 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, 1156 goto nla_put_failure;
1151 ct->proto.tcp.seen[1].td_scale);
1152 1157
1153 tmp.flags = ct->proto.tcp.seen[0].flags; 1158 tmp.flags = ct->proto.tcp.seen[0].flags;
1154 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, 1159 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1155 sizeof(struct nf_ct_tcp_flags), &tmp); 1160 sizeof(struct nf_ct_tcp_flags), &tmp))
1161 goto nla_put_failure;
1156 1162
1157 tmp.flags = ct->proto.tcp.seen[1].flags; 1163 tmp.flags = ct->proto.tcp.seen[1].flags;
1158 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, 1164 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1159 sizeof(struct nf_ct_tcp_flags), &tmp); 1165 sizeof(struct nf_ct_tcp_flags), &tmp))
1166 goto nla_put_failure;
1160 spin_unlock_bh(&ct->lock); 1167 spin_unlock_bh(&ct->lock);
1161 1168
1162 nla_nest_end(skb, nest_parms); 1169 nla_nest_end(skb, nest_parms);
@@ -1239,6 +1246,114 @@ static int tcp_nlattr_tuple_size(void)
1239} 1246}
1240#endif 1247#endif
1241 1248
1249#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1250
1251#include <linux/netfilter/nfnetlink.h>
1252#include <linux/netfilter/nfnetlink_cttimeout.h>
1253
1254static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
1255{
1256 unsigned int *timeouts = data;
1257 int i;
1258
1259 /* set default TCP timeouts. */
1260 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1261 timeouts[i] = tcp_timeouts[i];
1262
1263 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1264 timeouts[TCP_CONNTRACK_SYN_SENT] =
1265 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1266 }
1267 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1268 timeouts[TCP_CONNTRACK_SYN_RECV] =
1269 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1270 }
1271 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1272 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1273 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1274 }
1275 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1276 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1277 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1278 }
1279 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1280 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1281 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1282 }
1283 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1284 timeouts[TCP_CONNTRACK_LAST_ACK] =
1285 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1286 }
1287 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1288 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1289 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1290 }
1291 if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1292 timeouts[TCP_CONNTRACK_CLOSE] =
1293 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1294 }
1295 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1296 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1297 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1298 }
1299 if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1300 timeouts[TCP_CONNTRACK_RETRANS] =
1301 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1302 }
1303 if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1304 timeouts[TCP_CONNTRACK_UNACK] =
1305 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1306 }
1307 return 0;
1308}
1309
1310static int
1311tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1312{
1313 const unsigned int *timeouts = data;
1314
1315 if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1316 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1317 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1318 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1319 nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1320 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1321 nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1322 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1323 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1324 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1325 nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1326 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1327 nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1328 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1329 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1330 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1331 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1332 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1333 nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1334 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1335 nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1336 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1337 goto nla_put_failure;
1338 return 0;
1339
1340nla_put_failure:
1341 return -ENOSPC;
1342}
1343
1344static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1345 [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
1346 [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
1347 [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
1348 [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
1349 [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
1350 [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
1351 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
1352 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
1353 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
1354};
1355#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1356
1242#ifdef CONFIG_SYSCTL 1357#ifdef CONFIG_SYSCTL
1243static unsigned int tcp_sysctl_table_users; 1358static unsigned int tcp_sysctl_table_users;
1244static struct ctl_table_header *tcp_sysctl_header; 1359static struct ctl_table_header *tcp_sysctl_header;
@@ -1301,14 +1416,14 @@ static struct ctl_table tcp_sysctl_table[] = {
1301 }, 1416 },
1302 { 1417 {
1303 .procname = "nf_conntrack_tcp_timeout_max_retrans", 1418 .procname = "nf_conntrack_tcp_timeout_max_retrans",
1304 .data = &nf_ct_tcp_timeout_max_retrans, 1419 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1305 .maxlen = sizeof(unsigned int), 1420 .maxlen = sizeof(unsigned int),
1306 .mode = 0644, 1421 .mode = 0644,
1307 .proc_handler = proc_dointvec_jiffies, 1422 .proc_handler = proc_dointvec_jiffies,
1308 }, 1423 },
1309 { 1424 {
1310 .procname = "nf_conntrack_tcp_timeout_unacknowledged", 1425 .procname = "nf_conntrack_tcp_timeout_unacknowledged",
1311 .data = &nf_ct_tcp_timeout_unacknowledged, 1426 .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
1312 .maxlen = sizeof(unsigned int), 1427 .maxlen = sizeof(unsigned int),
1313 .mode = 0644, 1428 .mode = 0644,
1314 .proc_handler = proc_dointvec_jiffies, 1429 .proc_handler = proc_dointvec_jiffies,
@@ -1404,7 +1519,7 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
1404 }, 1519 },
1405 { 1520 {
1406 .procname = "ip_conntrack_tcp_timeout_max_retrans", 1521 .procname = "ip_conntrack_tcp_timeout_max_retrans",
1407 .data = &nf_ct_tcp_timeout_max_retrans, 1522 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1408 .maxlen = sizeof(unsigned int), 1523 .maxlen = sizeof(unsigned int),
1409 .mode = 0644, 1524 .mode = 0644,
1410 .proc_handler = proc_dointvec_jiffies, 1525 .proc_handler = proc_dointvec_jiffies,
@@ -1445,6 +1560,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1445 .print_tuple = tcp_print_tuple, 1560 .print_tuple = tcp_print_tuple,
1446 .print_conntrack = tcp_print_conntrack, 1561 .print_conntrack = tcp_print_conntrack,
1447 .packet = tcp_packet, 1562 .packet = tcp_packet,
1563 .get_timeouts = tcp_get_timeouts,
1448 .new = tcp_new, 1564 .new = tcp_new,
1449 .error = tcp_error, 1565 .error = tcp_error,
1450#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1566#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1456,6 +1572,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1456 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1572 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1457 .nla_policy = nf_ct_port_nla_policy, 1573 .nla_policy = nf_ct_port_nla_policy,
1458#endif 1574#endif
1575#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1576 .ctnl_timeout = {
1577 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1578 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1579 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1580 .obj_size = sizeof(unsigned int) *
1581 TCP_CONNTRACK_TIMEOUT_MAX,
1582 .nla_policy = tcp_timeout_nla_policy,
1583 },
1584#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1459#ifdef CONFIG_SYSCTL 1585#ifdef CONFIG_SYSCTL
1460 .ctl_table_users = &tcp_sysctl_table_users, 1586 .ctl_table_users = &tcp_sysctl_table_users,
1461 .ctl_table_header = &tcp_sysctl_header, 1587 .ctl_table_header = &tcp_sysctl_header,
@@ -1477,6 +1603,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1477 .print_tuple = tcp_print_tuple, 1603 .print_tuple = tcp_print_tuple,
1478 .print_conntrack = tcp_print_conntrack, 1604 .print_conntrack = tcp_print_conntrack,
1479 .packet = tcp_packet, 1605 .packet = tcp_packet,
1606 .get_timeouts = tcp_get_timeouts,
1480 .new = tcp_new, 1607 .new = tcp_new,
1481 .error = tcp_error, 1608 .error = tcp_error,
1482#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1609#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1488,6 +1615,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1488 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1615 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1489 .nla_policy = nf_ct_port_nla_policy, 1616 .nla_policy = nf_ct_port_nla_policy,
1490#endif 1617#endif
1618#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1619 .ctnl_timeout = {
1620 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1621 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1622 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1623 .obj_size = sizeof(unsigned int) *
1624 TCP_CONNTRACK_TIMEOUT_MAX,
1625 .nla_policy = tcp_timeout_nla_policy,
1626 },
1627#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1491#ifdef CONFIG_SYSCTL 1628#ifdef CONFIG_SYSCTL
1492 .ctl_table_users = &tcp_sysctl_table_users, 1629 .ctl_table_users = &tcp_sysctl_table_users,
1493 .ctl_table_header = &tcp_sysctl_header, 1630 .ctl_table_header = &tcp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5f35757fbff0..7259a6bdeb49 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,8 +25,16 @@
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
27 27
28static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 28enum udp_conntrack {
29static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; 29 UDP_CT_UNREPLIED,
30 UDP_CT_REPLIED,
31 UDP_CT_MAX
32};
33
34static unsigned int udp_timeouts[UDP_CT_MAX] = {
35 [UDP_CT_UNREPLIED] = 30*HZ,
36 [UDP_CT_REPLIED] = 180*HZ,
37};
30 38
31static bool udp_pkt_to_tuple(const struct sk_buff *skb, 39static bool udp_pkt_to_tuple(const struct sk_buff *skb,
32 unsigned int dataoff, 40 unsigned int dataoff,
@@ -63,30 +71,38 @@ static int udp_print_tuple(struct seq_file *s,
63 ntohs(tuple->dst.u.udp.port)); 71 ntohs(tuple->dst.u.udp.port));
64} 72}
65 73
74static unsigned int *udp_get_timeouts(struct net *net)
75{
76 return udp_timeouts;
77}
78
66/* Returns verdict for packet, and may modify conntracktype */ 79/* Returns verdict for packet, and may modify conntracktype */
67static int udp_packet(struct nf_conn *ct, 80static int udp_packet(struct nf_conn *ct,
68 const struct sk_buff *skb, 81 const struct sk_buff *skb,
69 unsigned int dataoff, 82 unsigned int dataoff,
70 enum ip_conntrack_info ctinfo, 83 enum ip_conntrack_info ctinfo,
71 u_int8_t pf, 84 u_int8_t pf,
72 unsigned int hooknum) 85 unsigned int hooknum,
86 unsigned int *timeouts)
73{ 87{
74 /* If we've seen traffic both ways, this is some kind of UDP 88 /* If we've seen traffic both ways, this is some kind of UDP
75 stream. Extend timeout. */ 89 stream. Extend timeout. */
76 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 90 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); 91 nf_ct_refresh_acct(ct, ctinfo, skb,
92 timeouts[UDP_CT_REPLIED]);
78 /* Also, more likely to be important, and not a probe */ 93 /* Also, more likely to be important, and not a probe */
79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 94 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
80 nf_conntrack_event_cache(IPCT_ASSURED, ct); 95 nf_conntrack_event_cache(IPCT_ASSURED, ct);
81 } else 96 } else {
82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); 97 nf_ct_refresh_acct(ct, ctinfo, skb,
83 98 timeouts[UDP_CT_UNREPLIED]);
99 }
84 return NF_ACCEPT; 100 return NF_ACCEPT;
85} 101}
86 102
87/* Called when a new connection for this protocol found. */ 103/* Called when a new connection for this protocol found. */
88static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, 104static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
89 unsigned int dataoff) 105 unsigned int dataoff, unsigned int *timeouts)
90{ 106{
91 return true; 107 return true;
92} 108}
@@ -136,20 +152,67 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
136 return NF_ACCEPT; 152 return NF_ACCEPT;
137} 153}
138 154
155#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
156
157#include <linux/netfilter/nfnetlink.h>
158#include <linux/netfilter/nfnetlink_cttimeout.h>
159
160static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
161{
162 unsigned int *timeouts = data;
163
164 /* set default timeouts for UDP. */
165 timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
166 timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
167
168 if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
169 timeouts[UDP_CT_UNREPLIED] =
170 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
171 }
172 if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
173 timeouts[UDP_CT_REPLIED] =
174 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
175 }
176 return 0;
177}
178
179static int
180udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
181{
182 const unsigned int *timeouts = data;
183
184 if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
185 htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
186 nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
187 htonl(timeouts[UDP_CT_REPLIED] / HZ)))
188 goto nla_put_failure;
189 return 0;
190
191nla_put_failure:
192 return -ENOSPC;
193}
194
195static const struct nla_policy
196udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
197 [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
198 [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
199};
200#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
201
139#ifdef CONFIG_SYSCTL 202#ifdef CONFIG_SYSCTL
140static unsigned int udp_sysctl_table_users; 203static unsigned int udp_sysctl_table_users;
141static struct ctl_table_header *udp_sysctl_header; 204static struct ctl_table_header *udp_sysctl_header;
142static struct ctl_table udp_sysctl_table[] = { 205static struct ctl_table udp_sysctl_table[] = {
143 { 206 {
144 .procname = "nf_conntrack_udp_timeout", 207 .procname = "nf_conntrack_udp_timeout",
145 .data = &nf_ct_udp_timeout, 208 .data = &udp_timeouts[UDP_CT_UNREPLIED],
146 .maxlen = sizeof(unsigned int), 209 .maxlen = sizeof(unsigned int),
147 .mode = 0644, 210 .mode = 0644,
148 .proc_handler = proc_dointvec_jiffies, 211 .proc_handler = proc_dointvec_jiffies,
149 }, 212 },
150 { 213 {
151 .procname = "nf_conntrack_udp_timeout_stream", 214 .procname = "nf_conntrack_udp_timeout_stream",
152 .data = &nf_ct_udp_timeout_stream, 215 .data = &udp_timeouts[UDP_CT_REPLIED],
153 .maxlen = sizeof(unsigned int), 216 .maxlen = sizeof(unsigned int),
154 .mode = 0644, 217 .mode = 0644,
155 .proc_handler = proc_dointvec_jiffies, 218 .proc_handler = proc_dointvec_jiffies,
@@ -160,14 +223,14 @@ static struct ctl_table udp_sysctl_table[] = {
160static struct ctl_table udp_compat_sysctl_table[] = { 223static struct ctl_table udp_compat_sysctl_table[] = {
161 { 224 {
162 .procname = "ip_conntrack_udp_timeout", 225 .procname = "ip_conntrack_udp_timeout",
163 .data = &nf_ct_udp_timeout, 226 .data = &udp_timeouts[UDP_CT_UNREPLIED],
164 .maxlen = sizeof(unsigned int), 227 .maxlen = sizeof(unsigned int),
165 .mode = 0644, 228 .mode = 0644,
166 .proc_handler = proc_dointvec_jiffies, 229 .proc_handler = proc_dointvec_jiffies,
167 }, 230 },
168 { 231 {
169 .procname = "ip_conntrack_udp_timeout_stream", 232 .procname = "ip_conntrack_udp_timeout_stream",
170 .data = &nf_ct_udp_timeout_stream, 233 .data = &udp_timeouts[UDP_CT_REPLIED],
171 .maxlen = sizeof(unsigned int), 234 .maxlen = sizeof(unsigned int),
172 .mode = 0644, 235 .mode = 0644,
173 .proc_handler = proc_dointvec_jiffies, 236 .proc_handler = proc_dointvec_jiffies,
@@ -186,6 +249,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
186 .invert_tuple = udp_invert_tuple, 249 .invert_tuple = udp_invert_tuple,
187 .print_tuple = udp_print_tuple, 250 .print_tuple = udp_print_tuple,
188 .packet = udp_packet, 251 .packet = udp_packet,
252 .get_timeouts = udp_get_timeouts,
189 .new = udp_new, 253 .new = udp_new,
190 .error = udp_error, 254 .error = udp_error,
191#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 255#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -194,6 +258,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
194 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 258 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
195 .nla_policy = nf_ct_port_nla_policy, 259 .nla_policy = nf_ct_port_nla_policy,
196#endif 260#endif
261#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
262 .ctnl_timeout = {
263 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
264 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
265 .nlattr_max = CTA_TIMEOUT_UDP_MAX,
266 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
267 .nla_policy = udp_timeout_nla_policy,
268 },
269#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
197#ifdef CONFIG_SYSCTL 270#ifdef CONFIG_SYSCTL
198 .ctl_table_users = &udp_sysctl_table_users, 271 .ctl_table_users = &udp_sysctl_table_users,
199 .ctl_table_header = &udp_sysctl_header, 272 .ctl_table_header = &udp_sysctl_header,
@@ -214,6 +287,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
214 .invert_tuple = udp_invert_tuple, 287 .invert_tuple = udp_invert_tuple,
215 .print_tuple = udp_print_tuple, 288 .print_tuple = udp_print_tuple,
216 .packet = udp_packet, 289 .packet = udp_packet,
290 .get_timeouts = udp_get_timeouts,
217 .new = udp_new, 291 .new = udp_new,
218 .error = udp_error, 292 .error = udp_error,
219#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 293#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -222,6 +296,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
222 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 296 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
223 .nla_policy = nf_ct_port_nla_policy, 297 .nla_policy = nf_ct_port_nla_policy,
224#endif 298#endif
299#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
300 .ctnl_timeout = {
301 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
302 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
303 .nlattr_max = CTA_TIMEOUT_UDP_MAX,
304 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
305 .nla_policy = udp_timeout_nla_policy,
306 },
307#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
225#ifdef CONFIG_SYSCTL 308#ifdef CONFIG_SYSCTL
226 .ctl_table_users = &udp_sysctl_table_users, 309 .ctl_table_users = &udp_sysctl_table_users,
227 .ctl_table_header = &udp_sysctl_header, 310 .ctl_table_header = &udp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index f52ca1181013..4d60a5376aa6 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -24,8 +24,16 @@
24#include <net/netfilter/nf_conntrack_ecache.h> 24#include <net/netfilter/nf_conntrack_ecache.h>
25#include <net/netfilter/nf_log.h> 25#include <net/netfilter/nf_log.h>
26 26
27static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ; 27enum udplite_conntrack {
28static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ; 28 UDPLITE_CT_UNREPLIED,
29 UDPLITE_CT_REPLIED,
30 UDPLITE_CT_MAX
31};
32
33static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
34 [UDPLITE_CT_UNREPLIED] = 30*HZ,
35 [UDPLITE_CT_REPLIED] = 180*HZ,
36};
29 37
30static bool udplite_pkt_to_tuple(const struct sk_buff *skb, 38static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff, 39 unsigned int dataoff,
@@ -60,31 +68,38 @@ static int udplite_print_tuple(struct seq_file *s,
60 ntohs(tuple->dst.u.udp.port)); 68 ntohs(tuple->dst.u.udp.port));
61} 69}
62 70
71static unsigned int *udplite_get_timeouts(struct net *net)
72{
73 return udplite_timeouts;
74}
75
63/* Returns verdict for packet, and may modify conntracktype */ 76/* Returns verdict for packet, and may modify conntracktype */
64static int udplite_packet(struct nf_conn *ct, 77static int udplite_packet(struct nf_conn *ct,
65 const struct sk_buff *skb, 78 const struct sk_buff *skb,
66 unsigned int dataoff, 79 unsigned int dataoff,
67 enum ip_conntrack_info ctinfo, 80 enum ip_conntrack_info ctinfo,
68 u_int8_t pf, 81 u_int8_t pf,
69 unsigned int hooknum) 82 unsigned int hooknum,
83 unsigned int *timeouts)
70{ 84{
71 /* If we've seen traffic both ways, this is some kind of UDP 85 /* If we've seen traffic both ways, this is some kind of UDP
72 stream. Extend timeout. */ 86 stream. Extend timeout. */
73 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 87 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
74 nf_ct_refresh_acct(ct, ctinfo, skb, 88 nf_ct_refresh_acct(ct, ctinfo, skb,
75 nf_ct_udplite_timeout_stream); 89 timeouts[UDPLITE_CT_REPLIED]);
76 /* Also, more likely to be important, and not a probe */ 90 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 91 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_ASSURED, ct); 92 nf_conntrack_event_cache(IPCT_ASSURED, ct);
79 } else 93 } else {
80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); 94 nf_ct_refresh_acct(ct, ctinfo, skb,
81 95 timeouts[UDPLITE_CT_UNREPLIED]);
96 }
82 return NF_ACCEPT; 97 return NF_ACCEPT;
83} 98}
84 99
85/* Called when a new connection for this protocol found. */ 100/* Called when a new connection for this protocol found. */
86static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, 101static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 102 unsigned int dataoff, unsigned int *timeouts)
88{ 103{
89 return true; 104 return true;
90} 105}
@@ -141,20 +156,67 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
141 return NF_ACCEPT; 156 return NF_ACCEPT;
142} 157}
143 158
159#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
160
161#include <linux/netfilter/nfnetlink.h>
162#include <linux/netfilter/nfnetlink_cttimeout.h>
163
164static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
165{
166 unsigned int *timeouts = data;
167
168 /* set default timeouts for UDPlite. */
169 timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
170 timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
171
172 if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
173 timeouts[UDPLITE_CT_UNREPLIED] =
174 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
175 }
176 if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
177 timeouts[UDPLITE_CT_REPLIED] =
178 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
179 }
180 return 0;
181}
182
183static int
184udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
185{
186 const unsigned int *timeouts = data;
187
188 if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
189 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
190 nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
191 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
192 goto nla_put_failure;
193 return 0;
194
195nla_put_failure:
196 return -ENOSPC;
197}
198
199static const struct nla_policy
200udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
201 [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
202 [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
203};
204#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
205
144#ifdef CONFIG_SYSCTL 206#ifdef CONFIG_SYSCTL
145static unsigned int udplite_sysctl_table_users; 207static unsigned int udplite_sysctl_table_users;
146static struct ctl_table_header *udplite_sysctl_header; 208static struct ctl_table_header *udplite_sysctl_header;
147static struct ctl_table udplite_sysctl_table[] = { 209static struct ctl_table udplite_sysctl_table[] = {
148 { 210 {
149 .procname = "nf_conntrack_udplite_timeout", 211 .procname = "nf_conntrack_udplite_timeout",
150 .data = &nf_ct_udplite_timeout, 212 .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
151 .maxlen = sizeof(unsigned int), 213 .maxlen = sizeof(unsigned int),
152 .mode = 0644, 214 .mode = 0644,
153 .proc_handler = proc_dointvec_jiffies, 215 .proc_handler = proc_dointvec_jiffies,
154 }, 216 },
155 { 217 {
156 .procname = "nf_conntrack_udplite_timeout_stream", 218 .procname = "nf_conntrack_udplite_timeout_stream",
157 .data = &nf_ct_udplite_timeout_stream, 219 .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
158 .maxlen = sizeof(unsigned int), 220 .maxlen = sizeof(unsigned int),
159 .mode = 0644, 221 .mode = 0644,
160 .proc_handler = proc_dointvec_jiffies, 222 .proc_handler = proc_dointvec_jiffies,
@@ -172,6 +234,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
172 .invert_tuple = udplite_invert_tuple, 234 .invert_tuple = udplite_invert_tuple,
173 .print_tuple = udplite_print_tuple, 235 .print_tuple = udplite_print_tuple,
174 .packet = udplite_packet, 236 .packet = udplite_packet,
237 .get_timeouts = udplite_get_timeouts,
175 .new = udplite_new, 238 .new = udplite_new,
176 .error = udplite_error, 239 .error = udplite_error,
177#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 240#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -180,6 +243,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
180 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 243 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
181 .nla_policy = nf_ct_port_nla_policy, 244 .nla_policy = nf_ct_port_nla_policy,
182#endif 245#endif
246#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
247 .ctnl_timeout = {
248 .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
249 .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
250 .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
251 .obj_size = sizeof(unsigned int) *
252 CTA_TIMEOUT_UDPLITE_MAX,
253 .nla_policy = udplite_timeout_nla_policy,
254 },
255#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
183#ifdef CONFIG_SYSCTL 256#ifdef CONFIG_SYSCTL
184 .ctl_table_users = &udplite_sysctl_table_users, 257 .ctl_table_users = &udplite_sysctl_table_users,
185 .ctl_table_header = &udplite_sysctl_header, 258 .ctl_table_header = &udplite_sysctl_header,
@@ -196,6 +269,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
196 .invert_tuple = udplite_invert_tuple, 269 .invert_tuple = udplite_invert_tuple,
197 .print_tuple = udplite_print_tuple, 270 .print_tuple = udplite_print_tuple,
198 .packet = udplite_packet, 271 .packet = udplite_packet,
272 .get_timeouts = udplite_get_timeouts,
199 .new = udplite_new, 273 .new = udplite_new,
200 .error = udplite_error, 274 .error = udplite_error,
201#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 275#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -204,6 +278,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
204 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 278 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
205 .nla_policy = nf_ct_port_nla_policy, 279 .nla_policy = nf_ct_port_nla_policy,
206#endif 280#endif
281#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
282 .ctnl_timeout = {
283 .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
284 .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
285 .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
286 .obj_size = sizeof(unsigned int) *
287 CTA_TIMEOUT_UDPLITE_MAX,
288 .nla_policy = udplite_timeout_nla_policy,
289 },
290#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
207#ifdef CONFIG_SYSCTL 291#ifdef CONFIG_SYSCTL
208 .ctl_table_users = &udplite_sysctl_table_users, 292 .ctl_table_users = &udplite_sysctl_table_users,
209 .ctl_table_header = &udplite_sysctl_header, 293 .ctl_table_header = &udplite_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab9bc28..9b3943252a5e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,18 +468,13 @@ static ctl_table nf_ct_netfilter_table[] = {
468 { } 468 { }
469}; 469};
470 470
471static struct ctl_path nf_ct_path[] = {
472 { .procname = "net", },
473 { }
474};
475
476static int nf_conntrack_standalone_init_sysctl(struct net *net) 471static int nf_conntrack_standalone_init_sysctl(struct net *net)
477{ 472{
478 struct ctl_table *table; 473 struct ctl_table *table;
479 474
480 if (net_eq(net, &init_net)) { 475 if (net_eq(net, &init_net)) {
481 nf_ct_netfilter_header = 476 nf_ct_netfilter_header =
482 register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); 477 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
483 if (!nf_ct_netfilter_header) 478 if (!nf_ct_netfilter_header)
484 goto out; 479 goto out;
485 } 480 }
@@ -494,8 +489,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
494 table[3].data = &net->ct.sysctl_checksum; 489 table[3].data = &net->ct.sysctl_checksum;
495 table[4].data = &net->ct.sysctl_log_invalid; 490 table[4].data = &net->ct.sysctl_log_invalid;
496 491
497 net->ct.sysctl_header = register_net_sysctl_table(net, 492 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
498 nf_net_netfilter_sysctl_path, table);
499 if (!net->ct.sysctl_header) 493 if (!net->ct.sysctl_header)
500 goto out_unregister_netfilter; 494 goto out_unregister_netfilter;
501 495
@@ -505,7 +499,7 @@ out_unregister_netfilter:
505 kfree(table); 499 kfree(table);
506out_kmemdup: 500out_kmemdup:
507 if (net_eq(net, &init_net)) 501 if (net_eq(net, &init_net))
508 unregister_sysctl_table(nf_ct_netfilter_header); 502 unregister_net_sysctl_table(nf_ct_netfilter_header);
509out: 503out:
510 printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); 504 printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
511 return -ENOMEM; 505 return -ENOMEM;
@@ -516,7 +510,7 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
516 struct ctl_table *table; 510 struct ctl_table *table;
517 511
518 if (net_eq(net, &init_net)) 512 if (net_eq(net, &init_net))
519 unregister_sysctl_table(nf_ct_netfilter_header); 513 unregister_net_sysctl_table(nf_ct_netfilter_header);
520 table = net->ct.sysctl_header->ctl_table_arg; 514 table = net->ct.sysctl_header->ctl_table_arg;
521 unregister_net_sysctl_table(net->ct.sysctl_header); 515 unregister_net_sysctl_table(net->ct.sysctl_header);
522 kfree(table); 516 kfree(table);
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
new file mode 100644
index 000000000000..a878ce5b252c
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -0,0 +1,60 @@
1/*
2 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
3 * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation (or any later at your option).
8 */
9
10#include <linux/types.h>
11#include <linux/netfilter.h>
12#include <linux/skbuff.h>
13#include <linux/vmalloc.h>
14#include <linux/stddef.h>
15#include <linux/err.h>
16#include <linux/percpu.h>
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/slab.h>
20#include <linux/export.h>
21
22#include <net/netfilter/nf_conntrack.h>
23#include <net/netfilter/nf_conntrack_core.h>
24#include <net/netfilter/nf_conntrack_extend.h>
25#include <net/netfilter/nf_conntrack_timeout.h>
26
27struct ctnl_timeout *
28(*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly;
29EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
30
31void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
32EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
33
34static struct nf_ct_ext_type timeout_extend __read_mostly = {
35 .len = sizeof(struct nf_conn_timeout),
36 .align = __alignof__(struct nf_conn_timeout),
37 .id = NF_CT_EXT_TIMEOUT,
38};
39
40int nf_conntrack_timeout_init(struct net *net)
41{
42 int ret = 0;
43
44 if (net_eq(net, &init_net)) {
45 ret = nf_ct_extend_register(&timeout_extend);
46 if (ret < 0) {
47 printk(KERN_ERR "nf_ct_timeout: Unable to register "
48 "timeout extension.\n");
49 return ret;
50 }
51 }
52
53 return 0;
54}
55
56void nf_conntrack_timeout_fini(struct net *net)
57{
58 if (net_eq(net, &init_net))
59 nf_ct_extend_unregister(&timeout_extend);
60}
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index e8d27afbbdb9..dbb364f62d6f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,8 +51,8 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
51 51
52 table[0].data = &net->ct.sysctl_tstamp; 52 table[0].data = &net->ct.sysctl_tstamp;
53 53
54 net->ct.tstamp_sysctl_header = register_net_sysctl_table(net, 54 net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
55 nf_net_netfilter_sysctl_path, table); 55 table);
56 if (!net->ct.tstamp_sysctl_header) { 56 if (!net->ct.tstamp_sysctl_header) {
57 printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n"); 57 printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
58 goto out_register; 58 goto out_register;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 957374a234d4..703fb26aa48d 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -214,13 +214,6 @@ static const struct file_operations nflog_file_ops = {
214#endif /* PROC_FS */ 214#endif /* PROC_FS */
215 215
216#ifdef CONFIG_SYSCTL 216#ifdef CONFIG_SYSCTL
217static struct ctl_path nf_log_sysctl_path[] = {
218 { .procname = "net", },
219 { .procname = "netfilter", },
220 { .procname = "nf_log", },
221 { }
222};
223
224static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; 217static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
225static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; 218static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
226static struct ctl_table_header *nf_log_dir_header; 219static struct ctl_table_header *nf_log_dir_header;
@@ -283,7 +276,7 @@ static __init int netfilter_log_sysctl_init(void)
283 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; 276 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
284 } 277 }
285 278
286 nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, 279 nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log",
287 nf_log_sysctl_table); 280 nf_log_sysctl_table);
288 if (!nf_log_dir_header) 281 if (!nf_log_dir_header)
289 return -ENOMEM; 282 return -ENOMEM;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 4d70785b953d..3e797d1fcb94 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -23,7 +23,6 @@
23#include <linux/net.h> 23#include <linux/net.h>
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <net/sock.h> 26#include <net/sock.h>
28#include <net/netlink.h> 27#include <net/netlink.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -104,7 +103,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group)
104EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 103EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
105 104
106int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, 105int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
107 unsigned group, int echo, gfp_t flags) 106 unsigned int group, int echo, gfp_t flags)
108{ 107{
109 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); 108 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
110} 109}
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 11ba013e47f6..b2e7310ca0b8 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/atomic.h>
13#include <linux/netlink.h> 14#include <linux/netlink.h>
14#include <linux/rculist.h> 15#include <linux/rculist.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -17,7 +18,6 @@
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <net/netlink.h> 19#include <net/netlink.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <asm/atomic.h>
21 21
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/netfilter/nfnetlink.h> 23#include <linux/netfilter/nfnetlink.h>
@@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
109 nfmsg->version = NFNETLINK_V0; 109 nfmsg->version = NFNETLINK_V0;
110 nfmsg->res_id = 0; 110 nfmsg->res_id = 0;
111 111
112 NLA_PUT_STRING(skb, NFACCT_NAME, acct->name); 112 if (nla_put_string(skb, NFACCT_NAME, acct->name))
113 goto nla_put_failure;
113 114
114 if (type == NFNL_MSG_ACCT_GET_CTRZERO) { 115 if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
115 pkts = atomic64_xchg(&acct->pkts, 0); 116 pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
118 pkts = atomic64_read(&acct->pkts); 119 pkts = atomic64_read(&acct->pkts);
119 bytes = atomic64_read(&acct->bytes); 120 bytes = atomic64_read(&acct->bytes);
120 } 121 }
121 NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts)); 122 if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
122 NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes)); 123 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
123 NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))); 124 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
125 goto nla_put_failure;
124 126
125 nlmsg_end(skb, nlh); 127 nlmsg_end(skb, nlh);
126 return skb->len; 128 return skb->len;
@@ -171,8 +173,10 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
171 char *acct_name; 173 char *acct_name;
172 174
173 if (nlh->nlmsg_flags & NLM_F_DUMP) { 175 if (nlh->nlmsg_flags & NLM_F_DUMP) {
174 return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump, 176 struct netlink_dump_control c = {
175 NULL, 0); 177 .dump = nfnl_acct_dump,
178 };
179 return netlink_dump_start(nfnl, skb, nlh, &c);
176 } 180 }
177 181
178 if (!tb[NFACCT_NAME]) 182 if (!tb[NFACCT_NAME])
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
new file mode 100644
index 000000000000..3e655288d1d6
--- /dev/null
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -0,0 +1,431 @@
1/*
2 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
3 * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation (or any later at your option).
8 */
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/rculist.h>
13#include <linux/rculist_nulls.h>
14#include <linux/types.h>
15#include <linux/timer.h>
16#include <linux/security.h>
17#include <linux/skbuff.h>
18#include <linux/errno.h>
19#include <linux/netlink.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
22#include <linux/slab.h>
23
24#include <linux/netfilter.h>
25#include <net/netlink.h>
26#include <net/sock.h>
27#include <net/netfilter/nf_conntrack.h>
28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_l3proto.h>
30#include <net/netfilter/nf_conntrack_l4proto.h>
31#include <net/netfilter/nf_conntrack_tuple.h>
32#include <net/netfilter/nf_conntrack_timeout.h>
33
34#include <linux/netfilter/nfnetlink.h>
35#include <linux/netfilter/nfnetlink_cttimeout.h>
36
37MODULE_LICENSE("GPL");
38MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
39MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
40
41static LIST_HEAD(cttimeout_list);
42
43static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
44 [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
45 [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
46 [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
47 [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
48};
49
50static int
51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
52 struct nf_conntrack_l4proto *l4proto,
53 const struct nlattr *attr)
54{
55 int ret = 0;
56
57 if (likely(l4proto->ctnl_timeout.nlattr_to_obj)) {
58 struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1];
59
60 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
61 attr, l4proto->ctnl_timeout.nla_policy);
62
63 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
64 }
65 return ret;
66}
67
68static int
69cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
70 const struct nlmsghdr *nlh,
71 const struct nlattr * const cda[])
72{
73 __u16 l3num;
74 __u8 l4num;
75 struct nf_conntrack_l4proto *l4proto;
76 struct ctnl_timeout *timeout, *matching = NULL;
77 char *name;
78 int ret;
79
80 if (!cda[CTA_TIMEOUT_NAME] ||
81 !cda[CTA_TIMEOUT_L3PROTO] ||
82 !cda[CTA_TIMEOUT_L4PROTO] ||
83 !cda[CTA_TIMEOUT_DATA])
84 return -EINVAL;
85
86 name = nla_data(cda[CTA_TIMEOUT_NAME]);
87 l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
88 l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
89
90 list_for_each_entry(timeout, &cttimeout_list, head) {
91 if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
92 continue;
93
94 if (nlh->nlmsg_flags & NLM_F_EXCL)
95 return -EEXIST;
96
97 matching = timeout;
98 break;
99 }
100
101 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
102
103 /* This protocol is not supportted, skip. */
104 if (l4proto->l4proto != l4num) {
105 ret = -EOPNOTSUPP;
106 goto err_proto_put;
107 }
108
109 if (matching) {
110 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
111 /* You cannot replace one timeout policy by another of
112 * different kind, sorry.
113 */
114 if (matching->l3num != l3num ||
115 matching->l4proto->l4proto != l4num) {
116 ret = -EINVAL;
117 goto err_proto_put;
118 }
119
120 ret = ctnl_timeout_parse_policy(matching, l4proto,
121 cda[CTA_TIMEOUT_DATA]);
122 return ret;
123 }
124 ret = -EBUSY;
125 goto err_proto_put;
126 }
127
128 timeout = kzalloc(sizeof(struct ctnl_timeout) +
129 l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
130 if (timeout == NULL) {
131 ret = -ENOMEM;
132 goto err_proto_put;
133 }
134
135 ret = ctnl_timeout_parse_policy(timeout, l4proto,
136 cda[CTA_TIMEOUT_DATA]);
137 if (ret < 0)
138 goto err;
139
140 strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
141 timeout->l3num = l3num;
142 timeout->l4proto = l4proto;
143 atomic_set(&timeout->refcnt, 1);
144 list_add_tail_rcu(&timeout->head, &cttimeout_list);
145
146 return 0;
147err:
148 kfree(timeout);
149err_proto_put:
150 nf_ct_l4proto_put(l4proto);
151 return ret;
152}
153
154static int
155ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
156 int event, struct ctnl_timeout *timeout)
157{
158 struct nlmsghdr *nlh;
159 struct nfgenmsg *nfmsg;
160 unsigned int flags = pid ? NLM_F_MULTI : 0;
161 struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
162
163 event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
164 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
165 if (nlh == NULL)
166 goto nlmsg_failure;
167
168 nfmsg = nlmsg_data(nlh);
169 nfmsg->nfgen_family = AF_UNSPEC;
170 nfmsg->version = NFNETLINK_V0;
171 nfmsg->res_id = 0;
172
173 if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
174 nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
175 nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
176 nla_put_be32(skb, CTA_TIMEOUT_USE,
177 htonl(atomic_read(&timeout->refcnt))))
178 goto nla_put_failure;
179
180 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
181 struct nlattr *nest_parms;
182 int ret;
183
184 nest_parms = nla_nest_start(skb,
185 CTA_TIMEOUT_DATA | NLA_F_NESTED);
186 if (!nest_parms)
187 goto nla_put_failure;
188
189 ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
190 if (ret < 0)
191 goto nla_put_failure;
192
193 nla_nest_end(skb, nest_parms);
194 }
195
196 nlmsg_end(skb, nlh);
197 return skb->len;
198
199nlmsg_failure:
200nla_put_failure:
201 nlmsg_cancel(skb, nlh);
202 return -1;
203}
204
205static int
206ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
207{
208 struct ctnl_timeout *cur, *last;
209
210 if (cb->args[2])
211 return 0;
212
213 last = (struct ctnl_timeout *)cb->args[1];
214 if (cb->args[1])
215 cb->args[1] = 0;
216
217 rcu_read_lock();
218 list_for_each_entry_rcu(cur, &cttimeout_list, head) {
219 if (last && cur != last)
220 continue;
221
222 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
223 cb->nlh->nlmsg_seq,
224 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
225 IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
226 cb->args[1] = (unsigned long)cur;
227 break;
228 }
229 }
230 if (!cb->args[1])
231 cb->args[2] = 1;
232 rcu_read_unlock();
233 return skb->len;
234}
235
236static int
237cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
238 const struct nlmsghdr *nlh,
239 const struct nlattr * const cda[])
240{
241 int ret = -ENOENT;
242 char *name;
243 struct ctnl_timeout *cur;
244
245 if (nlh->nlmsg_flags & NLM_F_DUMP) {
246 struct netlink_dump_control c = {
247 .dump = ctnl_timeout_dump,
248 };
249 return netlink_dump_start(ctnl, skb, nlh, &c);
250 }
251
252 if (!cda[CTA_TIMEOUT_NAME])
253 return -EINVAL;
254 name = nla_data(cda[CTA_TIMEOUT_NAME]);
255
256 list_for_each_entry(cur, &cttimeout_list, head) {
257 struct sk_buff *skb2;
258
259 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
260 continue;
261
262 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
263 if (skb2 == NULL) {
264 ret = -ENOMEM;
265 break;
266 }
267
268 ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
269 nlh->nlmsg_seq,
270 NFNL_MSG_TYPE(nlh->nlmsg_type),
271 IPCTNL_MSG_TIMEOUT_NEW, cur);
272 if (ret <= 0) {
273 kfree_skb(skb2);
274 break;
275 }
276 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
277 MSG_DONTWAIT);
278 if (ret > 0)
279 ret = 0;
280
281 /* this avoids a loop in nfnetlink. */
282 return ret == -EAGAIN ? -ENOBUFS : ret;
283 }
284 return ret;
285}
286
287/* try to delete object, fail if it is still in use. */
288static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
289{
290 int ret = 0;
291
292 /* we want to avoid races with nf_ct_timeout_find_get. */
293 if (atomic_dec_and_test(&timeout->refcnt)) {
294 /* We are protected by nfnl mutex. */
295 list_del_rcu(&timeout->head);
296 nf_ct_l4proto_put(timeout->l4proto);
297 kfree_rcu(timeout, rcu_head);
298 } else {
299 /* still in use, restore reference counter. */
300 atomic_inc(&timeout->refcnt);
301 ret = -EBUSY;
302 }
303 return ret;
304}
305
306static int
307cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
308 const struct nlmsghdr *nlh,
309 const struct nlattr * const cda[])
310{
311 char *name;
312 struct ctnl_timeout *cur;
313 int ret = -ENOENT;
314
315 if (!cda[CTA_TIMEOUT_NAME]) {
316 list_for_each_entry(cur, &cttimeout_list, head)
317 ctnl_timeout_try_del(cur);
318
319 return 0;
320 }
321 name = nla_data(cda[CTA_TIMEOUT_NAME]);
322
323 list_for_each_entry(cur, &cttimeout_list, head) {
324 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
325 continue;
326
327 ret = ctnl_timeout_try_del(cur);
328 if (ret < 0)
329 return ret;
330
331 break;
332 }
333 return ret;
334}
335
336#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
337static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
338{
339 struct ctnl_timeout *timeout, *matching = NULL;
340
341 rcu_read_lock();
342 list_for_each_entry_rcu(timeout, &cttimeout_list, head) {
343 if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
344 continue;
345
346 if (!try_module_get(THIS_MODULE))
347 goto err;
348
349 if (!atomic_inc_not_zero(&timeout->refcnt)) {
350 module_put(THIS_MODULE);
351 goto err;
352 }
353 matching = timeout;
354 break;
355 }
356err:
357 rcu_read_unlock();
358 return matching;
359}
360
361static void ctnl_timeout_put(struct ctnl_timeout *timeout)
362{
363 atomic_dec(&timeout->refcnt);
364 module_put(THIS_MODULE);
365}
366#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
367
368static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
369 [IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
370 .attr_count = CTA_TIMEOUT_MAX,
371 .policy = cttimeout_nla_policy },
372 [IPCTNL_MSG_TIMEOUT_GET] = { .call = cttimeout_get_timeout,
373 .attr_count = CTA_TIMEOUT_MAX,
374 .policy = cttimeout_nla_policy },
375 [IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
376 .attr_count = CTA_TIMEOUT_MAX,
377 .policy = cttimeout_nla_policy },
378};
379
380static const struct nfnetlink_subsystem cttimeout_subsys = {
381 .name = "conntrack_timeout",
382 .subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
383 .cb_count = IPCTNL_MSG_TIMEOUT_MAX,
384 .cb = cttimeout_cb,
385};
386
387MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
388
389static int __init cttimeout_init(void)
390{
391 int ret;
392
393 ret = nfnetlink_subsys_register(&cttimeout_subsys);
394 if (ret < 0) {
395 pr_err("cttimeout_init: cannot register cttimeout with "
396 "nfnetlink.\n");
397 goto err_out;
398 }
399#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
400 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
401 RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
402#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
403 return 0;
404
405err_out:
406 return ret;
407}
408
409static void __exit cttimeout_exit(void)
410{
411 struct ctnl_timeout *cur, *tmp;
412
413 pr_info("cttimeout: unregistering from nfnetlink.\n");
414
415 nfnetlink_subsys_unregister(&cttimeout_subsys);
416 list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
417 list_del_rcu(&cur->head);
418 /* We are sure that our objects have no clients at this point,
419 * it's safe to release them all without checking refcnt.
420 */
421 nf_ct_l4proto_put(cur->l4proto);
422 kfree_rcu(cur, rcu_head);
423 }
424#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
425 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
426 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
427#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
428}
429
430module_init(cttimeout_init);
431module_exit(cttimeout_exit);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54c544f..3c3cfc0cc9b5 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst,
391 pmsg.hw_protocol = skb->protocol; 391 pmsg.hw_protocol = skb->protocol;
392 pmsg.hook = hooknum; 392 pmsg.hook = hooknum;
393 393
394 NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); 394 if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
395 goto nla_put_failure;
395 396
396 if (prefix) 397 if (prefix &&
397 NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); 398 nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
399 goto nla_put_failure;
398 400
399 if (indev) { 401 if (indev) {
400#ifndef CONFIG_BRIDGE_NETFILTER 402#ifndef CONFIG_BRIDGE_NETFILTER
401 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 403 if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
402 htonl(indev->ifindex)); 404 htonl(indev->ifindex)))
405 goto nla_put_failure;
403#else 406#else
404 if (pf == PF_BRIDGE) { 407 if (pf == PF_BRIDGE) {
405 /* Case 1: outdev is physical input device, we need to 408 /* Case 1: outdev is physical input device, we need to
406 * look for bridge group (when called from 409 * look for bridge group (when called from
407 * netfilter_bridge) */ 410 * netfilter_bridge) */
408 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, 411 if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
409 htonl(indev->ifindex)); 412 htonl(indev->ifindex)) ||
410 /* this is the bridge group "brX" */ 413 /* this is the bridge group "brX" */
411 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ 414 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
412 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 415 nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
413 htonl(br_port_get_rcu(indev)->br->dev->ifindex)); 416 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
417 goto nla_put_failure;
414 } else { 418 } else {
415 /* Case 2: indev is bridge group, we need to look for 419 /* Case 2: indev is bridge group, we need to look for
416 * physical device (when called from ipv4) */ 420 * physical device (when called from ipv4) */
417 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 421 if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
418 htonl(indev->ifindex)); 422 htonl(indev->ifindex)))
419 if (skb->nf_bridge && skb->nf_bridge->physindev) 423 goto nla_put_failure;
420 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, 424 if (skb->nf_bridge && skb->nf_bridge->physindev &&
421 htonl(skb->nf_bridge->physindev->ifindex)); 425 nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
426 htonl(skb->nf_bridge->physindev->ifindex)))
427 goto nla_put_failure;
422 } 428 }
423#endif 429#endif
424 } 430 }
425 431
426 if (outdev) { 432 if (outdev) {
427#ifndef CONFIG_BRIDGE_NETFILTER 433#ifndef CONFIG_BRIDGE_NETFILTER
428 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 434 if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
429 htonl(outdev->ifindex)); 435 htonl(outdev->ifindex)))
436 goto nla_put_failure;
430#else 437#else
431 if (pf == PF_BRIDGE) { 438 if (pf == PF_BRIDGE) {
432 /* Case 1: outdev is physical output device, we need to 439 /* Case 1: outdev is physical output device, we need to
433 * look for bridge group (when called from 440 * look for bridge group (when called from
434 * netfilter_bridge) */ 441 * netfilter_bridge) */
435 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, 442 if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
436 htonl(outdev->ifindex)); 443 htonl(outdev->ifindex)) ||
437 /* this is the bridge group "brX" */ 444 /* this is the bridge group "brX" */
438 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ 445 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
439 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 446 nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
440 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); 447 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
448 goto nla_put_failure;
441 } else { 449 } else {
442 /* Case 2: indev is a bridge group, we need to look 450 /* Case 2: indev is a bridge group, we need to look
443 * for physical device (when called from ipv4) */ 451 * for physical device (when called from ipv4) */
444 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 452 if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
445 htonl(outdev->ifindex)); 453 htonl(outdev->ifindex)))
446 if (skb->nf_bridge && skb->nf_bridge->physoutdev) 454 goto nla_put_failure;
447 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, 455 if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
448 htonl(skb->nf_bridge->physoutdev->ifindex)); 456 nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
457 htonl(skb->nf_bridge->physoutdev->ifindex)))
458 goto nla_put_failure;
449 } 459 }
450#endif 460#endif
451 } 461 }
452 462
453 if (skb->mark) 463 if (skb->mark &&
454 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); 464 nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
465 goto nla_put_failure;
455 466
456 if (indev && skb->dev && 467 if (indev && skb->dev &&
457 skb->mac_header != skb->network_header) { 468 skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst,
459 int len = dev_parse_header(skb, phw.hw_addr); 470 int len = dev_parse_header(skb, phw.hw_addr);
460 if (len > 0) { 471 if (len > 0) {
461 phw.hw_addrlen = htons(len); 472 phw.hw_addrlen = htons(len);
462 NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); 473 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
474 goto nla_put_failure;
463 } 475 }
464 } 476 }
465 477
466 if (indev && skb_mac_header_was_set(skb)) { 478 if (indev && skb_mac_header_was_set(skb)) {
467 NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); 479 if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
468 NLA_PUT_BE16(inst->skb, NFULA_HWLEN, 480 nla_put_be16(inst->skb, NFULA_HWLEN,
469 htons(skb->dev->hard_header_len)); 481 htons(skb->dev->hard_header_len)) ||
470 NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 482 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
471 skb_mac_header(skb)); 483 skb_mac_header(skb)))
484 goto nla_put_failure;
472 } 485 }
473 486
474 if (skb->tstamp.tv64) { 487 if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst,
477 ts.sec = cpu_to_be64(tv.tv_sec); 490 ts.sec = cpu_to_be64(tv.tv_sec);
478 ts.usec = cpu_to_be64(tv.tv_usec); 491 ts.usec = cpu_to_be64(tv.tv_usec);
479 492
480 NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); 493 if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
494 goto nla_put_failure;
481 } 495 }
482 496
483 /* UID */ 497 /* UID */
@@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst,
487 struct file *file = skb->sk->sk_socket->file; 501 struct file *file = skb->sk->sk_socket->file;
488 __be32 uid = htonl(file->f_cred->fsuid); 502 __be32 uid = htonl(file->f_cred->fsuid);
489 __be32 gid = htonl(file->f_cred->fsgid); 503 __be32 gid = htonl(file->f_cred->fsgid);
490 /* need to unlock here since NLA_PUT may goto */
491 read_unlock_bh(&skb->sk->sk_callback_lock); 504 read_unlock_bh(&skb->sk->sk_callback_lock);
492 NLA_PUT_BE32(inst->skb, NFULA_UID, uid); 505 if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
493 NLA_PUT_BE32(inst->skb, NFULA_GID, gid); 506 nla_put_be32(inst->skb, NFULA_GID, gid))
507 goto nla_put_failure;
494 } else 508 } else
495 read_unlock_bh(&skb->sk->sk_callback_lock); 509 read_unlock_bh(&skb->sk->sk_callback_lock);
496 } 510 }
497 511
498 /* local sequence number */ 512 /* local sequence number */
499 if (inst->flags & NFULNL_CFG_F_SEQ) 513 if ((inst->flags & NFULNL_CFG_F_SEQ) &&
500 NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); 514 nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
515 goto nla_put_failure;
501 516
502 /* global sequence number */ 517 /* global sequence number */
503 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) 518 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
504 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, 519 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
505 htonl(atomic_inc_return(&global_seq))); 520 htonl(atomic_inc_return(&global_seq))))
521 goto nla_put_failure;
506 522
507 if (data_len) { 523 if (data_len) {
508 struct nlattr *nla; 524 struct nlattr *nla;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a80b0cb03f17..4162437b8361 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
288 indev = entry->indev; 288 indev = entry->indev;
289 if (indev) { 289 if (indev) {
290#ifndef CONFIG_BRIDGE_NETFILTER 290#ifndef CONFIG_BRIDGE_NETFILTER
291 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); 291 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
292 goto nla_put_failure;
292#else 293#else
293 if (entry->pf == PF_BRIDGE) { 294 if (entry->pf == PF_BRIDGE) {
294 /* Case 1: indev is physical input device, we need to 295 /* Case 1: indev is physical input device, we need to
295 * look for bridge group (when called from 296 * look for bridge group (when called from
296 * netfilter_bridge) */ 297 * netfilter_bridge) */
297 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, 298 if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
298 htonl(indev->ifindex)); 299 htonl(indev->ifindex)) ||
299 /* this is the bridge group "brX" */ 300 /* this is the bridge group "brX" */
300 /* rcu_read_lock()ed by __nf_queue */ 301 /* rcu_read_lock()ed by __nf_queue */
301 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, 302 nla_put_be32(skb, NFQA_IFINDEX_INDEV,
302 htonl(br_port_get_rcu(indev)->br->dev->ifindex)); 303 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
304 goto nla_put_failure;
303 } else { 305 } else {
304 /* Case 2: indev is bridge group, we need to look for 306 /* Case 2: indev is bridge group, we need to look for
305 * physical device (when called from ipv4) */ 307 * physical device (when called from ipv4) */
306 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, 308 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
307 htonl(indev->ifindex)); 309 htonl(indev->ifindex)))
308 if (entskb->nf_bridge && entskb->nf_bridge->physindev) 310 goto nla_put_failure;
309 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, 311 if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
310 htonl(entskb->nf_bridge->physindev->ifindex)); 312 nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
313 htonl(entskb->nf_bridge->physindev->ifindex)))
314 goto nla_put_failure;
311 } 315 }
312#endif 316#endif
313 } 317 }
314 318
315 if (outdev) { 319 if (outdev) {
316#ifndef CONFIG_BRIDGE_NETFILTER 320#ifndef CONFIG_BRIDGE_NETFILTER
317 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); 321 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
322 goto nla_put_failure;
318#else 323#else
319 if (entry->pf == PF_BRIDGE) { 324 if (entry->pf == PF_BRIDGE) {
320 /* Case 1: outdev is physical output device, we need to 325 /* Case 1: outdev is physical output device, we need to
321 * look for bridge group (when called from 326 * look for bridge group (when called from
322 * netfilter_bridge) */ 327 * netfilter_bridge) */
323 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, 328 if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
324 htonl(outdev->ifindex)); 329 htonl(outdev->ifindex)) ||
325 /* this is the bridge group "brX" */ 330 /* this is the bridge group "brX" */
326 /* rcu_read_lock()ed by __nf_queue */ 331 /* rcu_read_lock()ed by __nf_queue */
327 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 332 nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
328 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); 333 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
334 goto nla_put_failure;
329 } else { 335 } else {
330 /* Case 2: outdev is bridge group, we need to look for 336 /* Case 2: outdev is bridge group, we need to look for
331 * physical output device (when called from ipv4) */ 337 * physical output device (when called from ipv4) */
332 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 338 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
333 htonl(outdev->ifindex)); 339 htonl(outdev->ifindex)))
334 if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) 340 goto nla_put_failure;
335 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, 341 if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
336 htonl(entskb->nf_bridge->physoutdev->ifindex)); 342 nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
343 htonl(entskb->nf_bridge->physoutdev->ifindex)))
344 goto nla_put_failure;
337 } 345 }
338#endif 346#endif
339 } 347 }
340 348
341 if (entskb->mark) 349 if (entskb->mark &&
342 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); 350 nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
351 goto nla_put_failure;
343 352
344 if (indev && entskb->dev && 353 if (indev && entskb->dev &&
345 entskb->mac_header != entskb->network_header) { 354 entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
347 int len = dev_parse_header(entskb, phw.hw_addr); 356 int len = dev_parse_header(entskb, phw.hw_addr);
348 if (len) { 357 if (len) {
349 phw.hw_addrlen = htons(len); 358 phw.hw_addrlen = htons(len);
350 NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); 359 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
360 goto nla_put_failure;
351 } 361 }
352 } 362 }
353 363
@@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
357 ts.sec = cpu_to_be64(tv.tv_sec); 367 ts.sec = cpu_to_be64(tv.tv_sec);
358 ts.usec = cpu_to_be64(tv.tv_usec); 368 ts.usec = cpu_to_be64(tv.tv_usec);
359 369
360 NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); 370 if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
371 goto nla_put_failure;
361 } 372 }
362 373
363 if (data_len) { 374 if (data_len) {
@@ -384,8 +395,7 @@ nlmsg_failure:
384nla_put_failure: 395nla_put_failure:
385 if (skb) 396 if (skb)
386 kfree_skb(skb); 397 kfree_skb(skb);
387 if (net_ratelimit()) 398 net_err_ratelimited("nf_queue: error creating packet message\n");
388 printk(KERN_ERR "nf_queue: error creating packet message\n");
389 return NULL; 399 return NULL;
390} 400}
391 401
@@ -422,10 +432,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
422 } 432 }
423 if (queue->queue_total >= queue->queue_maxlen) { 433 if (queue->queue_total >= queue->queue_maxlen) {
424 queue->queue_dropped++; 434 queue->queue_dropped++;
425 if (net_ratelimit()) 435 net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
426 printk(KERN_WARNING "nf_queue: full at %d entries, " 436 queue->queue_total);
427 "dropping packets(s).\n",
428 queue->queue_total);
429 goto err_out_free_nskb; 437 goto err_out_free_nskb;
430 } 438 }
431 entry->id = ++queue->id_sequence; 439 entry->id = ++queue->id_sequence;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10de75a..a51de9b052be 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -14,12 +14,14 @@
14#include <linux/netfilter/x_tables.h> 14#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter/xt_CT.h> 15#include <linux/netfilter/xt_CT.h>
16#include <net/netfilter/nf_conntrack.h> 16#include <net/netfilter/nf_conntrack.h>
17#include <net/netfilter/nf_conntrack_l4proto.h>
17#include <net/netfilter/nf_conntrack_helper.h> 18#include <net/netfilter/nf_conntrack_helper.h>
18#include <net/netfilter/nf_conntrack_ecache.h> 19#include <net/netfilter/nf_conntrack_ecache.h>
20#include <net/netfilter/nf_conntrack_timeout.h>
19#include <net/netfilter/nf_conntrack_zones.h> 21#include <net/netfilter/nf_conntrack_zones.h>
20 22
21static unsigned int xt_ct_target(struct sk_buff *skb, 23static unsigned int xt_ct_target_v0(struct sk_buff *skb,
22 const struct xt_action_param *par) 24 const struct xt_action_param *par)
23{ 25{
24 const struct xt_ct_target_info *info = par->targinfo; 26 const struct xt_ct_target_info *info = par->targinfo;
25 struct nf_conn *ct = info->ct; 27 struct nf_conn *ct = info->ct;
@@ -35,6 +37,23 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
35 return XT_CONTINUE; 37 return XT_CONTINUE;
36} 38}
37 39
40static unsigned int xt_ct_target_v1(struct sk_buff *skb,
41 const struct xt_action_param *par)
42{
43 const struct xt_ct_target_info_v1 *info = par->targinfo;
44 struct nf_conn *ct = info->ct;
45
46 /* Previously seen (loopback)? Ignore. */
47 if (skb->nfct != NULL)
48 return XT_CONTINUE;
49
50 atomic_inc(&ct->ct_general.use);
51 skb->nfct = &ct->ct_general;
52 skb->nfctinfo = IP_CT_NEW;
53
54 return XT_CONTINUE;
55}
56
38static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) 57static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
39{ 58{
40 if (par->family == NFPROTO_IPV4) { 59 if (par->family == NFPROTO_IPV4) {
@@ -53,7 +72,7 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
53 return 0; 72 return 0;
54} 73}
55 74
56static int xt_ct_tg_check(const struct xt_tgchk_param *par) 75static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
57{ 76{
58 struct xt_ct_target_info *info = par->targinfo; 77 struct xt_ct_target_info *info = par->targinfo;
59 struct nf_conntrack_tuple t; 78 struct nf_conntrack_tuple t;
@@ -130,7 +149,164 @@ err1:
130 return ret; 149 return ret;
131} 150}
132 151
133static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) 152#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
153static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
154{
155 typeof(nf_ct_timeout_put_hook) timeout_put;
156
157 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
158 if (timeout_put)
159 timeout_put(timeout);
160}
161#endif
162
163static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
164{
165 struct xt_ct_target_info_v1 *info = par->targinfo;
166 struct nf_conntrack_tuple t;
167 struct nf_conn_help *help;
168 struct nf_conn *ct;
169 int ret = 0;
170 u8 proto;
171#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
172 struct ctnl_timeout *timeout;
173#endif
174 if (info->flags & ~XT_CT_NOTRACK)
175 return -EINVAL;
176
177 if (info->flags & XT_CT_NOTRACK) {
178 ct = nf_ct_untracked_get();
179 atomic_inc(&ct->ct_general.use);
180 goto out;
181 }
182
183#ifndef CONFIG_NF_CONNTRACK_ZONES
184 if (info->zone)
185 goto err1;
186#endif
187
188 ret = nf_ct_l3proto_try_module_get(par->family);
189 if (ret < 0)
190 goto err1;
191
192 memset(&t, 0, sizeof(t));
193 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
194 ret = PTR_ERR(ct);
195 if (IS_ERR(ct))
196 goto err2;
197
198 ret = 0;
199 if ((info->ct_events || info->exp_events) &&
200 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
201 GFP_KERNEL))
202 goto err3;
203
204 if (info->helper[0]) {
205 ret = -ENOENT;
206 proto = xt_ct_find_proto(par);
207 if (!proto) {
208 pr_info("You must specify a L4 protocol, "
209 "and not use inversions on it.\n");
210 goto err3;
211 }
212
213 ret = -ENOMEM;
214 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
215 if (help == NULL)
216 goto err3;
217
218 ret = -ENOENT;
219 help->helper = nf_conntrack_helper_try_module_get(info->helper,
220 par->family,
221 proto);
222 if (help->helper == NULL) {
223 pr_info("No such helper \"%s\"\n", info->helper);
224 goto err3;
225 }
226 }
227
228#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
229 if (info->timeout[0]) {
230 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
231 struct nf_conn_timeout *timeout_ext;
232
233 rcu_read_lock();
234 timeout_find_get =
235 rcu_dereference(nf_ct_timeout_find_get_hook);
236
237 if (timeout_find_get) {
238 const struct ipt_entry *e = par->entryinfo;
239 struct nf_conntrack_l4proto *l4proto;
240
241 if (e->ip.invflags & IPT_INV_PROTO) {
242 ret = -EINVAL;
243 pr_info("You cannot use inversion on "
244 "L4 protocol\n");
245 goto err4;
246 }
247 timeout = timeout_find_get(info->timeout);
248 if (timeout == NULL) {
249 ret = -ENOENT;
250 pr_info("No such timeout policy \"%s\"\n",
251 info->timeout);
252 goto err4;
253 }
254 if (timeout->l3num != par->family) {
255 ret = -EINVAL;
256 pr_info("Timeout policy `%s' can only be "
257 "used by L3 protocol number %d\n",
258 info->timeout, timeout->l3num);
259 goto err5;
260 }
261 /* Make sure the timeout policy matches any existing
262 * protocol tracker, otherwise default to generic.
263 */
264 l4proto = __nf_ct_l4proto_find(par->family,
265 e->ip.proto);
266 if (timeout->l4proto->l4proto != l4proto->l4proto) {
267 ret = -EINVAL;
268 pr_info("Timeout policy `%s' can only be "
269 "used by L4 protocol number %d\n",
270 info->timeout,
271 timeout->l4proto->l4proto);
272 goto err5;
273 }
274 timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
275 GFP_ATOMIC);
276 if (timeout_ext == NULL) {
277 ret = -ENOMEM;
278 goto err5;
279 }
280 } else {
281 ret = -ENOENT;
282 pr_info("Timeout policy base is empty\n");
283 goto err4;
284 }
285 rcu_read_unlock();
286 }
287#endif
288
289 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
290 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
291out:
292 info->ct = ct;
293 return 0;
294
295#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
296err5:
297 __xt_ct_tg_timeout_put(timeout);
298err4:
299 rcu_read_unlock();
300#endif
301err3:
302 nf_conntrack_free(ct);
303err2:
304 nf_ct_l3proto_module_put(par->family);
305err1:
306 return ret;
307}
308
309static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
134{ 310{
135 struct xt_ct_target_info *info = par->targinfo; 311 struct xt_ct_target_info *info = par->targinfo;
136 struct nf_conn *ct = info->ct; 312 struct nf_conn *ct = info->ct;
@@ -146,25 +322,69 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
146 nf_ct_put(info->ct); 322 nf_ct_put(info->ct);
147} 323}
148 324
149static struct xt_target xt_ct_tg __read_mostly = { 325static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
150 .name = "CT", 326{
151 .family = NFPROTO_UNSPEC, 327 struct xt_ct_target_info_v1 *info = par->targinfo;
152 .targetsize = sizeof(struct xt_ct_target_info), 328 struct nf_conn *ct = info->ct;
153 .checkentry = xt_ct_tg_check, 329 struct nf_conn_help *help;
154 .destroy = xt_ct_tg_destroy, 330#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
155 .target = xt_ct_target, 331 struct nf_conn_timeout *timeout_ext;
156 .table = "raw", 332 typeof(nf_ct_timeout_put_hook) timeout_put;
157 .me = THIS_MODULE, 333#endif
334 if (!nf_ct_is_untracked(ct)) {
335 help = nfct_help(ct);
336 if (help)
337 module_put(help->helper->me);
338
339 nf_ct_l3proto_module_put(par->family);
340
341#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
342 rcu_read_lock();
343 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
344
345 if (timeout_put) {
346 timeout_ext = nf_ct_timeout_find(ct);
347 if (timeout_ext)
348 timeout_put(timeout_ext->timeout);
349 }
350 rcu_read_unlock();
351#endif
352 }
353 nf_ct_put(info->ct);
354}
355
356static struct xt_target xt_ct_tg_reg[] __read_mostly = {
357 {
358 .name = "CT",
359 .family = NFPROTO_UNSPEC,
360 .targetsize = sizeof(struct xt_ct_target_info),
361 .checkentry = xt_ct_tg_check_v0,
362 .destroy = xt_ct_tg_destroy_v0,
363 .target = xt_ct_target_v0,
364 .table = "raw",
365 .me = THIS_MODULE,
366 },
367 {
368 .name = "CT",
369 .family = NFPROTO_UNSPEC,
370 .revision = 1,
371 .targetsize = sizeof(struct xt_ct_target_info_v1),
372 .checkentry = xt_ct_tg_check_v1,
373 .destroy = xt_ct_tg_destroy_v1,
374 .target = xt_ct_target_v1,
375 .table = "raw",
376 .me = THIS_MODULE,
377 },
158}; 378};
159 379
160static int __init xt_ct_tg_init(void) 380static int __init xt_ct_tg_init(void)
161{ 381{
162 return xt_register_target(&xt_ct_tg); 382 return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
163} 383}
164 384
165static void __exit xt_ct_tg_exit(void) 385static void __exit xt_ct_tg_exit(void)
166{ 386{
167 xt_unregister_target(&xt_ct_tg); 387 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
168} 388}
169 389
170module_init(xt_ct_tg_init); 390module_init(xt_ct_tg_init);
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
new file mode 100644
index 000000000000..0a96a43108ed
--- /dev/null
+++ b/net/netfilter/xt_HMARK.c
@@ -0,0 +1,362 @@
1/*
2 * xt_HMARK - Netfilter module to set mark by means of hashing
3 *
4 * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com>
5 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/icmp.h>
15
16#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter/xt_HMARK.h>
18
19#include <net/ip.h>
20#if IS_ENABLED(CONFIG_NF_CONNTRACK)
21#include <net/netfilter/nf_conntrack.h>
22#endif
23#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
24#include <net/ipv6.h>
25#include <linux/netfilter_ipv6/ip6_tables.h>
26#endif
27
28MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>");
30MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
31MODULE_ALIAS("ipt_HMARK");
32MODULE_ALIAS("ip6t_HMARK");
33
34struct hmark_tuple {
35 u32 src;
36 u32 dst;
37 union hmark_ports uports;
38 uint8_t proto;
39};
40
41static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
42{
43 return (addr32[0] & mask[0]) ^
44 (addr32[1] & mask[1]) ^
45 (addr32[2] & mask[2]) ^
46 (addr32[3] & mask[3]);
47}
48
49static inline u32
50hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
51{
52 switch (l3num) {
53 case AF_INET:
54 return *addr32 & *mask;
55 case AF_INET6:
56 return hmark_addr6_mask(addr32, mask);
57 }
58 return 0;
59}
60
61static int
62hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
63 const struct xt_hmark_info *info)
64{
65#if IS_ENABLED(CONFIG_NF_CONNTRACK)
66 enum ip_conntrack_info ctinfo;
67 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
68 struct nf_conntrack_tuple *otuple;
69 struct nf_conntrack_tuple *rtuple;
70
71 if (ct == NULL || nf_ct_is_untracked(ct))
72 return -1;
73
74 otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
75 rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
76
77 t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
78 info->src_mask.all);
79 t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
80 info->dst_mask.all);
81
82 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
83 return 0;
84
85 t->proto = nf_ct_protonum(ct);
86 if (t->proto != IPPROTO_ICMP) {
87 t->uports.p16.src = otuple->src.u.all;
88 t->uports.p16.dst = rtuple->src.u.all;
89 t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
90 info->port_set.v32;
91 if (t->uports.p16.dst < t->uports.p16.src)
92 swap(t->uports.p16.dst, t->uports.p16.src);
93 }
94
95 return 0;
96#else
97 return -1;
98#endif
99}
100
101static inline u32
102hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
103{
104 u32 hash;
105
106 if (t->dst < t->src)
107 swap(t->src, t->dst);
108
109 hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
110 hash = hash ^ (t->proto & info->proto_mask);
111
112 return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
113}
114
115static void
116hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
117 struct hmark_tuple *t, const struct xt_hmark_info *info)
118{
119 int protoff;
120
121 protoff = proto_ports_offset(t->proto);
122 if (protoff < 0)
123 return;
124
125 nhoff += protoff;
126 if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
127 return;
128
129 t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
130 info->port_set.v32;
131
132 if (t->uports.p16.dst < t->uports.p16.src)
133 swap(t->uports.p16.dst, t->uports.p16.src);
134}
135
136#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
137static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
138{
139 struct icmp6hdr *icmp6h, _ih6;
140
141 icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
142 if (icmp6h == NULL)
143 return 0;
144
145 if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
146 *offset += sizeof(struct icmp6hdr);
147 return 1;
148 }
149 return 0;
150}
151
152static int
153hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
154 const struct xt_hmark_info *info)
155{
156 struct ipv6hdr *ip6, _ip6;
157 int flag = IP6T_FH_F_AUTH;
158 unsigned int nhoff = 0;
159 u16 fragoff = 0;
160 int nexthdr;
161
162 ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
163 nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
164 if (nexthdr < 0)
165 return 0;
166 /* No need to check for icmp errors on fragments */
167 if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
168 goto noicmp;
169 /* Use inner header in case of ICMP errors */
170 if (get_inner6_hdr(skb, &nhoff)) {
171 ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
172 if (ip6 == NULL)
173 return -1;
174 /* If AH present, use SPI like in ESP. */
175 flag = IP6T_FH_F_AUTH;
176 nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
177 if (nexthdr < 0)
178 return -1;
179 }
180noicmp:
181 t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
182 t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
183
184 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
185 return 0;
186
187 t->proto = nexthdr;
188 if (t->proto == IPPROTO_ICMPV6)
189 return 0;
190
191 if (flag & IP6T_FH_F_FRAG)
192 return 0;
193
194 hmark_set_tuple_ports(skb, nhoff, t, info);
195 return 0;
196}
197
198static unsigned int
199hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
200{
201 const struct xt_hmark_info *info = par->targinfo;
202 struct hmark_tuple t;
203
204 memset(&t, 0, sizeof(struct hmark_tuple));
205
206 if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
207 if (hmark_ct_set_htuple(skb, &t, info) < 0)
208 return XT_CONTINUE;
209 } else {
210 if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
211 return XT_CONTINUE;
212 }
213
214 skb->mark = hmark_hash(&t, info);
215 return XT_CONTINUE;
216}
217#endif
218
219static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
220{
221 const struct icmphdr *icmph;
222 struct icmphdr _ih;
223
224 /* Not enough header? */
225 icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
226 if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
227 return 0;
228
229 /* Error message? */
230 if (icmph->type != ICMP_DEST_UNREACH &&
231 icmph->type != ICMP_SOURCE_QUENCH &&
232 icmph->type != ICMP_TIME_EXCEEDED &&
233 icmph->type != ICMP_PARAMETERPROB &&
234 icmph->type != ICMP_REDIRECT)
235 return 0;
236
237 *nhoff += iphsz + sizeof(_ih);
238 return 1;
239}
240
241static int
242hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
243 const struct xt_hmark_info *info)
244{
245 struct iphdr *ip, _ip;
246 int nhoff = skb_network_offset(skb);
247
248 ip = (struct iphdr *) (skb->data + nhoff);
249 if (ip->protocol == IPPROTO_ICMP) {
250 /* Use inner header in case of ICMP errors */
251 if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
252 ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
253 if (ip == NULL)
254 return -1;
255 }
256 }
257
258 t->src = (__force u32) ip->saddr;
259 t->dst = (__force u32) ip->daddr;
260
261 t->src &= info->src_mask.ip;
262 t->dst &= info->dst_mask.ip;
263
264 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
265 return 0;
266
267 t->proto = ip->protocol;
268
269 /* ICMP has no ports, skip */
270 if (t->proto == IPPROTO_ICMP)
271 return 0;
272
273 /* follow-up fragments don't contain ports, skip all fragments */
274 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
275 return 0;
276
277 hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
278
279 return 0;
280}
281
282static unsigned int
283hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
284{
285 const struct xt_hmark_info *info = par->targinfo;
286 struct hmark_tuple t;
287
288 memset(&t, 0, sizeof(struct hmark_tuple));
289
290 if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
291 if (hmark_ct_set_htuple(skb, &t, info) < 0)
292 return XT_CONTINUE;
293 } else {
294 if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
295 return XT_CONTINUE;
296 }
297
298 skb->mark = hmark_hash(&t, info);
299 return XT_CONTINUE;
300}
301
302static int hmark_tg_check(const struct xt_tgchk_param *par)
303{
304 const struct xt_hmark_info *info = par->targinfo;
305
306 if (!info->hmodulus) {
307 pr_info("xt_HMARK: hash modulus can't be zero\n");
308 return -EINVAL;
309 }
310 if (info->proto_mask &&
311 (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
312 pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
313 return -EINVAL;
314 }
315 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
316 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
317 XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
318 pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
319 return -EINVAL;
320 }
321 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
322 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
323 XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
324 pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
325 return -EINVAL;
326 }
327 return 0;
328}
329
330static struct xt_target hmark_tg_reg[] __read_mostly = {
331 {
332 .name = "HMARK",
333 .family = NFPROTO_IPV4,
334 .target = hmark_tg_v4,
335 .targetsize = sizeof(struct xt_hmark_info),
336 .checkentry = hmark_tg_check,
337 .me = THIS_MODULE,
338 },
339#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
340 {
341 .name = "HMARK",
342 .family = NFPROTO_IPV6,
343 .target = hmark_tg_v6,
344 .targetsize = sizeof(struct xt_hmark_info),
345 .checkentry = hmark_tg_check,
346 .me = THIS_MODULE,
347 },
348#endif
349};
350
351static int __init hmark_tg_init(void)
352{
353 return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
354}
355
356static void __exit hmark_tg_exit(void)
357{
358 xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
359}
360
361module_init(hmark_tg_init);
362module_exit(hmark_tg_exit);
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
new file mode 100644
index 000000000000..ff5f75fddb15
--- /dev/null
+++ b/net/netfilter/xt_LOG.c
@@ -0,0 +1,925 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/module.h>
15#include <linux/spinlock.h>
16#include <linux/skbuff.h>
17#include <linux/if_arp.h>
18#include <linux/ip.h>
19#include <net/ipv6.h>
20#include <net/icmp.h>
21#include <net/udp.h>
22#include <net/tcp.h>
23#include <net/route.h>
24
25#include <linux/netfilter.h>
26#include <linux/netfilter/x_tables.h>
27#include <linux/netfilter/xt_LOG.h>
28#include <linux/netfilter_ipv6/ip6_tables.h>
29#include <net/netfilter/nf_log.h>
30#include <net/netfilter/xt_log.h>
31
32static struct nf_loginfo default_loginfo = {
33 .type = NF_LOG_TYPE_LOG,
34 .u = {
35 .log = {
36 .level = 5,
37 .logflags = NF_LOG_MASK,
38 },
39 },
40};
41
42static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
43 u8 proto, int fragment, unsigned int offset)
44{
45 struct udphdr _udph;
46 const struct udphdr *uh;
47
48 if (proto == IPPROTO_UDP)
49 /* Max length: 10 "PROTO=UDP " */
50 sb_add(m, "PROTO=UDP ");
51 else /* Max length: 14 "PROTO=UDPLITE " */
52 sb_add(m, "PROTO=UDPLITE ");
53
54 if (fragment)
55 goto out;
56
57 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
58 uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
59 if (uh == NULL) {
60 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
61
62 return 1;
63 }
64
65 /* Max length: 20 "SPT=65535 DPT=65535 " */
66 sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
67 ntohs(uh->len));
68
69out:
70 return 0;
71}
72
73static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
74 u8 proto, int fragment, unsigned int offset,
75 unsigned int logflags)
76{
77 struct tcphdr _tcph;
78 const struct tcphdr *th;
79
80 /* Max length: 10 "PROTO=TCP " */
81 sb_add(m, "PROTO=TCP ");
82
83 if (fragment)
84 return 0;
85
86 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
87 th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
88 if (th == NULL) {
89 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
90 return 1;
91 }
92
93 /* Max length: 20 "SPT=65535 DPT=65535 " */
94 sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
95 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
96 if (logflags & XT_LOG_TCPSEQ)
97 sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
98
99 /* Max length: 13 "WINDOW=65535 " */
100 sb_add(m, "WINDOW=%u ", ntohs(th->window));
101 /* Max length: 9 "RES=0x3C " */
102 sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
103 TCP_RESERVED_BITS) >> 22));
104 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
105 if (th->cwr)
106 sb_add(m, "CWR ");
107 if (th->ece)
108 sb_add(m, "ECE ");
109 if (th->urg)
110 sb_add(m, "URG ");
111 if (th->ack)
112 sb_add(m, "ACK ");
113 if (th->psh)
114 sb_add(m, "PSH ");
115 if (th->rst)
116 sb_add(m, "RST ");
117 if (th->syn)
118 sb_add(m, "SYN ");
119 if (th->fin)
120 sb_add(m, "FIN ");
121 /* Max length: 11 "URGP=65535 " */
122 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
123
124 if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
125 u_int8_t _opt[60 - sizeof(struct tcphdr)];
126 const u_int8_t *op;
127 unsigned int i;
128 unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
129
130 op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
131 optsize, _opt);
132 if (op == NULL) {
133 sb_add(m, "OPT (TRUNCATED)");
134 return 1;
135 }
136
137 /* Max length: 127 "OPT (" 15*4*2chars ") " */
138 sb_add(m, "OPT (");
139 for (i = 0; i < optsize; i++)
140 sb_add(m, "%02X", op[i]);
141
142 sb_add(m, ") ");
143 }
144
145 return 0;
146}
147
148/* One level of recursion won't kill us */
149static void dump_ipv4_packet(struct sbuff *m,
150 const struct nf_loginfo *info,
151 const struct sk_buff *skb,
152 unsigned int iphoff)
153{
154 struct iphdr _iph;
155 const struct iphdr *ih;
156 unsigned int logflags;
157
158 if (info->type == NF_LOG_TYPE_LOG)
159 logflags = info->u.log.logflags;
160 else
161 logflags = NF_LOG_MASK;
162
163 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
164 if (ih == NULL) {
165 sb_add(m, "TRUNCATED");
166 return;
167 }
168
169 /* Important fields:
170 * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
171 /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
172 sb_add(m, "SRC=%pI4 DST=%pI4 ",
173 &ih->saddr, &ih->daddr);
174
175 /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
176 sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
177 ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
178 ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
179
180 /* Max length: 6 "CE DF MF " */
181 if (ntohs(ih->frag_off) & IP_CE)
182 sb_add(m, "CE ");
183 if (ntohs(ih->frag_off) & IP_DF)
184 sb_add(m, "DF ");
185 if (ntohs(ih->frag_off) & IP_MF)
186 sb_add(m, "MF ");
187
188 /* Max length: 11 "FRAG:65535 " */
189 if (ntohs(ih->frag_off) & IP_OFFSET)
190 sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
191
192 if ((logflags & XT_LOG_IPOPT) &&
193 ih->ihl * 4 > sizeof(struct iphdr)) {
194 const unsigned char *op;
195 unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
196 unsigned int i, optsize;
197
198 optsize = ih->ihl * 4 - sizeof(struct iphdr);
199 op = skb_header_pointer(skb, iphoff+sizeof(_iph),
200 optsize, _opt);
201 if (op == NULL) {
202 sb_add(m, "TRUNCATED");
203 return;
204 }
205
206 /* Max length: 127 "OPT (" 15*4*2chars ") " */
207 sb_add(m, "OPT (");
208 for (i = 0; i < optsize; i++)
209 sb_add(m, "%02X", op[i]);
210 sb_add(m, ") ");
211 }
212
213 switch (ih->protocol) {
214 case IPPROTO_TCP:
215 if (dump_tcp_header(m, skb, ih->protocol,
216 ntohs(ih->frag_off) & IP_OFFSET,
217 iphoff+ih->ihl*4, logflags))
218 return;
219 break;
220 case IPPROTO_UDP:
221 case IPPROTO_UDPLITE:
222 if (dump_udp_header(m, skb, ih->protocol,
223 ntohs(ih->frag_off) & IP_OFFSET,
224 iphoff+ih->ihl*4))
225 return;
226 break;
227 case IPPROTO_ICMP: {
228 struct icmphdr _icmph;
229 const struct icmphdr *ich;
230 static const size_t required_len[NR_ICMP_TYPES+1]
231 = { [ICMP_ECHOREPLY] = 4,
232 [ICMP_DEST_UNREACH]
233 = 8 + sizeof(struct iphdr),
234 [ICMP_SOURCE_QUENCH]
235 = 8 + sizeof(struct iphdr),
236 [ICMP_REDIRECT]
237 = 8 + sizeof(struct iphdr),
238 [ICMP_ECHO] = 4,
239 [ICMP_TIME_EXCEEDED]
240 = 8 + sizeof(struct iphdr),
241 [ICMP_PARAMETERPROB]
242 = 8 + sizeof(struct iphdr),
243 [ICMP_TIMESTAMP] = 20,
244 [ICMP_TIMESTAMPREPLY] = 20,
245 [ICMP_ADDRESS] = 12,
246 [ICMP_ADDRESSREPLY] = 12 };
247
248 /* Max length: 11 "PROTO=ICMP " */
249 sb_add(m, "PROTO=ICMP ");
250
251 if (ntohs(ih->frag_off) & IP_OFFSET)
252 break;
253
254 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
255 ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
256 sizeof(_icmph), &_icmph);
257 if (ich == NULL) {
258 sb_add(m, "INCOMPLETE [%u bytes] ",
259 skb->len - iphoff - ih->ihl*4);
260 break;
261 }
262
263 /* Max length: 18 "TYPE=255 CODE=255 " */
264 sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
265
266 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
267 if (ich->type <= NR_ICMP_TYPES &&
268 required_len[ich->type] &&
269 skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
270 sb_add(m, "INCOMPLETE [%u bytes] ",
271 skb->len - iphoff - ih->ihl*4);
272 break;
273 }
274
275 switch (ich->type) {
276 case ICMP_ECHOREPLY:
277 case ICMP_ECHO:
278 /* Max length: 19 "ID=65535 SEQ=65535 " */
279 sb_add(m, "ID=%u SEQ=%u ",
280 ntohs(ich->un.echo.id),
281 ntohs(ich->un.echo.sequence));
282 break;
283
284 case ICMP_PARAMETERPROB:
285 /* Max length: 14 "PARAMETER=255 " */
286 sb_add(m, "PARAMETER=%u ",
287 ntohl(ich->un.gateway) >> 24);
288 break;
289 case ICMP_REDIRECT:
290 /* Max length: 24 "GATEWAY=255.255.255.255 " */
291 sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
292 /* Fall through */
293 case ICMP_DEST_UNREACH:
294 case ICMP_SOURCE_QUENCH:
295 case ICMP_TIME_EXCEEDED:
296 /* Max length: 3+maxlen */
297 if (!iphoff) { /* Only recurse once. */
298 sb_add(m, "[");
299 dump_ipv4_packet(m, info, skb,
300 iphoff + ih->ihl*4+sizeof(_icmph));
301 sb_add(m, "] ");
302 }
303
304 /* Max length: 10 "MTU=65535 " */
305 if (ich->type == ICMP_DEST_UNREACH &&
306 ich->code == ICMP_FRAG_NEEDED)
307 sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
308 }
309 break;
310 }
311 /* Max Length */
312 case IPPROTO_AH: {
313 struct ip_auth_hdr _ahdr;
314 const struct ip_auth_hdr *ah;
315
316 if (ntohs(ih->frag_off) & IP_OFFSET)
317 break;
318
319 /* Max length: 9 "PROTO=AH " */
320 sb_add(m, "PROTO=AH ");
321
322 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
323 ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
324 sizeof(_ahdr), &_ahdr);
325 if (ah == NULL) {
326 sb_add(m, "INCOMPLETE [%u bytes] ",
327 skb->len - iphoff - ih->ihl*4);
328 break;
329 }
330
331 /* Length: 15 "SPI=0xF1234567 " */
332 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
333 break;
334 }
335 case IPPROTO_ESP: {
336 struct ip_esp_hdr _esph;
337 const struct ip_esp_hdr *eh;
338
339 /* Max length: 10 "PROTO=ESP " */
340 sb_add(m, "PROTO=ESP ");
341
342 if (ntohs(ih->frag_off) & IP_OFFSET)
343 break;
344
345 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
346 eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
347 sizeof(_esph), &_esph);
348 if (eh == NULL) {
349 sb_add(m, "INCOMPLETE [%u bytes] ",
350 skb->len - iphoff - ih->ihl*4);
351 break;
352 }
353
354 /* Length: 15 "SPI=0xF1234567 " */
355 sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
356 break;
357 }
358 /* Max length: 10 "PROTO 255 " */
359 default:
360 sb_add(m, "PROTO=%u ", ih->protocol);
361 }
362
363 /* Max length: 15 "UID=4294967295 " */
364 if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
365 read_lock_bh(&skb->sk->sk_callback_lock);
366 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
367 sb_add(m, "UID=%u GID=%u ",
368 skb->sk->sk_socket->file->f_cred->fsuid,
369 skb->sk->sk_socket->file->f_cred->fsgid);
370 read_unlock_bh(&skb->sk->sk_callback_lock);
371 }
372
373 /* Max length: 16 "MARK=0xFFFFFFFF " */
374 if (!iphoff && skb->mark)
375 sb_add(m, "MARK=0x%x ", skb->mark);
376
377 /* Proto Max log string length */
378 /* IP: 40+46+6+11+127 = 230 */
379 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
380 /* UDP: 10+max(25,20) = 35 */
381 /* UDPLITE: 14+max(25,20) = 39 */
382 /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
383 /* ESP: 10+max(25)+15 = 50 */
384 /* AH: 9+max(25)+15 = 49 */
385 /* unknown: 10 */
386
387 /* (ICMP allows recursion one level deep) */
388 /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
389 /* maxlen = 230+ 91 + 230 + 252 = 803 */
390}
391
392static void dump_ipv4_mac_header(struct sbuff *m,
393 const struct nf_loginfo *info,
394 const struct sk_buff *skb)
395{
396 struct net_device *dev = skb->dev;
397 unsigned int logflags = 0;
398
399 if (info->type == NF_LOG_TYPE_LOG)
400 logflags = info->u.log.logflags;
401
402 if (!(logflags & XT_LOG_MACDECODE))
403 goto fallback;
404
405 switch (dev->type) {
406 case ARPHRD_ETHER:
407 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
408 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
409 ntohs(eth_hdr(skb)->h_proto));
410 return;
411 default:
412 break;
413 }
414
415fallback:
416 sb_add(m, "MAC=");
417 if (dev->hard_header_len &&
418 skb->mac_header != skb->network_header) {
419 const unsigned char *p = skb_mac_header(skb);
420 unsigned int i;
421
422 sb_add(m, "%02x", *p++);
423 for (i = 1; i < dev->hard_header_len; i++, p++)
424 sb_add(m, ":%02x", *p);
425 }
426 sb_add(m, " ");
427}
428
429static void
430log_packet_common(struct sbuff *m,
431 u_int8_t pf,
432 unsigned int hooknum,
433 const struct sk_buff *skb,
434 const struct net_device *in,
435 const struct net_device *out,
436 const struct nf_loginfo *loginfo,
437 const char *prefix)
438{
439 sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
440 prefix,
441 in ? in->name : "",
442 out ? out->name : "");
443#ifdef CONFIG_BRIDGE_NETFILTER
444 if (skb->nf_bridge) {
445 const struct net_device *physindev;
446 const struct net_device *physoutdev;
447
448 physindev = skb->nf_bridge->physindev;
449 if (physindev && in != physindev)
450 sb_add(m, "PHYSIN=%s ", physindev->name);
451 physoutdev = skb->nf_bridge->physoutdev;
452 if (physoutdev && out != physoutdev)
453 sb_add(m, "PHYSOUT=%s ", physoutdev->name);
454 }
455#endif
456}
457
458
459static void
460ipt_log_packet(u_int8_t pf,
461 unsigned int hooknum,
462 const struct sk_buff *skb,
463 const struct net_device *in,
464 const struct net_device *out,
465 const struct nf_loginfo *loginfo,
466 const char *prefix)
467{
468 struct sbuff *m = sb_open();
469
470 if (!loginfo)
471 loginfo = &default_loginfo;
472
473 log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
474
475 if (in != NULL)
476 dump_ipv4_mac_header(m, loginfo, skb);
477
478 dump_ipv4_packet(m, loginfo, skb, 0);
479
480 sb_close(m);
481}
482
483#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
484/* One level of recursion won't kill us */
485static void dump_ipv6_packet(struct sbuff *m,
486 const struct nf_loginfo *info,
487 const struct sk_buff *skb, unsigned int ip6hoff,
488 int recurse)
489{
490 u_int8_t currenthdr;
491 int fragment;
492 struct ipv6hdr _ip6h;
493 const struct ipv6hdr *ih;
494 unsigned int ptr;
495 unsigned int hdrlen = 0;
496 unsigned int logflags;
497
498 if (info->type == NF_LOG_TYPE_LOG)
499 logflags = info->u.log.logflags;
500 else
501 logflags = NF_LOG_MASK;
502
503 ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
504 if (ih == NULL) {
505 sb_add(m, "TRUNCATED");
506 return;
507 }
508
509 /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
510 sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
511
512 /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
513 sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
514 ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
515 (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
516 ih->hop_limit,
517 (ntohl(*(__be32 *)ih) & 0x000fffff));
518
519 fragment = 0;
520 ptr = ip6hoff + sizeof(struct ipv6hdr);
521 currenthdr = ih->nexthdr;
522 while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
523 struct ipv6_opt_hdr _hdr;
524 const struct ipv6_opt_hdr *hp;
525
526 hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
527 if (hp == NULL) {
528 sb_add(m, "TRUNCATED");
529 return;
530 }
531
532 /* Max length: 48 "OPT (...) " */
533 if (logflags & XT_LOG_IPOPT)
534 sb_add(m, "OPT ( ");
535
536 switch (currenthdr) {
537 case IPPROTO_FRAGMENT: {
538 struct frag_hdr _fhdr;
539 const struct frag_hdr *fh;
540
541 sb_add(m, "FRAG:");
542 fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
543 &_fhdr);
544 if (fh == NULL) {
545 sb_add(m, "TRUNCATED ");
546 return;
547 }
548
549 /* Max length: 6 "65535 " */
550 sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
551
552 /* Max length: 11 "INCOMPLETE " */
553 if (fh->frag_off & htons(0x0001))
554 sb_add(m, "INCOMPLETE ");
555
556 sb_add(m, "ID:%08x ", ntohl(fh->identification));
557
558 if (ntohs(fh->frag_off) & 0xFFF8)
559 fragment = 1;
560
561 hdrlen = 8;
562
563 break;
564 }
565 case IPPROTO_DSTOPTS:
566 case IPPROTO_ROUTING:
567 case IPPROTO_HOPOPTS:
568 if (fragment) {
569 if (logflags & XT_LOG_IPOPT)
570 sb_add(m, ")");
571 return;
572 }
573 hdrlen = ipv6_optlen(hp);
574 break;
575 /* Max Length */
576 case IPPROTO_AH:
577 if (logflags & XT_LOG_IPOPT) {
578 struct ip_auth_hdr _ahdr;
579 const struct ip_auth_hdr *ah;
580
581 /* Max length: 3 "AH " */
582 sb_add(m, "AH ");
583
584 if (fragment) {
585 sb_add(m, ")");
586 return;
587 }
588
589 ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
590 &_ahdr);
591 if (ah == NULL) {
592 /*
593 * Max length: 26 "INCOMPLETE [65535
594 * bytes] )"
595 */
596 sb_add(m, "INCOMPLETE [%u bytes] )",
597 skb->len - ptr);
598 return;
599 }
600
601 /* Length: 15 "SPI=0xF1234567 */
602 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
603
604 }
605
606 hdrlen = (hp->hdrlen+2)<<2;
607 break;
608 case IPPROTO_ESP:
609 if (logflags & XT_LOG_IPOPT) {
610 struct ip_esp_hdr _esph;
611 const struct ip_esp_hdr *eh;
612
613 /* Max length: 4 "ESP " */
614 sb_add(m, "ESP ");
615
616 if (fragment) {
617 sb_add(m, ")");
618 return;
619 }
620
621 /*
622 * Max length: 26 "INCOMPLETE [65535 bytes] )"
623 */
624 eh = skb_header_pointer(skb, ptr, sizeof(_esph),
625 &_esph);
626 if (eh == NULL) {
627 sb_add(m, "INCOMPLETE [%u bytes] )",
628 skb->len - ptr);
629 return;
630 }
631
632 /* Length: 16 "SPI=0xF1234567 )" */
633 sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
634
635 }
636 return;
637 default:
638 /* Max length: 20 "Unknown Ext Hdr 255" */
639 sb_add(m, "Unknown Ext Hdr %u", currenthdr);
640 return;
641 }
642 if (logflags & XT_LOG_IPOPT)
643 sb_add(m, ") ");
644
645 currenthdr = hp->nexthdr;
646 ptr += hdrlen;
647 }
648
649 switch (currenthdr) {
650 case IPPROTO_TCP:
651 if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
652 logflags))
653 return;
654 break;
655 case IPPROTO_UDP:
656 case IPPROTO_UDPLITE:
657 if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
658 return;
659 break;
660 case IPPROTO_ICMPV6: {
661 struct icmp6hdr _icmp6h;
662 const struct icmp6hdr *ic;
663
664 /* Max length: 13 "PROTO=ICMPv6 " */
665 sb_add(m, "PROTO=ICMPv6 ");
666
667 if (fragment)
668 break;
669
670 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
671 ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
672 if (ic == NULL) {
673 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
674 return;
675 }
676
677 /* Max length: 18 "TYPE=255 CODE=255 " */
678 sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
679
680 switch (ic->icmp6_type) {
681 case ICMPV6_ECHO_REQUEST:
682 case ICMPV6_ECHO_REPLY:
683 /* Max length: 19 "ID=65535 SEQ=65535 " */
684 sb_add(m, "ID=%u SEQ=%u ",
685 ntohs(ic->icmp6_identifier),
686 ntohs(ic->icmp6_sequence));
687 break;
688 case ICMPV6_MGM_QUERY:
689 case ICMPV6_MGM_REPORT:
690 case ICMPV6_MGM_REDUCTION:
691 break;
692
693 case ICMPV6_PARAMPROB:
694 /* Max length: 17 "POINTER=ffffffff " */
695 sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
696 /* Fall through */
697 case ICMPV6_DEST_UNREACH:
698 case ICMPV6_PKT_TOOBIG:
699 case ICMPV6_TIME_EXCEED:
700 /* Max length: 3+maxlen */
701 if (recurse) {
702 sb_add(m, "[");
703 dump_ipv6_packet(m, info, skb,
704 ptr + sizeof(_icmp6h), 0);
705 sb_add(m, "] ");
706 }
707
708 /* Max length: 10 "MTU=65535 " */
709 if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
710 sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
711 }
712 break;
713 }
714 /* Max length: 10 "PROTO=255 " */
715 default:
716 sb_add(m, "PROTO=%u ", currenthdr);
717 }
718
719 /* Max length: 15 "UID=4294967295 " */
720 if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
721 read_lock_bh(&skb->sk->sk_callback_lock);
722 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
723 sb_add(m, "UID=%u GID=%u ",
724 skb->sk->sk_socket->file->f_cred->fsuid,
725 skb->sk->sk_socket->file->f_cred->fsgid);
726 read_unlock_bh(&skb->sk->sk_callback_lock);
727 }
728
729 /* Max length: 16 "MARK=0xFFFFFFFF " */
730 if (!recurse && skb->mark)
731 sb_add(m, "MARK=0x%x ", skb->mark);
732}
733
734static void dump_ipv6_mac_header(struct sbuff *m,
735 const struct nf_loginfo *info,
736 const struct sk_buff *skb)
737{
738 struct net_device *dev = skb->dev;
739 unsigned int logflags = 0;
740
741 if (info->type == NF_LOG_TYPE_LOG)
742 logflags = info->u.log.logflags;
743
744 if (!(logflags & XT_LOG_MACDECODE))
745 goto fallback;
746
747 switch (dev->type) {
748 case ARPHRD_ETHER:
749 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
750 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
751 ntohs(eth_hdr(skb)->h_proto));
752 return;
753 default:
754 break;
755 }
756
757fallback:
758 sb_add(m, "MAC=");
759 if (dev->hard_header_len &&
760 skb->mac_header != skb->network_header) {
761 const unsigned char *p = skb_mac_header(skb);
762 unsigned int len = dev->hard_header_len;
763 unsigned int i;
764
765 if (dev->type == ARPHRD_SIT) {
766 p -= ETH_HLEN;
767
768 if (p < skb->head)
769 p = NULL;
770 }
771
772 if (p != NULL) {
773 sb_add(m, "%02x", *p++);
774 for (i = 1; i < len; i++)
775 sb_add(m, ":%02x", *p++);
776 }
777 sb_add(m, " ");
778
779 if (dev->type == ARPHRD_SIT) {
780 const struct iphdr *iph =
781 (struct iphdr *)skb_mac_header(skb);
782 sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
783 &iph->daddr);
784 }
785 } else
786 sb_add(m, " ");
787}
788
789static void
790ip6t_log_packet(u_int8_t pf,
791 unsigned int hooknum,
792 const struct sk_buff *skb,
793 const struct net_device *in,
794 const struct net_device *out,
795 const struct nf_loginfo *loginfo,
796 const char *prefix)
797{
798 struct sbuff *m = sb_open();
799
800 if (!loginfo)
801 loginfo = &default_loginfo;
802
803 log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
804
805 if (in != NULL)
806 dump_ipv6_mac_header(m, loginfo, skb);
807
808 dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
809
810 sb_close(m);
811}
812#endif
813
814static unsigned int
815log_tg(struct sk_buff *skb, const struct xt_action_param *par)
816{
817 const struct xt_log_info *loginfo = par->targinfo;
818 struct nf_loginfo li;
819
820 li.type = NF_LOG_TYPE_LOG;
821 li.u.log.level = loginfo->level;
822 li.u.log.logflags = loginfo->logflags;
823
824 if (par->family == NFPROTO_IPV4)
825 ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
826 par->out, &li, loginfo->prefix);
827#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
828 else if (par->family == NFPROTO_IPV6)
829 ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
830 par->out, &li, loginfo->prefix);
831#endif
832 else
833 WARN_ON_ONCE(1);
834
835 return XT_CONTINUE;
836}
837
838static int log_tg_check(const struct xt_tgchk_param *par)
839{
840 const struct xt_log_info *loginfo = par->targinfo;
841
842 if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
843 return -EINVAL;
844
845 if (loginfo->level >= 8) {
846 pr_debug("level %u >= 8\n", loginfo->level);
847 return -EINVAL;
848 }
849
850 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
851 pr_debug("prefix is not null-terminated\n");
852 return -EINVAL;
853 }
854
855 return 0;
856}
857
858static struct xt_target log_tg_regs[] __read_mostly = {
859 {
860 .name = "LOG",
861 .family = NFPROTO_IPV4,
862 .target = log_tg,
863 .targetsize = sizeof(struct xt_log_info),
864 .checkentry = log_tg_check,
865 .me = THIS_MODULE,
866 },
867#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
868 {
869 .name = "LOG",
870 .family = NFPROTO_IPV6,
871 .target = log_tg,
872 .targetsize = sizeof(struct xt_log_info),
873 .checkentry = log_tg_check,
874 .me = THIS_MODULE,
875 },
876#endif
877};
878
879static struct nf_logger ipt_log_logger __read_mostly = {
880 .name = "ipt_LOG",
881 .logfn = &ipt_log_packet,
882 .me = THIS_MODULE,
883};
884
885#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
886static struct nf_logger ip6t_log_logger __read_mostly = {
887 .name = "ip6t_LOG",
888 .logfn = &ip6t_log_packet,
889 .me = THIS_MODULE,
890};
891#endif
892
893static int __init log_tg_init(void)
894{
895 int ret;
896
897 ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
898 if (ret < 0)
899 return ret;
900
901 nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
902#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
903 nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
904#endif
905 return 0;
906}
907
908static void __exit log_tg_exit(void)
909{
910 nf_log_unregister(&ipt_log_logger);
911#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
912 nf_log_unregister(&ip6t_log_logger);
913#endif
914 xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
915}
916
917module_init(log_tg_init);
918module_exit(log_tg_exit);
919
920MODULE_LICENSE("GPL");
921MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
922MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
923MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
924MODULE_ALIAS("ipt_LOG");
925MODULE_ALIAS("ip6t_LOG");
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 190ad37c5cf8..71a266de5fb4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -67,15 +67,13 @@ tcpmss_mangle_packet(struct sk_buff *skb,
67 67
68 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 68 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
69 if (dst_mtu(skb_dst(skb)) <= minlen) { 69 if (dst_mtu(skb_dst(skb)) <= minlen) {
70 if (net_ratelimit()) 70 net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
71 pr_err("unknown or invalid path-MTU (%u)\n", 71 dst_mtu(skb_dst(skb)));
72 dst_mtu(skb_dst(skb)));
73 return -1; 72 return -1;
74 } 73 }
75 if (in_mtu <= minlen) { 74 if (in_mtu <= minlen) {
76 if (net_ratelimit()) 75 net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
77 pr_err("unknown or invalid path-MTU (%u)\n", 76 in_mtu);
78 in_mtu);
79 return -1; 77 return -1;
80 } 78 }
81 newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; 79 newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 4d5057902839..ee2e5bc5a8c7 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -87,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
87 const struct xt_tee_tginfo *info = par->targinfo; 87 const struct xt_tee_tginfo *info = par->targinfo;
88 struct iphdr *iph; 88 struct iphdr *iph;
89 89
90 if (percpu_read(tee_active)) 90 if (__this_cpu_read(tee_active))
91 return XT_CONTINUE; 91 return XT_CONTINUE;
92 /* 92 /*
93 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for 93 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -124,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
124 ip_send_check(iph); 124 ip_send_check(iph);
125 125
126 if (tee_tg_route4(skb, info)) { 126 if (tee_tg_route4(skb, info)) {
127 percpu_write(tee_active, true); 127 __this_cpu_write(tee_active, true);
128 ip_local_out(skb); 128 ip_local_out(skb);
129 percpu_write(tee_active, false); 129 __this_cpu_write(tee_active, false);
130 } else { 130 } else {
131 kfree_skb(skb); 131 kfree_skb(skb);
132 } 132 }
@@ -168,7 +168,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
168{ 168{
169 const struct xt_tee_tginfo *info = par->targinfo; 169 const struct xt_tee_tginfo *info = par->targinfo;
170 170
171 if (percpu_read(tee_active)) 171 if (__this_cpu_read(tee_active))
172 return XT_CONTINUE; 172 return XT_CONTINUE;
173 skb = pskb_copy(skb, GFP_ATOMIC); 173 skb = pskb_copy(skb, GFP_ATOMIC);
174 if (skb == NULL) 174 if (skb == NULL)
@@ -186,9 +186,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
186 --iph->hop_limit; 186 --iph->hop_limit;
187 } 187 }
188 if (tee_tg_route6(skb, info)) { 188 if (tee_tg_route6(skb, info)) {
189 percpu_write(tee_active, true); 189 __this_cpu_write(tee_active, true);
190 ip6_local_out(skb); 190 ip6_local_out(skb);
191 percpu_write(tee_active, false); 191 __this_cpu_write(tee_active, false);
192 } else { 192 } else {
193 kfree_skb(skb); 193 kfree_skb(skb);
194 } 194 }
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 35a959a096e0..146033a86de8 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -282,10 +282,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
282 struct sock *sk; 282 struct sock *sk;
283 const struct in6_addr *laddr; 283 const struct in6_addr *laddr;
284 __be16 lport; 284 __be16 lport;
285 int thoff; 285 int thoff = 0;
286 int tproto; 286 int tproto;
287 287
288 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); 288 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
289 if (tproto < 0) { 289 if (tproto < 0) {
290 pr_debug("unable to find transport header in IPv6 packet, dropping\n"); 290 pr_debug("unable to find transport header in IPv6 packet, dropping\n");
291 return NF_DROP; 291 return NF_DROP;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d95f9c963cde..26a668a84aa2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -171,8 +171,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
171 171
172 if (ht->cfg.max && ht->count >= ht->cfg.max) { 172 if (ht->cfg.max && ht->count >= ht->cfg.max) {
173 /* FIXME: do something. question is what.. */ 173 /* FIXME: do something. question is what.. */
174 if (net_ratelimit()) 174 net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
175 pr_err("max count of %u reached\n", ht->cfg.max);
176 ent = NULL; 175 ent = NULL;
177 } else 176 } else
178 ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); 177 ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
@@ -388,9 +387,20 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
388 387
389#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) 388#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
390 389
390/* in byte mode, the lowest possible rate is one packet/second.
391 * credit_cap is used as a counter that tells us how many times we can
392 * refill the "credits available" counter when it becomes empty.
393 */
394#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
395#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
396
397static u32 xt_hashlimit_len_to_chunks(u32 len)
398{
399 return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
400}
401
391/* Precision saver. */ 402/* Precision saver. */
392static inline u_int32_t 403static u32 user2credits(u32 user)
393user2credits(u_int32_t user)
394{ 404{
395 /* If multiplying would overflow... */ 405 /* If multiplying would overflow... */
396 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) 406 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -400,12 +410,53 @@ user2credits(u_int32_t user)
400 return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; 410 return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
401} 411}
402 412
403static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) 413static u32 user2credits_byte(u32 user)
404{ 414{
405 dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; 415 u64 us = user;
406 if (dh->rateinfo.credit > dh->rateinfo.credit_cap) 416 us *= HZ * CREDITS_PER_JIFFY_BYTES;
407 dh->rateinfo.credit = dh->rateinfo.credit_cap; 417 return (u32) (us >> 32);
418}
419
420static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
421{
422 unsigned long delta = now - dh->rateinfo.prev;
423 u32 cap;
424
425 if (delta == 0)
426 return;
427
408 dh->rateinfo.prev = now; 428 dh->rateinfo.prev = now;
429
430 if (mode & XT_HASHLIMIT_BYTES) {
431 u32 tmp = dh->rateinfo.credit;
432 dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
433 cap = CREDITS_PER_JIFFY_BYTES * HZ;
434 if (tmp >= dh->rateinfo.credit) {/* overflow */
435 dh->rateinfo.credit = cap;
436 return;
437 }
438 } else {
439 dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
440 cap = dh->rateinfo.credit_cap;
441 }
442 if (dh->rateinfo.credit > cap)
443 dh->rateinfo.credit = cap;
444}
445
446static void rateinfo_init(struct dsthash_ent *dh,
447 struct xt_hashlimit_htable *hinfo)
448{
449 dh->rateinfo.prev = jiffies;
450 if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
451 dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
452 dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
453 dh->rateinfo.credit_cap = hinfo->cfg.burst;
454 } else {
455 dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
456 hinfo->cfg.burst);
457 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
458 dh->rateinfo.credit_cap = dh->rateinfo.credit;
459 }
409} 460}
410 461
411static inline __be32 maskl(__be32 a, unsigned int l) 462static inline __be32 maskl(__be32 a, unsigned int l)
@@ -511,6 +562,21 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
511 return 0; 562 return 0;
512} 563}
513 564
565static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
566{
567 u64 tmp = xt_hashlimit_len_to_chunks(len);
568 tmp = tmp * dh->rateinfo.cost;
569
570 if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
571 tmp = CREDITS_PER_JIFFY_BYTES * HZ;
572
573 if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
574 dh->rateinfo.credit_cap--;
575 dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
576 }
577 return (u32) tmp;
578}
579
514static bool 580static bool
515hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) 581hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
516{ 582{
@@ -519,6 +585,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
519 unsigned long now = jiffies; 585 unsigned long now = jiffies;
520 struct dsthash_ent *dh; 586 struct dsthash_ent *dh;
521 struct dsthash_dst dst; 587 struct dsthash_dst dst;
588 u32 cost;
522 589
523 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) 590 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
524 goto hotdrop; 591 goto hotdrop;
@@ -532,21 +599,21 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
532 goto hotdrop; 599 goto hotdrop;
533 } 600 }
534 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); 601 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
535 dh->rateinfo.prev = jiffies; 602 rateinfo_init(dh, hinfo);
536 dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
537 hinfo->cfg.burst);
538 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
539 hinfo->cfg.burst);
540 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
541 } else { 603 } else {
542 /* update expiration timeout */ 604 /* update expiration timeout */
543 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); 605 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
544 rateinfo_recalc(dh, now); 606 rateinfo_recalc(dh, now, hinfo->cfg.mode);
545 } 607 }
546 608
547 if (dh->rateinfo.credit >= dh->rateinfo.cost) { 609 if (info->cfg.mode & XT_HASHLIMIT_BYTES)
610 cost = hashlimit_byte_cost(skb->len, dh);
611 else
612 cost = dh->rateinfo.cost;
613
614 if (dh->rateinfo.credit >= cost) {
548 /* below the limit */ 615 /* below the limit */
549 dh->rateinfo.credit -= dh->rateinfo.cost; 616 dh->rateinfo.credit -= cost;
550 spin_unlock(&dh->lock); 617 spin_unlock(&dh->lock);
551 rcu_read_unlock_bh(); 618 rcu_read_unlock_bh();
552 return !(info->cfg.mode & XT_HASHLIMIT_INVERT); 619 return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
@@ -568,14 +635,6 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
568 struct xt_hashlimit_mtinfo1 *info = par->matchinfo; 635 struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
569 int ret; 636 int ret;
570 637
571 /* Check for overflow. */
572 if (info->cfg.burst == 0 ||
573 user2credits(info->cfg.avg * info->cfg.burst) <
574 user2credits(info->cfg.avg)) {
575 pr_info("overflow, try lower: %u/%u\n",
576 info->cfg.avg, info->cfg.burst);
577 return -ERANGE;
578 }
579 if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) 638 if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
580 return -EINVAL; 639 return -EINVAL;
581 if (info->name[sizeof(info->name)-1] != '\0') 640 if (info->name[sizeof(info->name)-1] != '\0')
@@ -588,6 +647,26 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
588 return -EINVAL; 647 return -EINVAL;
589 } 648 }
590 649
650 if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
651 pr_info("Unknown mode mask %X, kernel too old?\n",
652 info->cfg.mode);
653 return -EINVAL;
654 }
655
656 /* Check for overflow. */
657 if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
658 if (user2credits_byte(info->cfg.avg) == 0) {
659 pr_info("overflow, rate too high: %u\n", info->cfg.avg);
660 return -EINVAL;
661 }
662 } else if (info->cfg.burst == 0 ||
663 user2credits(info->cfg.avg * info->cfg.burst) <
664 user2credits(info->cfg.avg)) {
665 pr_info("overflow, try lower: %u/%u\n",
666 info->cfg.avg, info->cfg.burst);
667 return -ERANGE;
668 }
669
591 mutex_lock(&hashlimit_mutex); 670 mutex_lock(&hashlimit_mutex);
592 info->hinfo = htable_find_get(net, info->name, par->family); 671 info->hinfo = htable_find_get(net, info->name, par->family);
593 if (info->hinfo == NULL) { 672 if (info->hinfo == NULL) {
@@ -680,10 +759,11 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
680 struct seq_file *s) 759 struct seq_file *s)
681{ 760{
682 int res; 761 int res;
762 const struct xt_hashlimit_htable *ht = s->private;
683 763
684 spin_lock(&ent->lock); 764 spin_lock(&ent->lock);
685 /* recalculate to show accurate numbers */ 765 /* recalculate to show accurate numbers */
686 rateinfo_recalc(ent, jiffies); 766 rateinfo_recalc(ent, jiffies, ht->cfg.mode);
687 767
688 switch (family) { 768 switch (family) {
689 case NFPROTO_IPV4: 769 case NFPROTO_IPV4:
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 32b7a579a032..5c22ce8ab309 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
88} 88}
89 89
90/* Precision saver. */ 90/* Precision saver. */
91static u_int32_t 91static u32 user2credits(u32 user)
92user2credits(u_int32_t user)
93{ 92{
94 /* If multiplying would overflow... */ 93 /* If multiplying would overflow... */
95 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) 94 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -123,7 +122,7 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
123 128. */ 122 128. */
124 priv->prev = jiffies; 123 priv->prev = jiffies;
125 priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ 124 priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
126 r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ 125 r->credit_cap = priv->credit; /* Credits full. */
127 r->cost = user2credits(r->avg); 126 r->cost = user2credits(r->avg);
128 } 127 }
129 return 0; 128 return 0;
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index 8160f6b1435d..d5b4fd4f91ed 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -36,7 +36,7 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
36 return false; 36 return false;
37 if (skb_mac_header(skb) + ETH_HLEN > skb->data) 37 if (skb_mac_header(skb) + ETH_HLEN > skb->data)
38 return false; 38 return false;
39 ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; 39 ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
40 ret ^= info->invert; 40 ret ^= info->invert;
41 return ret; 41 return ret;
42} 42}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d2ff15a2412b..fc0d6dbe5d17 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -314,7 +314,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
314#ifdef CONFIG_PROC_FS 314#ifdef CONFIG_PROC_FS
315 struct proc_dir_entry *pde; 315 struct proc_dir_entry *pde;
316#endif 316#endif
317 unsigned i; 317 unsigned int i;
318 int ret = -EINVAL; 318 int ret = -EINVAL;
319 319
320 if (unlikely(!hash_rnd_inited)) { 320 if (unlikely(!hash_rnd_inited)) {
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 0ec8138aa470..035960ec5cb9 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -44,6 +44,14 @@ const struct ip_set_adt_opt n = { \
44 .cmdflags = cfs, \ 44 .cmdflags = cfs, \
45 .timeout = t, \ 45 .timeout = t, \
46} 46}
47#define ADT_MOPT(n, f, d, fs, cfs, t) \
48struct ip_set_adt_opt n = { \
49 .family = f, \
50 .dim = d, \
51 .flags = fs, \
52 .cmdflags = cfs, \
53 .timeout = t, \
54}
47 55
48/* Revision 0 interface: backward compatible with netfilter/iptables */ 56/* Revision 0 interface: backward compatible with netfilter/iptables */
49 57
@@ -296,11 +304,14 @@ static unsigned int
296set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) 304set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
297{ 305{
298 const struct xt_set_info_target_v2 *info = par->targinfo; 306 const struct xt_set_info_target_v2 *info = par->targinfo;
299 ADT_OPT(add_opt, par->family, info->add_set.dim, 307 ADT_MOPT(add_opt, par->family, info->add_set.dim,
300 info->add_set.flags, info->flags, info->timeout); 308 info->add_set.flags, info->flags, info->timeout);
301 ADT_OPT(del_opt, par->family, info->del_set.dim, 309 ADT_OPT(del_opt, par->family, info->del_set.dim,
302 info->del_set.flags, 0, UINT_MAX); 310 info->del_set.flags, 0, UINT_MAX);
303 311
312 /* Normalize to fit into jiffies */
313 if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
314 add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
304 if (info->add_set.index != IPSET_INVALID_ID) 315 if (info->add_set.index != IPSET_INVALID_ID)
305 ip_set_add(info->add_set.index, skb, par, &add_opt); 316 ip_set_add(info->add_set.index, skb, par, &add_opt);
306 if (info->del_set.index != IPSET_INVALID_ID) 317 if (info->del_set.index != IPSET_INVALID_ID)
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 72bb07f57f97..9ea482d08cf7 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -263,10 +263,10 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
263 struct sock *sk; 263 struct sock *sk;
264 struct in6_addr *daddr, *saddr; 264 struct in6_addr *daddr, *saddr;
265 __be16 dport, sport; 265 __be16 dport, sport;
266 int thoff, tproto; 266 int thoff = 0, tproto;
267 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; 267 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
268 268
269 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); 269 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
270 if (tproto < 0) { 270 if (tproto < 0) {
271 pr_debug("unable to find transport header in IPv6 packet, dropping\n"); 271 pr_debug("unable to find transport header in IPv6 packet, dropping\n");
272 return NF_DROP; 272 return NF_DROP;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 2560e7b441c6..7c94aedd0912 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -597,7 +597,7 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
597 iter = iter->next; 597 iter = iter->next;
598 iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE; 598 iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
599 } 599 }
600 ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC); 600 ret_val = netlbl_secattr_catmap_setbit(iter, spot, flags);
601 } 601 }
602 602
603 return ret_val; 603 return ret_val;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 629b06182f3f..b3025a603d56 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -104,27 +104,27 @@ static inline int netlink_is_kernel(struct sock *sk)
104} 104}
105 105
106struct nl_pid_hash { 106struct nl_pid_hash {
107 struct hlist_head *table; 107 struct hlist_head *table;
108 unsigned long rehash_time; 108 unsigned long rehash_time;
109 109
110 unsigned int mask; 110 unsigned int mask;
111 unsigned int shift; 111 unsigned int shift;
112 112
113 unsigned int entries; 113 unsigned int entries;
114 unsigned int max_shift; 114 unsigned int max_shift;
115 115
116 u32 rnd; 116 u32 rnd;
117}; 117};
118 118
119struct netlink_table { 119struct netlink_table {
120 struct nl_pid_hash hash; 120 struct nl_pid_hash hash;
121 struct hlist_head mc_list; 121 struct hlist_head mc_list;
122 struct listeners __rcu *listeners; 122 struct listeners __rcu *listeners;
123 unsigned int nl_nonroot; 123 unsigned int nl_nonroot;
124 unsigned int groups; 124 unsigned int groups;
125 struct mutex *cb_mutex; 125 struct mutex *cb_mutex;
126 struct module *module; 126 struct module *module;
127 int registered; 127 int registered;
128}; 128};
129 129
130static struct netlink_table *nl_table; 130static struct netlink_table *nl_table;
@@ -132,7 +132,6 @@ static struct netlink_table *nl_table;
132static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 132static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
133 133
134static int netlink_dump(struct sock *sk); 134static int netlink_dump(struct sock *sk);
135static void netlink_destroy_callback(struct netlink_callback *cb);
136 135
137static DEFINE_RWLOCK(nl_table_lock); 136static DEFINE_RWLOCK(nl_table_lock);
138static atomic_t nl_table_users = ATOMIC_INIT(0); 137static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -149,6 +148,18 @@ static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid
149 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; 148 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
150} 149}
151 150
151static void netlink_destroy_callback(struct netlink_callback *cb)
152{
153 kfree_skb(cb->skb);
154 kfree(cb);
155}
156
157static void netlink_consume_callback(struct netlink_callback *cb)
158{
159 consume_skb(cb->skb);
160 kfree(cb);
161}
162
152static void netlink_sock_destruct(struct sock *sk) 163static void netlink_sock_destruct(struct sock *sk)
153{ 164{
154 struct netlink_sock *nlk = nlk_sk(sk); 165 struct netlink_sock *nlk = nlk_sk(sk);
@@ -414,9 +425,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
414 sock_init_data(sock, sk); 425 sock_init_data(sock, sk);
415 426
416 nlk = nlk_sk(sk); 427 nlk = nlk_sk(sk);
417 if (cb_mutex) 428 if (cb_mutex) {
418 nlk->cb_mutex = cb_mutex; 429 nlk->cb_mutex = cb_mutex;
419 else { 430 } else {
420 nlk->cb_mutex = &nlk->cb_def_mutex; 431 nlk->cb_mutex = &nlk->cb_def_mutex;
421 mutex_init(nlk->cb_mutex); 432 mutex_init(nlk->cb_mutex);
422 } 433 }
@@ -522,8 +533,9 @@ static int netlink_release(struct socket *sock)
522 nl_table[sk->sk_protocol].module = NULL; 533 nl_table[sk->sk_protocol].module = NULL;
523 nl_table[sk->sk_protocol].registered = 0; 534 nl_table[sk->sk_protocol].registered = 0;
524 } 535 }
525 } else if (nlk->subscriptions) 536 } else if (nlk->subscriptions) {
526 netlink_update_listeners(sk); 537 netlink_update_listeners(sk);
538 }
527 netlink_table_ungrab(); 539 netlink_table_ungrab();
528 540
529 kfree(nlk->groups); 541 kfree(nlk->groups);
@@ -829,12 +841,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
829 return 0; 841 return 0;
830} 842}
831 843
832int netlink_sendskb(struct sock *sk, struct sk_buff *skb) 844static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
833{ 845{
834 int len = skb->len; 846 int len = skb->len;
835 847
836 skb_queue_tail(&sk->sk_receive_queue, skb); 848 skb_queue_tail(&sk->sk_receive_queue, skb);
837 sk->sk_data_ready(sk, len); 849 sk->sk_data_ready(sk, len);
850 return len;
851}
852
853int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
854{
855 int len = __netlink_sendskb(sk, skb);
856
838 sock_put(sk); 857 sock_put(sk);
839 return len; 858 return len;
840} 859}
@@ -859,7 +878,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
859 struct sk_buff *nskb = skb_clone(skb, allocation); 878 struct sk_buff *nskb = skb_clone(skb, allocation);
860 if (!nskb) 879 if (!nskb)
861 return skb; 880 return skb;
862 kfree_skb(skb); 881 consume_skb(skb);
863 skb = nskb; 882 skb = nskb;
864 } 883 }
865 884
@@ -889,8 +908,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
889 ret = skb->len; 908 ret = skb->len;
890 skb_set_owner_r(skb, sk); 909 skb_set_owner_r(skb, sk);
891 nlk->netlink_rcv(skb); 910 nlk->netlink_rcv(skb);
911 consume_skb(skb);
912 } else {
913 kfree_skb(skb);
892 } 914 }
893 kfree_skb(skb);
894 sock_put(sk); 915 sock_put(sk);
895 return ret; 916 return ret;
896} 917}
@@ -957,8 +978,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
957 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 978 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
958 !test_bit(0, &nlk->state)) { 979 !test_bit(0, &nlk->state)) {
959 skb_set_owner_r(skb, sk); 980 skb_set_owner_r(skb, sk);
960 skb_queue_tail(&sk->sk_receive_queue, skb); 981 __netlink_sendskb(sk, skb);
961 sk->sk_data_ready(sk, skb->len);
962 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); 982 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
963 } 983 }
964 return -1; 984 return -1;
@@ -1080,8 +1100,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1080 if (info.delivery_failure) { 1100 if (info.delivery_failure) {
1081 kfree_skb(info.skb2); 1101 kfree_skb(info.skb2);
1082 return -ENOBUFS; 1102 return -ENOBUFS;
1083 } else 1103 }
1084 consume_skb(info.skb2); 1104 consume_skb(info.skb2);
1085 1105
1086 if (info.delivered) { 1106 if (info.delivered) {
1087 if (info.congested && (allocation & __GFP_WAIT)) 1107 if (info.congested && (allocation & __GFP_WAIT))
@@ -1234,8 +1254,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
1234 nlk->flags |= NETLINK_RECV_NO_ENOBUFS; 1254 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1235 clear_bit(0, &nlk->state); 1255 clear_bit(0, &nlk->state);
1236 wake_up_interruptible(&nlk->wait); 1256 wake_up_interruptible(&nlk->wait);
1237 } else 1257 } else {
1238 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; 1258 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1259 }
1239 err = 0; 1260 err = 0;
1240 break; 1261 break;
1241 default: 1262 default:
@@ -1639,11 +1660,23 @@ void netlink_set_nonroot(int protocol, unsigned int flags)
1639} 1660}
1640EXPORT_SYMBOL(netlink_set_nonroot); 1661EXPORT_SYMBOL(netlink_set_nonroot);
1641 1662
1642static void netlink_destroy_callback(struct netlink_callback *cb) 1663struct nlmsghdr *
1664__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1643{ 1665{
1644 kfree_skb(cb->skb); 1666 struct nlmsghdr *nlh;
1645 kfree(cb); 1667 int size = NLMSG_LENGTH(len);
1668
1669 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1670 nlh->nlmsg_type = type;
1671 nlh->nlmsg_len = size;
1672 nlh->nlmsg_flags = flags;
1673 nlh->nlmsg_pid = pid;
1674 nlh->nlmsg_seq = seq;
1675 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1676 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1677 return nlh;
1646} 1678}
1679EXPORT_SYMBOL(__nlmsg_put);
1647 1680
1648/* 1681/*
1649 * It looks a bit ugly. 1682 * It looks a bit ugly.
@@ -1680,10 +1713,8 @@ static int netlink_dump(struct sock *sk)
1680 1713
1681 if (sk_filter(sk, skb)) 1714 if (sk_filter(sk, skb))
1682 kfree_skb(skb); 1715 kfree_skb(skb);
1683 else { 1716 else
1684 skb_queue_tail(&sk->sk_receive_queue, skb); 1717 __netlink_sendskb(sk, skb);
1685 sk->sk_data_ready(sk, skb->len);
1686 }
1687 return 0; 1718 return 0;
1688 } 1719 }
1689 1720
@@ -1697,17 +1728,15 @@ static int netlink_dump(struct sock *sk)
1697 1728
1698 if (sk_filter(sk, skb)) 1729 if (sk_filter(sk, skb))
1699 kfree_skb(skb); 1730 kfree_skb(skb);
1700 else { 1731 else
1701 skb_queue_tail(&sk->sk_receive_queue, skb); 1732 __netlink_sendskb(sk, skb);
1702 sk->sk_data_ready(sk, skb->len);
1703 }
1704 1733
1705 if (cb->done) 1734 if (cb->done)
1706 cb->done(cb); 1735 cb->done(cb);
1707 nlk->cb = NULL; 1736 nlk->cb = NULL;
1708 mutex_unlock(nlk->cb_mutex); 1737 mutex_unlock(nlk->cb_mutex);
1709 1738
1710 netlink_destroy_callback(cb); 1739 netlink_consume_callback(cb);
1711 return 0; 1740 return 0;
1712 1741
1713errout_skb: 1742errout_skb:
@@ -1718,10 +1747,7 @@ errout_skb:
1718 1747
1719int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 1748int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1720 const struct nlmsghdr *nlh, 1749 const struct nlmsghdr *nlh,
1721 int (*dump)(struct sk_buff *skb, 1750 struct netlink_dump_control *control)
1722 struct netlink_callback *),
1723 int (*done)(struct netlink_callback *),
1724 u16 min_dump_alloc)
1725{ 1751{
1726 struct netlink_callback *cb; 1752 struct netlink_callback *cb;
1727 struct sock *sk; 1753 struct sock *sk;
@@ -1732,10 +1758,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1732 if (cb == NULL) 1758 if (cb == NULL)
1733 return -ENOBUFS; 1759 return -ENOBUFS;
1734 1760
1735 cb->dump = dump; 1761 cb->dump = control->dump;
1736 cb->done = done; 1762 cb->done = control->done;
1737 cb->nlh = nlh; 1763 cb->nlh = nlh;
1738 cb->min_dump_alloc = min_dump_alloc; 1764 cb->data = control->data;
1765 cb->min_dump_alloc = control->min_dump_alloc;
1739 atomic_inc(&skb->users); 1766 atomic_inc(&skb->users);
1740 cb->skb = skb; 1767 cb->skb = skb;
1741 1768
@@ -1978,11 +2005,11 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
1978 2005
1979static int netlink_seq_show(struct seq_file *seq, void *v) 2006static int netlink_seq_show(struct seq_file *seq, void *v)
1980{ 2007{
1981 if (v == SEQ_START_TOKEN) 2008 if (v == SEQ_START_TOKEN) {
1982 seq_puts(seq, 2009 seq_puts(seq,
1983 "sk Eth Pid Groups " 2010 "sk Eth Pid Groups "
1984 "Rmem Wmem Dump Locks Drops Inode\n"); 2011 "Rmem Wmem Dump Locks Drops Inode\n");
1985 else { 2012 } else {
1986 struct sock *s = v; 2013 struct sock *s = v;
1987 struct netlink_sock *nlk = nlk_sk(s); 2014 struct netlink_sock *nlk = nlk_sk(s);
1988 2015
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index c29d2568c9e0..2cc7c1ee7690 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -498,6 +498,37 @@ int genl_unregister_family(struct genl_family *family)
498} 498}
499EXPORT_SYMBOL(genl_unregister_family); 499EXPORT_SYMBOL(genl_unregister_family);
500 500
501/**
502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message
504 * @pid: netlink pid the message is addressed to
505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family
507 * @flags netlink message flags
508 * @cmd: generic netlink command
509 *
510 * Returns pointer to user specific header
511 */
512void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd)
514{
515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr;
517
518 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags);
520 if (nlh == NULL)
521 return NULL;
522
523 hdr = nlmsg_data(nlh);
524 hdr->cmd = cmd;
525 hdr->version = family->version;
526 hdr->reserved = 0;
527
528 return (char *) hdr + GENL_HDRLEN;
529}
530EXPORT_SYMBOL(genlmsg_put);
531
501static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 532static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
502{ 533{
503 struct genl_ops *ops; 534 struct genl_ops *ops;
@@ -532,8 +563,13 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
532 return -EOPNOTSUPP; 563 return -EOPNOTSUPP;
533 564
534 genl_unlock(); 565 genl_unlock();
535 err = netlink_dump_start(net->genl_sock, skb, nlh, 566 {
536 ops->dumpit, ops->done, 0); 567 struct netlink_dump_control c = {
568 .dump = ops->dumpit,
569 .done = ops->done,
570 };
571 err = netlink_dump_start(net->genl_sock, skb, nlh, &c);
572 }
537 genl_lock(); 573 genl_lock();
538 return err; 574 return err;
539 } 575 }
@@ -599,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
599 if (hdr == NULL) 635 if (hdr == NULL)
600 return -1; 636 return -1;
601 637
602 NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name); 638 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
603 NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id); 639 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
604 NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version); 640 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
605 NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); 641 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
606 NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); 642 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
643 goto nla_put_failure;
607 644
608 if (!list_empty(&family->ops_list)) { 645 if (!list_empty(&family->ops_list)) {
609 struct nlattr *nla_ops; 646 struct nlattr *nla_ops;
@@ -621,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
621 if (nest == NULL) 658 if (nest == NULL)
622 goto nla_put_failure; 659 goto nla_put_failure;
623 660
624 NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); 661 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
625 NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); 662 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
663 goto nla_put_failure;
626 664
627 nla_nest_end(skb, nest); 665 nla_nest_end(skb, nest);
628 } 666 }
@@ -646,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
646 if (nest == NULL) 684 if (nest == NULL)
647 goto nla_put_failure; 685 goto nla_put_failure;
648 686
649 NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); 687 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
650 NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, 688 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
651 grp->name); 689 grp->name))
690 goto nla_put_failure;
652 691
653 nla_nest_end(skb, nest); 692 nla_nest_end(skb, nest);
654 } 693 }
@@ -674,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
674 if (hdr == NULL) 713 if (hdr == NULL)
675 return -1; 714 return -1;
676 715
677 NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name); 716 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
678 NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id); 717 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
718 goto nla_put_failure;
679 719
680 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); 720 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
681 if (nla_grps == NULL) 721 if (nla_grps == NULL)
@@ -685,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
685 if (nest == NULL) 725 if (nest == NULL)
686 goto nla_put_failure; 726 goto nla_put_failure;
687 727
688 NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); 728 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
689 NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, 729 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
690 grp->name); 730 grp->name))
731 goto nla_put_failure;
691 732
692 nla_nest_end(skb, nest); 733 nla_nest_end(skb, nest);
693 nla_nest_end(skb, nla_grps); 734 nla_nest_end(skb, nla_grps);
@@ -795,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
795#ifdef CONFIG_MODULES 836#ifdef CONFIG_MODULES
796 if (res == NULL) { 837 if (res == NULL) {
797 genl_unlock(); 838 genl_unlock();
798 request_module("net-pf-%d-proto-%d-type-%s", 839 request_module("net-pf-%d-proto-%d-family-%s",
799 PF_NETLINK, NETLINK_GENERIC, name); 840 PF_NETLINK, NETLINK_GENERIC, name);
800 genl_lock(); 841 genl_lock();
801 res = genl_family_find_byname(name); 842 res = genl_family_find_byname(name);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7dab229bfbcc..06592d8b4a2b 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -31,7 +31,6 @@
31#include <net/net_namespace.h> 31#include <net/net_namespace.h>
32#include <net/sock.h> 32#include <net/sock.h>
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/system.h>
35#include <linux/fcntl.h> 34#include <linux/fcntl.h>
36#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 35#include <linux/termios.h> /* For TIOCINQ/OUTQ */
37#include <linux/mm.h> 36#include <linux/mm.h>
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 64e6dde9749d..743262becd6e 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -21,7 +21,6 @@
21#include <linux/if_ether.h> /* For the statistics structure. */ 21#include <linux/if_ether.h> /* For the statistics structure. */
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#include <asm/system.h>
25#include <asm/uaccess.h> 24#include <asm/uaccess.h>
26#include <asm/io.h> 25#include <asm/io.h>
27 26
@@ -98,7 +97,7 @@ static int nr_rebuild_header(struct sk_buff *skb)
98 97
99static int nr_header(struct sk_buff *skb, struct net_device *dev, 98static int nr_header(struct sk_buff *skb, struct net_device *dev,
100 unsigned short type, 99 unsigned short type,
101 const void *daddr, const void *saddr, unsigned len) 100 const void *daddr, const void *saddr, unsigned int len)
102{ 101{
103 unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); 102 unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
104 103
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 6d4ef6d65b3d..c3073a2ef634 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -24,7 +24,6 @@
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp_states.h> 25#include <net/tcp_states.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 607fddb4fdbb..0b4bcb2bf38f 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -23,7 +23,6 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/fcntl.h> 26#include <linux/fcntl.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 2cf330162d7e..70ffff76a967 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -26,7 +26,6 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <net/sock.h> 27#include <net/sock.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/system.h>
30#include <linux/fcntl.h> 29#include <linux/fcntl.h>
31#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 30#include <linux/termios.h> /* For TIOCINQ/OUTQ */
32#include <linux/mm.h> 31#include <linux/mm.h>
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 6a947ae50dbd..ca40e2298f5a 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -23,7 +23,6 @@
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/fcntl.h> 26#include <linux/fcntl.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 1cb98e88f5e1..ff2c1b142f57 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -24,7 +24,6 @@
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp_states.h> 25#include <net/tcp_states.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h>
28#include <linux/fcntl.h> 27#include <linux/fcntl.h>
29#include <linux/mm.h> 28#include <linux/mm.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 1e0fa9e57aac..42f630b9a698 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,18 +146,12 @@ static ctl_table nr_table[] = {
146 { } 146 { }
147}; 147};
148 148
149static struct ctl_path nr_path[] = {
150 { .procname = "net", },
151 { .procname = "netrom", },
152 { }
153};
154
155void __init nr_register_sysctl(void) 149void __init nr_register_sysctl(void)
156{ 150{
157 nr_table_header = register_sysctl_paths(nr_path, nr_table); 151 nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
158} 152}
159 153
160void nr_unregister_sysctl(void) 154void nr_unregister_sysctl(void)
161{ 155{
162 unregister_sysctl_table(nr_table_header); 156 unregister_net_sysctl_table(nr_table_header);
163} 157}
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 44c865b86d6f..8d8d9bc4b6ff 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,7 @@ menuconfig NFC
14 be called nfc. 14 be called nfc.
15 15
16source "net/nfc/nci/Kconfig" 16source "net/nfc/nci/Kconfig"
17source "net/nfc/hci/Kconfig"
17source "net/nfc/llcp/Kconfig" 18source "net/nfc/llcp/Kconfig"
18 19
19source "drivers/nfc/Kconfig" 20source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 7b4a6dcfa566..d1a117c2c401 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_NFC) += nfc.o 5obj-$(CONFIG_NFC) += nfc.o
6obj-$(CONFIG_NFC_NCI) += nci/ 6obj-$(CONFIG_NFC_NCI) += nci/
7obj-$(CONFIG_NFC_HCI) += hci/
7 8
8nfc-objs := core.o netlink.o af_nfc.o rawsock.o 9nfc-objs := core.o netlink.o af_nfc.o rawsock.o
9nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o 10nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index da67756425ce..9d68441e2a5a 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -30,7 +30,7 @@ static DEFINE_RWLOCK(proto_tab_lock);
30static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX]; 30static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];
31 31
32static int nfc_sock_create(struct net *net, struct socket *sock, int proto, 32static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
33 int kern) 33 int kern)
34{ 34{
35 int rc = -EPROTONOSUPPORT; 35 int rc = -EPROTONOSUPPORT;
36 36
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3ddf6e698df0..9f6ce011d35d 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -33,6 +33,8 @@
33 33
34#define VERSION "0.1" 34#define VERSION "0.1"
35 35
36#define NFC_CHECK_PRES_FREQ_MS 2000
37
36int nfc_devlist_generation; 38int nfc_devlist_generation;
37DEFINE_MUTEX(nfc_devlist_mutex); 39DEFINE_MUTEX(nfc_devlist_mutex);
38 40
@@ -95,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
95 goto error; 97 goto error;
96 } 98 }
97 99
98 if (dev->polling || dev->remote_activated) { 100 if (dev->polling || dev->active_target) {
99 rc = -EBUSY; 101 rc = -EBUSY;
100 goto error; 102 goto error;
101 } 103 }
@@ -181,13 +183,29 @@ error:
181 return rc; 183 return rc;
182} 184}
183 185
184int nfc_dep_link_up(struct nfc_dev *dev, int target_index, 186static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
185 u8 comm_mode, u8 rf_mode) 187{
188 int i;
189
190 if (dev->n_targets == 0)
191 return NULL;
192
193 for (i = 0; i < dev->n_targets ; i++) {
194 if (dev->targets[i].idx == target_idx)
195 return &dev->targets[i];
196 }
197
198 return NULL;
199}
200
201int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
186{ 202{
187 int rc = 0; 203 int rc = 0;
204 u8 *gb;
205 size_t gb_len;
206 struct nfc_target *target;
188 207
189 pr_debug("dev_name=%s comm:%d rf:%d\n", 208 pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
190 dev_name(&dev->dev), comm_mode, rf_mode);
191 209
192 if (!dev->ops->dep_link_up) 210 if (!dev->ops->dep_link_up)
193 return -EOPNOTSUPP; 211 return -EOPNOTSUPP;
@@ -204,7 +222,21 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
204 goto error; 222 goto error;
205 } 223 }
206 224
207 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode); 225 gb = nfc_llcp_general_bytes(dev, &gb_len);
226 if (gb_len > NFC_MAX_GT_LEN) {
227 rc = -EINVAL;
228 goto error;
229 }
230
231 target = nfc_find_target(dev, target_index);
232 if (target == NULL) {
233 rc = -ENOTCONN;
234 goto error;
235 }
236
237 rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
238 if (!rc)
239 dev->active_target = target;
208 240
209error: 241error:
210 device_unlock(&dev->dev); 242 device_unlock(&dev->dev);
@@ -240,6 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
240 rc = dev->ops->dep_link_down(dev); 272 rc = dev->ops->dep_link_down(dev);
241 if (!rc) { 273 if (!rc) {
242 dev->dep_link_up = false; 274 dev->dep_link_up = false;
275 dev->active_target = NULL;
243 nfc_llcp_mac_is_down(dev); 276 nfc_llcp_mac_is_down(dev);
244 nfc_genl_dep_link_down_event(dev); 277 nfc_genl_dep_link_down_event(dev);
245 } 278 }
@@ -250,7 +283,7 @@ error:
250} 283}
251 284
252int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, 285int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
253 u8 comm_mode, u8 rf_mode) 286 u8 comm_mode, u8 rf_mode)
254{ 287{
255 dev->dep_link_up = true; 288 dev->dep_link_up = true;
256 dev->dep_rf_mode = rf_mode; 289 dev->dep_rf_mode = rf_mode;
@@ -271,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
271int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) 304int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
272{ 305{
273 int rc; 306 int rc;
307 struct nfc_target *target;
274 308
275 pr_debug("dev_name=%s target_idx=%u protocol=%u\n", 309 pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
276 dev_name(&dev->dev), target_idx, protocol); 310 dev_name(&dev->dev), target_idx, protocol);
@@ -282,9 +316,25 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
282 goto error; 316 goto error;
283 } 317 }
284 318
285 rc = dev->ops->activate_target(dev, target_idx, protocol); 319 if (dev->active_target) {
286 if (!rc) 320 rc = -EBUSY;
287 dev->remote_activated = true; 321 goto error;
322 }
323
324 target = nfc_find_target(dev, target_idx);
325 if (target == NULL) {
326 rc = -ENOTCONN;
327 goto error;
328 }
329
330 rc = dev->ops->activate_target(dev, target, protocol);
331 if (!rc) {
332 dev->active_target = target;
333
334 if (dev->ops->check_presence)
335 mod_timer(&dev->check_pres_timer, jiffies +
336 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
337 }
288 338
289error: 339error:
290 device_unlock(&dev->dev); 340 device_unlock(&dev->dev);
@@ -311,8 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
311 goto error; 361 goto error;
312 } 362 }
313 363
314 dev->ops->deactivate_target(dev, target_idx); 364 if (dev->active_target == NULL) {
315 dev->remote_activated = false; 365 rc = -ENOTCONN;
366 goto error;
367 }
368
369 if (dev->active_target->idx != target_idx) {
370 rc = -ENOTCONN;
371 goto error;
372 }
373
374 if (dev->ops->check_presence)
375 del_timer_sync(&dev->check_pres_timer);
376
377 dev->ops->deactivate_target(dev, dev->active_target);
378 dev->active_target = NULL;
316 379
317error: 380error:
318 device_unlock(&dev->dev); 381 device_unlock(&dev->dev);
@@ -330,10 +393,8 @@ error:
330 * 393 *
331 * The user must wait for the callback before calling this function again. 394 * The user must wait for the callback before calling this function again.
332 */ 395 */
333int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, 396int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
334 struct sk_buff *skb, 397 data_exchange_cb_t cb, void *cb_context)
335 data_exchange_cb_t cb,
336 void *cb_context)
337{ 398{
338 int rc; 399 int rc;
339 400
@@ -348,7 +409,27 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
348 goto error; 409 goto error;
349 } 410 }
350 411
351 rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); 412 if (dev->active_target == NULL) {
413 rc = -ENOTCONN;
414 kfree_skb(skb);
415 goto error;
416 }
417
418 if (dev->active_target->idx != target_idx) {
419 rc = -EADDRNOTAVAIL;
420 kfree_skb(skb);
421 goto error;
422 }
423
424 if (dev->ops->check_presence)
425 del_timer_sync(&dev->check_pres_timer);
426
427 rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
428 cb_context);
429
430 if (!rc && dev->ops->check_presence)
431 mod_timer(&dev->check_pres_timer, jiffies +
432 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
352 433
353error: 434error:
354 device_unlock(&dev->dev); 435 device_unlock(&dev->dev);
@@ -357,8 +438,7 @@ error:
357 438
358int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) 439int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
359{ 440{
360 pr_debug("dev_name=%s gb_len=%d\n", 441 pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
361 dev_name(&dev->dev), gb_len);
362 442
363 if (gb_len > NFC_MAX_GT_LEN) 443 if (gb_len > NFC_MAX_GT_LEN)
364 return -EINVAL; 444 return -EINVAL;
@@ -367,12 +447,6 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
367} 447}
368EXPORT_SYMBOL(nfc_set_remote_general_bytes); 448EXPORT_SYMBOL(nfc_set_remote_general_bytes);
369 449
370u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
371{
372 return nfc_llcp_general_bytes(dev, gt_len);
373}
374EXPORT_SYMBOL(nfc_get_local_general_bytes);
375
376/** 450/**
377 * nfc_alloc_send_skb - allocate a skb for data exchange responses 451 * nfc_alloc_send_skb - allocate a skb for data exchange responses
378 * 452 *
@@ -380,8 +454,8 @@ EXPORT_SYMBOL(nfc_get_local_general_bytes);
380 * @gfp: gfp flags 454 * @gfp: gfp flags
381 */ 455 */
382struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, 456struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
383 unsigned int flags, unsigned int size, 457 unsigned int flags, unsigned int size,
384 unsigned int *err) 458 unsigned int *err)
385{ 459{
386 struct sk_buff *skb; 460 struct sk_buff *skb;
387 unsigned int total_size; 461 unsigned int total_size;
@@ -427,9 +501,12 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
427 * The device driver must call this function when one or many nfc targets 501 * The device driver must call this function when one or many nfc targets
428 * are found. After calling this function, the device driver must stop 502 * are found. After calling this function, the device driver must stop
429 * polling for targets. 503 * polling for targets.
504 * IMPORTANT: this function must not be called from an atomic context.
505 * In addition, it must also not be called from a context that would prevent
506 * the NFC Core to call other nfc ops entry point concurrently.
430 */ 507 */
431int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, 508int nfc_targets_found(struct nfc_dev *dev,
432 int n_targets) 509 struct nfc_target *targets, int n_targets)
433{ 510{
434 int i; 511 int i;
435 512
@@ -438,24 +515,24 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
438 dev->polling = false; 515 dev->polling = false;
439 516
440 for (i = 0; i < n_targets; i++) 517 for (i = 0; i < n_targets; i++)
441 targets[i].idx = dev->target_idx++; 518 targets[i].idx = dev->target_next_idx++;
442 519
443 spin_lock_bh(&dev->targets_lock); 520 device_lock(&dev->dev);
444 521
445 dev->targets_generation++; 522 dev->targets_generation++;
446 523
447 kfree(dev->targets); 524 kfree(dev->targets);
448 dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target), 525 dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
449 GFP_ATOMIC); 526 GFP_ATOMIC);
450 527
451 if (!dev->targets) { 528 if (!dev->targets) {
452 dev->n_targets = 0; 529 dev->n_targets = 0;
453 spin_unlock_bh(&dev->targets_lock); 530 device_unlock(&dev->dev);
454 return -ENOMEM; 531 return -ENOMEM;
455 } 532 }
456 533
457 dev->n_targets = n_targets; 534 dev->n_targets = n_targets;
458 spin_unlock_bh(&dev->targets_lock); 535 device_unlock(&dev->dev);
459 536
460 nfc_genl_targets_found(dev); 537 nfc_genl_targets_found(dev);
461 538
@@ -463,17 +540,105 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
463} 540}
464EXPORT_SYMBOL(nfc_targets_found); 541EXPORT_SYMBOL(nfc_targets_found);
465 542
543/**
544 * nfc_target_lost - inform that an activated target went out of field
545 *
546 * @dev: The nfc device that had the activated target in field
547 * @target_idx: the nfc index of the target
548 *
549 * The device driver must call this function when the activated target
550 * goes out of the field.
551 * IMPORTANT: this function must not be called from an atomic context.
552 * In addition, it must also not be called from a context that would prevent
553 * the NFC Core to call other nfc ops entry point concurrently.
554 */
555int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
556{
557 struct nfc_target *tg;
558 int i;
559
560 pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
561
562 device_lock(&dev->dev);
563
564 for (i = 0; i < dev->n_targets; i++) {
565 tg = &dev->targets[i];
566 if (tg->idx == target_idx)
567 break;
568 }
569
570 if (i == dev->n_targets) {
571 device_unlock(&dev->dev);
572 return -EINVAL;
573 }
574
575 dev->targets_generation++;
576 dev->n_targets--;
577 dev->active_target = NULL;
578
579 if (dev->n_targets) {
580 memcpy(&dev->targets[i], &dev->targets[i + 1],
581 (dev->n_targets - i) * sizeof(struct nfc_target));
582 } else {
583 kfree(dev->targets);
584 dev->targets = NULL;
585 }
586
587 device_unlock(&dev->dev);
588
589 nfc_genl_target_lost(dev, target_idx);
590
591 return 0;
592}
593EXPORT_SYMBOL(nfc_target_lost);
594
466static void nfc_release(struct device *d) 595static void nfc_release(struct device *d)
467{ 596{
468 struct nfc_dev *dev = to_nfc_dev(d); 597 struct nfc_dev *dev = to_nfc_dev(d);
469 598
470 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 599 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
471 600
601 if (dev->ops->check_presence) {
602 del_timer_sync(&dev->check_pres_timer);
603 destroy_workqueue(dev->check_pres_wq);
604 }
605
472 nfc_genl_data_exit(&dev->genl_data); 606 nfc_genl_data_exit(&dev->genl_data);
473 kfree(dev->targets); 607 kfree(dev->targets);
474 kfree(dev); 608 kfree(dev);
475} 609}
476 610
611static void nfc_check_pres_work(struct work_struct *work)
612{
613 struct nfc_dev *dev = container_of(work, struct nfc_dev,
614 check_pres_work);
615 int rc;
616
617 device_lock(&dev->dev);
618
619 if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
620 rc = dev->ops->check_presence(dev, dev->active_target);
621 if (!rc) {
622 mod_timer(&dev->check_pres_timer, jiffies +
623 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
624 } else {
625 u32 active_target_idx = dev->active_target->idx;
626 device_unlock(&dev->dev);
627 nfc_target_lost(dev, active_target_idx);
628 return;
629 }
630 }
631
632 device_unlock(&dev->dev);
633}
634
635static void nfc_check_pres_timeout(unsigned long data)
636{
637 struct nfc_dev *dev = (struct nfc_dev *)data;
638
639 queue_work(dev->check_pres_wq, &dev->check_pres_work);
640}
641
477struct class nfc_class = { 642struct class nfc_class = {
478 .name = "nfc", 643 .name = "nfc",
479 .dev_release = nfc_release, 644 .dev_release = nfc_release,
@@ -483,12 +648,12 @@ EXPORT_SYMBOL(nfc_class);
483static int match_idx(struct device *d, void *data) 648static int match_idx(struct device *d, void *data)
484{ 649{
485 struct nfc_dev *dev = to_nfc_dev(d); 650 struct nfc_dev *dev = to_nfc_dev(d);
486 unsigned *idx = data; 651 unsigned int *idx = data;
487 652
488 return dev->idx == *idx; 653 return dev->idx == *idx;
489} 654}
490 655
491struct nfc_dev *nfc_get_device(unsigned idx) 656struct nfc_dev *nfc_get_device(unsigned int idx)
492{ 657{
493 struct device *d; 658 struct device *d;
494 659
@@ -506,15 +671,14 @@ struct nfc_dev *nfc_get_device(unsigned idx)
506 * @supported_protocols: NFC protocols supported by the device 671 * @supported_protocols: NFC protocols supported by the device
507 */ 672 */
508struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, 673struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
509 u32 supported_protocols, 674 u32 supported_protocols,
510 int tx_headroom, 675 int tx_headroom, int tx_tailroom)
511 int tx_tailroom)
512{ 676{
513 static atomic_t dev_no = ATOMIC_INIT(0); 677 static atomic_t dev_no = ATOMIC_INIT(0);
514 struct nfc_dev *dev; 678 struct nfc_dev *dev;
515 679
516 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || 680 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
517 !ops->deactivate_target || !ops->data_exchange) 681 !ops->deactivate_target || !ops->data_exchange)
518 return NULL; 682 return NULL;
519 683
520 if (!supported_protocols) 684 if (!supported_protocols)
@@ -534,12 +698,29 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
534 dev->tx_headroom = tx_headroom; 698 dev->tx_headroom = tx_headroom;
535 dev->tx_tailroom = tx_tailroom; 699 dev->tx_tailroom = tx_tailroom;
536 700
537 spin_lock_init(&dev->targets_lock);
538 nfc_genl_data_init(&dev->genl_data); 701 nfc_genl_data_init(&dev->genl_data);
539 702
703
540 /* first generation must not be 0 */ 704 /* first generation must not be 0 */
541 dev->targets_generation = 1; 705 dev->targets_generation = 1;
542 706
707 if (ops->check_presence) {
708 char name[32];
709 init_timer(&dev->check_pres_timer);
710 dev->check_pres_timer.data = (unsigned long)dev;
711 dev->check_pres_timer.function = nfc_check_pres_timeout;
712
713 INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
714 snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
715 dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
716 WQ_UNBOUND |
717 WQ_MEM_RECLAIM, 1);
718 if (dev->check_pres_wq == NULL) {
719 kfree(dev);
720 return NULL;
721 }
722 }
723
543 return dev; 724 return dev;
544} 725}
545EXPORT_SYMBOL(nfc_allocate_device); 726EXPORT_SYMBOL(nfc_allocate_device);
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
new file mode 100644
index 000000000000..fd67f51d18e9
--- /dev/null
+++ b/net/nfc/hci/Kconfig
@@ -0,0 +1,17 @@
1config NFC_HCI
2 depends on NFC
3 tristate "NFC HCI implementation"
4 default n
5 help
6 Say Y here if you want to build support for a kernel NFC HCI
7 implementation. This is mostly needed for devices that only process
8 HCI frames, like for example the NXP pn544.
9
10config NFC_SHDLC
11 depends on NFC_HCI
12 select CRC_CCITT
13 bool "SHDLC link layer for HCI based NFC drivers"
14 default n
15 ---help---
16 Say yes if you use an NFC HCI driver that requires SHDLC link layer.
17 If unsure, say N here.
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
new file mode 100644
index 000000000000..f9c44b2fb065
--- /dev/null
+++ b/net/nfc/hci/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Linux NFC HCI layer.
3#
4
5obj-$(CONFIG_NFC_HCI) += hci.o
6
7hci-y := core.o hcp.o command.o
8hci-$(CONFIG_NFC_SHDLC) += shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
new file mode 100644
index 000000000000..8729abf5f18b
--- /dev/null
+++ b/net/nfc/hci/command.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/module.h>
26
27#include <net/nfc/hci.h>
28
29#include "hci.h"
30
31static int nfc_hci_result_to_errno(u8 result)
32{
33 switch (result) {
34 case NFC_HCI_ANY_OK:
35 return 0;
36 case NFC_HCI_ANY_E_TIMEOUT:
37 return -ETIMEDOUT;
38 default:
39 return -1;
40 }
41}
42
43static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
44 struct sk_buff *skb, void *cb_data)
45{
46 struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
47
48 pr_debug("HCI Cmd completed with HCI result=%d\n", result);
49
50 hcp_ew->exec_result = nfc_hci_result_to_errno(result);
51 if (hcp_ew->exec_result == 0)
52 hcp_ew->result_skb = skb;
53 else
54 kfree_skb(skb);
55 hcp_ew->exec_complete = true;
56
57 wake_up(hcp_ew->wq);
58}
59
60static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
61 const u8 *param, size_t param_len,
62 struct sk_buff **skb)
63{
64 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
65 struct hcp_exec_waiter hcp_ew;
66 hcp_ew.wq = &ew_wq;
67 hcp_ew.exec_complete = false;
68 hcp_ew.result_skb = NULL;
69
70 pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);
71
72 /* TODO: Define hci cmd execution delay. Should it be the same
73 * for all commands?
74 */
75 hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
76 NFC_HCI_HCP_COMMAND, cmd,
77 param, param_len,
78 nfc_hci_execute_cb, &hcp_ew,
79 3000);
80 if (hcp_ew.exec_result < 0)
81 return hcp_ew.exec_result;
82
83 wait_event(ew_wq, hcp_ew.exec_complete == true);
84
85 if (hcp_ew.exec_result == 0) {
86 if (skb)
87 *skb = hcp_ew.result_skb;
88 else
89 kfree_skb(hcp_ew.result_skb);
90 }
91
92 return hcp_ew.exec_result;
93}
94
95int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
96 const u8 *param, size_t param_len)
97{
98 u8 pipe;
99
100 pr_debug("%d to gate %d\n", event, gate);
101
102 pipe = hdev->gate2pipe[gate];
103 if (pipe == NFC_HCI_INVALID_PIPE)
104 return -EADDRNOTAVAIL;
105
106 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event,
107 param, param_len, NULL, NULL, 0);
108}
109EXPORT_SYMBOL(nfc_hci_send_event);
110
111int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
112 const u8 *param, size_t param_len)
113{
114 u8 pipe;
115
116 pr_debug("\n");
117
118 pipe = hdev->gate2pipe[gate];
119 if (pipe == NFC_HCI_INVALID_PIPE)
120 return -EADDRNOTAVAIL;
121
122 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
123 response, param, param_len, NULL, NULL,
124 0);
125}
126EXPORT_SYMBOL(nfc_hci_send_response);
127
128/*
129 * Execute an hci command sent to gate.
130 * skb will contain response data if success. skb can be NULL if you are not
131 * interested by the response.
132 */
133int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
134 const u8 *param, size_t param_len, struct sk_buff **skb)
135{
136 u8 pipe;
137
138 pr_debug("\n");
139
140 pipe = hdev->gate2pipe[gate];
141 if (pipe == NFC_HCI_INVALID_PIPE)
142 return -EADDRNOTAVAIL;
143
144 return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
145}
146EXPORT_SYMBOL(nfc_hci_send_cmd);
147
148int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
149 const u8 *param, size_t param_len)
150{
151 int r;
152 u8 *tmp;
153
154 /* TODO ELa: reg idx must be inserted before param, but we don't want
155 * to ask the caller to do it to keep a simpler API.
156 * For now, just create a new temporary param buffer. This is far from
157 * optimal though, and the plan is to modify APIs to pass idx down to
158 * nfc_hci_hcp_message_tx where the frame is actually built, thereby
159 * eliminating the need for the temp allocation-copy here.
160 */
161
162 pr_debug("idx=%d to gate %d\n", idx, gate);
163
164 tmp = kmalloc(1 + param_len, GFP_KERNEL);
165 if (tmp == NULL)
166 return -ENOMEM;
167
168 *tmp = idx;
169 memcpy(tmp + 1, param, param_len);
170
171 r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER,
172 tmp, param_len + 1, NULL);
173
174 kfree(tmp);
175
176 return r;
177}
178EXPORT_SYMBOL(nfc_hci_set_param);
179
180int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
181 struct sk_buff **skb)
182{
183 pr_debug("gate=%d regidx=%d\n", gate, idx);
184
185 return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER,
186 &idx, 1, skb);
187}
188EXPORT_SYMBOL(nfc_hci_get_param);
189
190static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
191{
192 struct sk_buff *skb;
193 int r;
194
195 pr_debug("pipe=%d\n", pipe);
196
197 r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE,
198 NULL, 0, &skb);
199 if (r == 0) {
200 /* dest host other than host controller will send
201 * number of pipes already open on this gate before
202 * execution. The number can be found in skb->data[0]
203 */
204 kfree_skb(skb);
205 }
206
207 return r;
208}
209
210static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
211{
212 pr_debug("\n");
213
214 return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
215 NULL, 0, NULL);
216}
217
218static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
219 u8 dest_gate, int *result)
220{
221 struct sk_buff *skb;
222 struct hci_create_pipe_params params;
223 struct hci_create_pipe_resp *resp;
224 u8 pipe;
225
226 pr_debug("gate=%d\n", dest_gate);
227
228 params.src_gate = NFC_HCI_ADMIN_GATE;
229 params.dest_host = dest_host;
230 params.dest_gate = dest_gate;
231
232 *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
233 NFC_HCI_ADM_CREATE_PIPE,
234 (u8 *) &params, sizeof(params), &skb);
235 if (*result == 0) {
236 resp = (struct hci_create_pipe_resp *)skb->data;
237 pipe = resp->pipe;
238 kfree_skb(skb);
239
240 pr_debug("pipe created=%d\n", pipe);
241
242 return pipe;
243 } else
244 return NFC_HCI_INVALID_PIPE;
245}
246
247static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
248{
249 pr_debug("\n");
250
251 return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
252 NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
253}
254
255static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
256{
257 int r;
258
259 u8 param[2];
260
261 /* TODO: Find out what the identity reference data is
262 * and fill param with it. HCI spec 6.1.3.5 */
263
264 pr_debug("\n");
265
266 r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
267 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
268
269 return 0;
270}
271
272int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
273{
274 int r;
275 u8 pipe = hdev->gate2pipe[gate];
276
277 pr_debug("\n");
278
279 if (pipe == NFC_HCI_INVALID_PIPE)
280 return -EADDRNOTAVAIL;
281
282 r = nfc_hci_close_pipe(hdev, pipe);
283 if (r < 0)
284 return r;
285
286 if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) {
287 r = nfc_hci_delete_pipe(hdev, pipe);
288 if (r < 0)
289 return r;
290 }
291
292 hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE;
293
294 return 0;
295}
296EXPORT_SYMBOL(nfc_hci_disconnect_gate);
297
298int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
299{
300 int r;
301
302 pr_debug("\n");
303
304 r = nfc_hci_clear_all_pipes(hdev);
305 if (r < 0)
306 return r;
307
308 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
309
310 return 0;
311}
312EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
313
314int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
315{
316 u8 pipe = NFC_HCI_INVALID_PIPE;
317 bool pipe_created = false;
318 int r;
319
320 pr_debug("\n");
321
322 if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
323 return -EADDRINUSE;
324
325 switch (dest_gate) {
326 case NFC_HCI_LINK_MGMT_GATE:
327 pipe = NFC_HCI_LINK_MGMT_PIPE;
328 break;
329 case NFC_HCI_ADMIN_GATE:
330 pipe = NFC_HCI_ADMIN_PIPE;
331 break;
332 default:
333 pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r);
334 if (pipe == NFC_HCI_INVALID_PIPE)
335 return r;
336 pipe_created = true;
337 break;
338 }
339
340 r = nfc_hci_open_pipe(hdev, pipe);
341 if (r < 0) {
342 if (pipe_created)
343 if (nfc_hci_delete_pipe(hdev, pipe) < 0) {
344 /* TODO: Cannot clean by deleting pipe...
345 * -> inconsistent state */
346 }
347 return r;
348 }
349
350 hdev->gate2pipe[dest_gate] = pipe;
351
352 return 0;
353}
354EXPORT_SYMBOL(nfc_hci_connect_gate);
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
new file mode 100644
index 000000000000..e1a640d2b588
--- /dev/null
+++ b/net/nfc/hci/core.c
@@ -0,0 +1,798 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/nfc.h>
26
27#include <net/nfc/nfc.h>
28#include <net/nfc/hci.h>
29
30#include "hci.h"
31
32/* Largest headroom needed for outgoing HCI commands */
33#define HCI_CMDS_HEADROOM 1
34
35static void nfc_hci_msg_tx_work(struct work_struct *work)
36{
37 struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
38 msg_tx_work);
39 struct hci_msg *msg;
40 struct sk_buff *skb;
41 int r = 0;
42
43 mutex_lock(&hdev->msg_tx_mutex);
44
45 if (hdev->cmd_pending_msg) {
46 if (timer_pending(&hdev->cmd_timer) == 0) {
47 if (hdev->cmd_pending_msg->cb)
48 hdev->cmd_pending_msg->cb(hdev,
49 NFC_HCI_ANY_E_TIMEOUT,
50 NULL,
51 hdev->
52 cmd_pending_msg->
53 cb_context);
54 kfree(hdev->cmd_pending_msg);
55 hdev->cmd_pending_msg = NULL;
56 } else
57 goto exit;
58 }
59
60next_msg:
61 if (list_empty(&hdev->msg_tx_queue))
62 goto exit;
63
64 msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l);
65 list_del(&msg->msg_l);
66
67 pr_debug("msg_tx_queue has a cmd to send\n");
68 while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
69 r = hdev->ops->xmit(hdev, skb);
70 if (r < 0) {
71 kfree_skb(skb);
72 skb_queue_purge(&msg->msg_frags);
73 if (msg->cb)
74 msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
75 msg->cb_context);
76 kfree(msg);
77 break;
78 }
79 }
80
81 if (r)
82 goto next_msg;
83
84 if (msg->wait_response == false) {
85 kfree(msg);
86 goto next_msg;
87 }
88
89 hdev->cmd_pending_msg = msg;
90 mod_timer(&hdev->cmd_timer, jiffies +
91 msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));
92
93exit:
94 mutex_unlock(&hdev->msg_tx_mutex);
95}
96
97static void nfc_hci_msg_rx_work(struct work_struct *work)
98{
99 struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
100 msg_rx_work);
101 struct sk_buff *skb;
102 struct hcp_message *message;
103 u8 pipe;
104 u8 type;
105 u8 instruction;
106
107 while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
108 pipe = skb->data[0];
109 skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
110 message = (struct hcp_message *)skb->data;
111 type = HCP_MSG_GET_TYPE(message->header);
112 instruction = HCP_MSG_GET_CMD(message->header);
113 skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
114
115 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
116 }
117}
118
119void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
120 struct sk_buff *skb)
121{
122 mutex_lock(&hdev->msg_tx_mutex);
123
124 if (hdev->cmd_pending_msg == NULL) {
125 kfree_skb(skb);
126 goto exit;
127 }
128
129 del_timer_sync(&hdev->cmd_timer);
130
131 if (hdev->cmd_pending_msg->cb)
132 hdev->cmd_pending_msg->cb(hdev, result, skb,
133 hdev->cmd_pending_msg->cb_context);
134 else
135 kfree_skb(skb);
136
137 kfree(hdev->cmd_pending_msg);
138 hdev->cmd_pending_msg = NULL;
139
140 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
141
142exit:
143 mutex_unlock(&hdev->msg_tx_mutex);
144}
145
146void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
147 struct sk_buff *skb)
148{
149 kfree_skb(skb);
150}
151
152static u32 nfc_hci_sak_to_protocol(u8 sak)
153{
154 switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
155 case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
156 return NFC_PROTO_MIFARE_MASK;
157 case NFC_HCI_TYPE_A_SEL_PROT_ISO14443:
158 return NFC_PROTO_ISO14443_MASK;
159 case NFC_HCI_TYPE_A_SEL_PROT_DEP:
160 return NFC_PROTO_NFC_DEP_MASK;
161 case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP:
162 return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK;
163 default:
164 return 0xffffffff;
165 }
166}
167
/*
 * Build an nfc_target description for a target discovered on @gate and
 * report it to the nfc core.  For type A readers, the ATQA and SAK
 * registry values are read to derive the supported protocols.
 * Returns 0 on success or a negative errno.
 */
static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
{
	struct nfc_target *targets;
	struct sk_buff *atqa_skb = NULL;
	struct sk_buff *sak_skb = NULL;
	int r;

	pr_debug("from gate %d\n", gate);

	targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
	if (targets == NULL)
		return -ENOMEM;

	switch (gate) {
	case NFC_HCI_RF_READER_A_GATE:
		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
				      NFC_HCI_RF_READER_A_ATQA, &atqa_skb);
		if (r < 0)
			goto exit;

		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
				      NFC_HCI_RF_READER_A_SAK, &sak_skb);
		if (r < 0)
			goto exit;

		/* ATQA is 2 bytes, SAK is 1 byte. */
		if (atqa_skb->len != 2 || sak_skb->len != 1) {
			r = -EPROTO;
			goto exit;
		}

		targets->supported_protocols =
				nfc_hci_sak_to_protocol(sak_skb->data[0]);
		/* 0xffffffff is the sentinel for an unknown SAK value. */
		if (targets->supported_protocols == 0xffffffff) {
			r = -EPROTO;
			goto exit;
		}

		/* ATQA bytes are treated as big endian here. */
		targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
		targets->sel_res = sak_skb->data[0];

		/* Give the driver a chance to refine the target. */
		if (hdev->ops->complete_target_discovered) {
			r = hdev->ops->complete_target_discovered(hdev, gate,
								  targets);
			if (r < 0)
				goto exit;
		}
		break;
	case NFC_HCI_RF_READER_B_GATE:
		targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
		break;
	default:
		/* Unknown gate: the driver must describe the target. */
		if (hdev->ops->target_from_gate)
			r = hdev->ops->target_from_gate(hdev, gate, targets);
		else
			r = -EPROTO;
		if (r < 0)
			goto exit;

		if (hdev->ops->complete_target_discovered) {
			r = hdev->ops->complete_target_discovered(hdev, gate,
								  targets);
			if (r < 0)
				goto exit;
		}
		break;
	}

	targets->hci_reader_gate = gate;

	r = nfc_targets_found(hdev->ndev, targets, 1);

exit:
	kfree(targets);
	kfree_skb(atqa_skb);	/* kfree_skb() is NULL-safe */
	kfree_skb(sak_skb);

	return r;
}
246
/*
 * Deliver an HCI event to this device's handler.  Consumes @skb.
 */
void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
			    struct sk_buff *skb)
{
	int r = 0;

	switch (event) {
	case NFC_HCI_EVT_TARGET_DISCOVERED:
		if (skb->len < 1) {	/* no status data? */
			r = -EPROTO;
			goto exit;
		}

		if (skb->data[0] == 3) {
			/* TODO: Multiple targets in field, none activated
			 * poll is supposedly stopped, but there is no
			 * single target to activate, so nothing to report
			 * up.
			 * if we need to restart poll, we must save the
			 * protocols from the initial poll and reuse here.
			 */
		}

		/* NOTE(review): the status==3 case above is not returned
		 * early, so it also hits this check and yields -EPROTO. */
		if (skb->data[0] != 0) {
			r = -EPROTO;
			goto exit;
		}

		r = nfc_hci_target_discovered(hdev,
					      nfc_hci_pipe2gate(hdev, pipe));
		break;
	default:
		/* TODO: Unknown events are hardware specific
		 * pass them to the driver (needs a new hci_ops) */
		break;
	}

exit:
	kfree_skb(skb);

	if (r) {
		/* TODO: There was an error dispatching the event,
		 * how to propagate up to nfc core?
		 */
	}
}
292
293static void nfc_hci_cmd_timeout(unsigned long data)
294{
295 struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
296
297 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
298}
299
300static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
301 u8 gates[])
302{
303 int r;
304 u8 *p = gates;
305 while (gate_count--) {
306 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
307 if (r < 0)
308 return r;
309 p++;
310 }
311
312 return 0;
313}
314
315static int hci_dev_session_init(struct nfc_hci_dev *hdev)
316{
317 struct sk_buff *skb = NULL;
318 int r;
319 u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */
320 NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
321 NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
322 NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
323 };
324
325 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
326 NFC_HCI_ADMIN_GATE);
327 if (r < 0)
328 goto exit;
329
330 r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
331 NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
332 if (r < 0)
333 goto disconnect_all;
334
335 if (skb->len && skb->len == strlen(hdev->init_data.session_id))
336 if (memcmp(hdev->init_data.session_id, skb->data,
337 skb->len) == 0) {
338 /* TODO ELa: restore gate<->pipe table from
339 * some TBD location.
340 * note: it doesn't seem possible to get the chip
341 * currently open gate/pipe table.
342 * It is only possible to obtain the supported
343 * gate list.
344 */
345
346 /* goto exit
347 * For now, always do a full initialization */
348 }
349
350 r = nfc_hci_disconnect_all_gates(hdev);
351 if (r < 0)
352 goto exit;
353
354 r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
355 if (r < 0)
356 goto disconnect_all;
357
358 r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
359 hdev->init_data.gates);
360 if (r < 0)
361 goto disconnect_all;
362
363 r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
364 NFC_HCI_ADMIN_SESSION_IDENTITY,
365 hdev->init_data.session_id,
366 strlen(hdev->init_data.session_id));
367 if (r == 0)
368 goto exit;
369
370disconnect_all:
371 nfc_hci_disconnect_all_gates(hdev);
372
373exit:
374 if (skb)
375 kfree_skb(skb);
376
377 return r;
378}
379
/*
 * Query the chip's software and hardware version registries and cache
 * the decoded fields in @hdev, logging them at info level.
 * Returns 0 on success, a negative errno on transport failure or if a
 * registry value is not the expected 3 bytes.
 */
static int hci_dev_version(struct nfc_hci_dev *hdev)
{
	int r;
	struct sk_buff *skb;

	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
	if (r < 0)
		return r;

	if (skb->len != 3) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* byte 0: romlib (high nibble) + patch (low nibble) */
	hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
	hdev->sw_patch = skb->data[0] & 0x0f;
	hdev->sw_flashlib_major = skb->data[1];
	hdev->sw_flashlib_minor = skb->data[2];

	kfree_skb(skb);

	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
			      NFC_HCI_ID_MGMT_VERSION_HW, &skb);
	if (r < 0)
		return r;

	if (skb->len != 3) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* byte 0: derivative (3 bits) + hw version (5 bits);
	 * byte 1: mpw (2 bits) + software (6 bits); byte 2: bsid */
	hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
	hdev->hw_version = skb->data[0] & 0x1f;
	hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
	hdev->hw_software = skb->data[1] & 0x3f;
	hdev->hw_bsid = skb->data[2];

	kfree_skb(skb);

	pr_info("SOFTWARE INFO:\n");
	pr_info("RomLib         : %d\n", hdev->sw_romlib);
	pr_info("Patch          : %d\n", hdev->sw_patch);
	pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major);
	pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor);
	pr_info("HARDWARE INFO:\n");
	pr_info("Derivative     : %d\n", hdev->hw_derivative);
	pr_info("HW Version     : %d\n", hdev->hw_version);
	pr_info("#MPW           : %d\n", hdev->hw_mpw);
	pr_info("Software       : %d\n", hdev->hw_software);
	pr_info("BSID Version   : %d\n", hdev->hw_bsid);

	return 0;
}
434
/*
 * nfc core dev_up callback: open the transport, initialize the HCI
 * session and read version info.  On any failure after open, the
 * transport is closed again before returning the error.
 */
static int hci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
	int r = 0;

	if (hdev->ops->open) {
		r = hdev->ops->open(hdev);
		if (r < 0)
			return r;
	}

	r = hci_dev_session_init(hdev);
	if (r < 0)
		goto exit;

	/* Send EVT_END_OPERATION — presumably to stop any RF operation
	 * left over from a previous session; TODO confirm. */
	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
	if (r < 0)
		goto exit;

	if (hdev->ops->hci_ready) {
		r = hdev->ops->hci_ready(hdev);
		if (r < 0)
			goto exit;
	}

	r = hci_dev_version(hdev);
	if (r < 0)
		goto exit;

exit:
	/* Undo the open on any failure above. */
	if (r < 0)
		if (hdev->ops->close)
			hdev->ops->close(hdev);
	return r;
}
471
472static int hci_dev_down(struct nfc_dev *nfc_dev)
473{
474 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
475
476 if (hdev->ops->close)
477 hdev->ops->close(hdev);
478
479 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
480
481 return 0;
482}
483
484static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
485{
486 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
487
488 if (hdev->ops->start_poll)
489 return hdev->ops->start_poll(hdev, protocols);
490 else
491 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
492 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
493}
494
495static void hci_stop_poll(struct nfc_dev *nfc_dev)
496{
497 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
498
499 nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
500 NFC_HCI_EVT_END_OPERATION, NULL, 0);
501}
502
/*
 * nfc core activate_target callback.  Intentionally a no-op: with this
 * HCI implementation the target appears to be usable as soon as it is
 * reported discovered — TODO confirm against nfc core expectations.
 */
static int hci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, u32 protocol)
{
	return 0;
}
508
/*
 * nfc core deactivate_target callback.  Intentionally a no-op; see
 * hci_activate_target() above.
 */
static void hci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target)
{
}
513
/*
 * Exchange a data frame with the activated @target.  Always consumes
 * @skb and delivers the outcome (response skb and status) through @cb.
 * Returns 0; errors are reported via the callback's status argument.
 */
static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
			     struct sk_buff *skb, data_exchange_cb_t cb,
			     void *cb_context)
{
	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
	int r;
	struct sk_buff *res_skb = NULL;

	pr_debug("target_idx=%d\n", target->idx);

	switch (target->hci_reader_gate) {
	case NFC_HCI_RF_READER_A_GATE:
	case NFC_HCI_RF_READER_B_GATE:
		/* Driver may handle the exchange itself; a return value
		 * > 0 means "not handled, fall back to standard HCI". */
		if (hdev->ops->data_exchange) {
			r = hdev->ops->data_exchange(hdev, target, skb,
						     &res_skb);
			if (r <= 0)	/* handled */
				break;
		}

		*skb_push(skb, 1) = 0;	/* CTR, see spec:10.2.2.1 */
		r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
				     NFC_HCI_WR_XCHG_DATA,
				     skb->data, skb->len, &res_skb);
		/*
		 * TODO: Check RF Error indicator to make sure data is valid.
		 * It seems that HCI cmd can complete without error, but data
		 * can be invalid if an RF error occured? Ignore for now.
		 */
		/* NOTE(review): assumes res_skb->len >= 1 on success —
		 * TODO confirm an empty response cannot occur. */
		if (r == 0)
			skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
		break;
	default:
		/* Non-reader gates must be handled by the driver; a
		 * driver return of 1 means "not supported". */
		if (hdev->ops->data_exchange) {
			r = hdev->ops->data_exchange(hdev, target, skb,
						     &res_skb);
			if (r == 1)
				r = -ENOTSUPP;
		}
		else
			r = -ENOTSUPP;
	}

	kfree_skb(skb);

	cb(cb_context, res_skb, r);

	return 0;
}
563
564static int hci_check_presence(struct nfc_dev *nfc_dev,
565 struct nfc_target *target)
566{
567 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
568
569 if (hdev->ops->check_presence)
570 return hdev->ops->check_presence(hdev, target);
571
572 return 0;
573}
574
/* nfc core operations table, dispatching to the HCI handlers above. */
static struct nfc_ops hci_nfc_ops = {
	.dev_up = hci_dev_up,
	.dev_down = hci_dev_down,
	.start_poll = hci_start_poll,
	.stop_poll = hci_stop_poll,
	.activate_target = hci_activate_target,
	.deactivate_target = hci_deactivate_target,
	.data_exchange = hci_data_exchange,
	.check_presence = hci_check_presence,
};
585
586struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
587 struct nfc_hci_init_data *init_data,
588 u32 protocols,
589 int tx_headroom,
590 int tx_tailroom,
591 int max_link_payload)
592{
593 struct nfc_hci_dev *hdev;
594
595 if (ops->xmit == NULL)
596 return NULL;
597
598 if (protocols == 0)
599 return NULL;
600
601 hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL);
602 if (hdev == NULL)
603 return NULL;
604
605 hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
606 tx_headroom + HCI_CMDS_HEADROOM,
607 tx_tailroom);
608 if (!hdev->ndev) {
609 kfree(hdev);
610 return NULL;
611 }
612
613 hdev->ops = ops;
614 hdev->max_data_link_payload = max_link_payload;
615 hdev->init_data = *init_data;
616
617 nfc_set_drvdata(hdev->ndev, hdev);
618
619 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
620
621 return hdev;
622}
623EXPORT_SYMBOL(nfc_hci_allocate_device);
624
/*
 * Free an HCI device allocated with nfc_hci_allocate_device():
 * releases the underlying nfc device, then the hdev itself.
 */
void nfc_hci_free_device(struct nfc_hci_dev *hdev)
{
	nfc_free_device(hdev->ndev);
	kfree(hdev);
}
EXPORT_SYMBOL(nfc_hci_free_device);
631
/*
 * Register an allocated HCI device with the nfc core.  Sets up the TX
 * and RX workqueues, the command timeout timer and the message queues.
 * On failure, any workqueue already created is destroyed before the
 * error is returned.
 */
int nfc_hci_register_device(struct nfc_hci_dev *hdev)
{
	struct device *dev = &hdev->ndev->dev;
	const char *devname = dev_name(dev);
	char name[32];		/* workqueue name buffer */
	int r = 0;

	mutex_init(&hdev->msg_tx_mutex);

	INIT_LIST_HEAD(&hdev->msg_tx_queue);

	INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
	snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
	/* max_active == 1: one work item processed at a time. */
	hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (hdev->msg_tx_wq == NULL) {
		r = -ENOMEM;
		goto exit;
	}

	init_timer(&hdev->cmd_timer);
	hdev->cmd_timer.data = (unsigned long)hdev;
	hdev->cmd_timer.function = nfc_hci_cmd_timeout;

	skb_queue_head_init(&hdev->rx_hcp_frags);

	INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
	snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
	hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (hdev->msg_rx_wq == NULL) {
		r = -ENOMEM;
		goto exit;
	}

	skb_queue_head_init(&hdev->msg_rx_queue);

	r = nfc_register_device(hdev->ndev);

exit:
	/* Tear down whatever was created before the failure. */
	if (r < 0) {
		if (hdev->msg_tx_wq)
			destroy_workqueue(hdev->msg_tx_wq);
		if (hdev->msg_rx_wq)
			destroy_workqueue(hdev->msg_rx_wq);
	}

	return r;
}
EXPORT_SYMBOL(nfc_hci_register_device);
682
683void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
684{
685 struct hci_msg *msg;
686
687 skb_queue_purge(&hdev->rx_hcp_frags);
688 skb_queue_purge(&hdev->msg_rx_queue);
689
690 while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
691 msg_l)) != NULL) {
692 list_del(&msg->msg_l);
693 skb_queue_purge(&msg->msg_frags);
694 kfree(msg);
695 }
696
697 del_timer_sync(&hdev->cmd_timer);
698
699 nfc_unregister_device(hdev->ndev);
700
701 destroy_workqueue(hdev->msg_tx_wq);
702
703 destroy_workqueue(hdev->msg_rx_wq);
704}
705EXPORT_SYMBOL(nfc_hci_unregister_device);
706
/* Attach opaque driver-private data to the HCI device. */
void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata)
{
	hdev->clientdata = clientdata;
}
EXPORT_SYMBOL(nfc_hci_set_clientdata);
712
/* Retrieve the driver-private data set with nfc_hci_set_clientdata(). */
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
{
	return hdev->clientdata;
}
EXPORT_SYMBOL(nfc_hci_get_clientdata);
718
719void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
720{
721 struct hcp_packet *packet;
722 u8 type;
723 u8 instruction;
724 struct sk_buff *hcp_skb;
725 u8 pipe;
726 struct sk_buff *frag_skb;
727 int msg_len;
728
729 if (skb == NULL) {
730 /* TODO ELa: lower layer had permanent failure, need to
731 * propagate that up
732 */
733
734 skb_queue_purge(&hdev->rx_hcp_frags);
735
736 return;
737 }
738
739 packet = (struct hcp_packet *)skb->data;
740 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
741 skb_queue_tail(&hdev->rx_hcp_frags, skb);
742 return;
743 }
744
745 /* it's the last fragment. Does it need re-aggregation? */
746 if (skb_queue_len(&hdev->rx_hcp_frags)) {
747 pipe = packet->header & NFC_HCI_FRAGMENT;
748 skb_queue_tail(&hdev->rx_hcp_frags, skb);
749
750 msg_len = 0;
751 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
752 msg_len += (frag_skb->len -
753 NFC_HCI_HCP_PACKET_HEADER_LEN);
754 }
755
756 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
757 msg_len, GFP_KERNEL);
758 if (hcp_skb == NULL) {
759 /* TODO ELa: cannot deliver HCP message. How to
760 * propagate error up?
761 */
762 }
763
764 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
765
766 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
767 msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
768 memcpy(skb_put(hcp_skb, msg_len),
769 frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
770 msg_len);
771 }
772
773 skb_queue_purge(&hdev->rx_hcp_frags);
774 } else {
775 packet->header &= NFC_HCI_FRAGMENT;
776 hcp_skb = skb;
777 }
778
779 /* if this is a response, dispatch immediately to
780 * unblock waiting cmd context. Otherwise, enqueue to dispatch
781 * in separate context where handler can also execute command.
782 */
783 packet = (struct hcp_packet *)hcp_skb->data;
784 type = HCP_MSG_GET_TYPE(packet->message.header);
785 if (type == NFC_HCI_HCP_RESPONSE) {
786 pipe = packet->header;
787 instruction = HCP_MSG_GET_CMD(packet->message.header);
788 skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
789 NFC_HCI_HCP_MESSAGE_HEADER_LEN);
790 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
791 } else {
792 skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
793 queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
794 }
795}
796EXPORT_SYMBOL(nfc_hci_recv_frame);
797
798MODULE_LICENSE("GPL");
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
new file mode 100644
index 000000000000..45f2fe4fd486
--- /dev/null
+++ b/net/nfc/hci/hci.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __LOCAL_HCI_H
21#define __LOCAL_HCI_H
22
23struct gate_pipe_map {
24 u8 gate;
25 u8 pipe;
26};
27
28struct hcp_message {
29 u8 header; /* type -cmd,evt,rsp- + instruction */
30 u8 data[];
31} __packed;
32
33struct hcp_packet {
34 u8 header; /* cbit+pipe */
35 struct hcp_message message;
36} __packed;
37
38/*
39 * HCI command execution completion callback.
40 * result will be one of the HCI response codes.
41 * skb contains the response data and must be disposed.
42 */
43typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
44 struct sk_buff *skb, void *cb_data);
45
46struct hcp_exec_waiter {
47 wait_queue_head_t *wq;
48 bool exec_complete;
49 int exec_result;
50 struct sk_buff *result_skb;
51};
52
53struct hci_msg {
54 struct list_head msg_l;
55 struct sk_buff_head msg_frags;
56 bool wait_response;
57 hci_cmd_cb_t cb;
58 void *cb_context;
59 unsigned long completion_delay;
60};
61
62struct hci_create_pipe_params {
63 u8 src_gate;
64 u8 dest_host;
65 u8 dest_gate;
66} __packed;
67
68struct hci_create_pipe_resp {
69 u8 src_host;
70 u8 src_gate;
71 u8 dest_host;
72 u8 dest_gate;
73 u8 pipe;
74} __packed;
75
76#define NFC_HCI_FRAGMENT 0x7f
77
78#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
79#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
80#define HCP_MSG_GET_CMD(header) (header & 0x3f)
81
82int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
83 u8 type, u8 instruction,
84 const u8 *payload, size_t payload_len,
85 hci_cmd_cb_t cb, void *cb_data,
86 unsigned long completion_delay);
87
88u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
89
90void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
91 u8 instruction, struct sk_buff *skb);
92
93/* HCP headers */
94#define NFC_HCI_HCP_PACKET_HEADER_LEN 1
95#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1
96#define NFC_HCI_HCP_HEADER_LEN 2
97
98/* HCP types */
99#define NFC_HCI_HCP_COMMAND 0x00
100#define NFC_HCI_HCP_EVENT 0x01
101#define NFC_HCI_HCP_RESPONSE 0x02
102
103/* Generic commands */
104#define NFC_HCI_ANY_SET_PARAMETER 0x01
105#define NFC_HCI_ANY_GET_PARAMETER 0x02
106#define NFC_HCI_ANY_OPEN_PIPE 0x03
107#define NFC_HCI_ANY_CLOSE_PIPE 0x04
108
109/* Reader RF commands */
110#define NFC_HCI_WR_XCHG_DATA 0x10
111
112/* Admin commands */
113#define NFC_HCI_ADM_CREATE_PIPE 0x10
114#define NFC_HCI_ADM_DELETE_PIPE 0x11
115#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12
116#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13
117#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14
118#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
119
120/* Generic responses */
121#define NFC_HCI_ANY_OK 0x00
122#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01
123#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
124#define NFC_HCI_ANY_E_NOK 0x03
125#define NFC_HCI_ANY_E_PIPES_FULL 0x04
126#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
127#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06
128#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
129#define NFC_HCI_ANY_E_INHIBITED 0x08
130#define NFC_HCI_ANY_E_TIMEOUT 0x09
131#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
132#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
133
134/* Pipes */
135#define NFC_HCI_INVALID_PIPE 0x80
136#define NFC_HCI_LINK_MGMT_PIPE 0x00
137#define NFC_HCI_ADMIN_PIPE 0x01
138
139#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
new file mode 100644
index 000000000000..7212cf2c5785
--- /dev/null
+++ b/net/nfc/hci/hcp.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25
26#include <net/nfc/hci.h>
27
28#include "hci.h"
29
30/*
31 * Payload is the HCP message data only. Instruction will be prepended.
32 * Guarantees that cb will be called upon completion or timeout delay
33 * counted from the moment the cmd is sent to the transport.
34 */
35int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
36 u8 type, u8 instruction,
37 const u8 *payload, size_t payload_len,
38 hci_cmd_cb_t cb, void *cb_data,
39 unsigned long completion_delay)
40{
41 struct nfc_dev *ndev = hdev->ndev;
42 struct hci_msg *cmd;
43 const u8 *ptr = payload;
44 int hci_len, err;
45 bool firstfrag = true;
46
47 cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
48 if (cmd == NULL)
49 return -ENOMEM;
50
51 INIT_LIST_HEAD(&cmd->msg_l);
52 skb_queue_head_init(&cmd->msg_frags);
53 cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
54 cmd->cb = cb;
55 cmd->cb_context = cb_data;
56 cmd->completion_delay = completion_delay;
57
58 hci_len = payload_len + 1;
59 while (hci_len > 0) {
60 struct sk_buff *skb;
61 int skb_len, data_link_len;
62 struct hcp_packet *packet;
63
64 if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <=
65 hdev->max_data_link_payload)
66 data_link_len = hci_len;
67 else
68 data_link_len = hdev->max_data_link_payload -
69 NFC_HCI_HCP_PACKET_HEADER_LEN;
70
71 skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN +
72 data_link_len + ndev->tx_tailroom;
73 hci_len -= data_link_len;
74
75 skb = alloc_skb(skb_len, GFP_KERNEL);
76 if (skb == NULL) {
77 err = -ENOMEM;
78 goto out_skb_err;
79 }
80 skb_reserve(skb, ndev->tx_headroom);
81
82 skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
83
84 /* Only the last fragment will have the cb bit set to 1 */
85 packet = (struct hcp_packet *)skb->data;
86 packet->header = pipe;
87 if (firstfrag) {
88 firstfrag = false;
89 packet->message.header = HCP_HEADER(type, instruction);
90 if (ptr) {
91 memcpy(packet->message.data, ptr,
92 data_link_len - 1);
93 ptr += data_link_len - 1;
94 }
95 } else {
96 memcpy(&packet->message, ptr, data_link_len);
97 ptr += data_link_len;
98 }
99
100 /* This is the last fragment, set the cb bit */
101 if (hci_len == 0)
102 packet->header |= ~NFC_HCI_FRAGMENT;
103
104 skb_queue_tail(&cmd->msg_frags, skb);
105 }
106
107 mutex_lock(&hdev->msg_tx_mutex);
108 list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l);
109 mutex_unlock(&hdev->msg_tx_mutex);
110
111 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
112
113 return 0;
114
115out_skb_err:
116 skb_queue_purge(&cmd->msg_frags);
117 kfree(cmd);
118
119 return err;
120}
121
122u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
123{
124 int gate;
125
126 for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
127 if (hdev->gate2pipe[gate] == pipe)
128 return gate;
129
130 return 0xff;
131}
132
/*
 * Receive hcp message for pipe, with type and cmd.
 * skb contains optional message data only.
 * Ownership of @skb passes to the selected handler; messages of
 * unknown type are logged and freed here.
 */
void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
			    u8 instruction, struct sk_buff *skb)
{
	switch (type) {
	case NFC_HCI_HCP_RESPONSE:
		nfc_hci_resp_received(hdev, instruction, skb);
		break;
	case NFC_HCI_HCP_COMMAND:
		nfc_hci_cmd_received(hdev, pipe, instruction, skb);
		break;
	case NFC_HCI_HCP_EVENT:
		nfc_hci_event_received(hdev, pipe, instruction, skb);
		break;
	default:
		pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
		       type, instruction);
		kfree_skb(skb);
		break;
	}
}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
new file mode 100644
index 000000000000..5665dc6d893a
--- /dev/null
+++ b/net/nfc/hci/shdlc.c
@@ -0,0 +1,957 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
21
22#include <linux/sched.h>
23#include <linux/export.h>
24#include <linux/wait.h>
25#include <linux/crc-ccitt.h>
26#include <linux/slab.h>
27#include <linux/skbuff.h>
28
29#include <net/nfc/hci.h>
30#include <net/nfc/shdlc.h>
31
#define SHDLC_LLC_HEAD_ROOM	2
#define SHDLC_LLC_TAIL_ROOM	2

#define SHDLC_MAX_WINDOW	4
#define SHDLC_SREJ_SUPPORT	false

/* Frame control byte: top bits select the frame class. */
#define SHDLC_CONTROL_HEAD_MASK	0xe0
#define SHDLC_CONTROL_HEAD_I	0x80
#define SHDLC_CONTROL_HEAD_I2	0xa0
#define SHDLC_CONTROL_HEAD_S	0xc0
#define SHDLC_CONTROL_HEAD_U	0xe0

#define SHDLC_CONTROL_NS_MASK	0x38
#define SHDLC_CONTROL_NR_MASK	0x07
#define SHDLC_CONTROL_TYPE_MASK	0x18

#define SHDLC_CONTROL_M_MASK	0x1f

/* Supervisory frame types. */
enum sframe_type {
	S_FRAME_RR = 0x00,	/* receive ready (ack) */
	S_FRAME_REJ = 0x01,	/* reject: request retransmission */
	S_FRAME_RNR = 0x02,	/* receive not ready */
	S_FRAME_SREJ = 0x03	/* selective reject */
};

/* Unnumbered frame modifiers. */
enum uframe_modifier {
	U_FRAME_UA = 0x06,	/* unnumbered acknowledge */
	U_FRAME_RSET = 0x19	/* reset */
};

/* Timer values in milliseconds; T1 scales with the window size w. */
#define SHDLC_CONNECT_VALUE_MS	5
#define SHDLC_T1_VALUE_MS(w)	((5 * w) / 4)
#define SHDLC_T2_VALUE_MS	300

#define SHDLC_DUMP_SKB(info, skb)				      \
do {								      \
	pr_debug("%s:\n", info);				      \
	print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET,     \
		       16, 1, skb->data, skb->len, 0);		      \
} while (0)
72
/* checks x < y <= z modulo 8 */
static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
{
	/* The `cond ? true : false` ternaries were redundant — the
	 * comparison expression already yields the boolean result. */
	if (x < z)
		return (x < y) && (y <= z);

	/* Window wraps past 7: y qualifies on either side of the wrap. */
	return (y > x) || (y <= z);
}
81
/* checks x <= y < z modulo 8 */
static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
{
	/* The `cond ? true : false` ternaries were redundant — the
	 * comparison expression already yields the boolean result. */
	if (x <= z)
		return (x <= y) && (y < z);

	/* x > z -> z+8 > x: window wraps past 7. */
	return (y >= x) || (y < z);
}
90
91static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
92 int payload_len)
93{
94 struct sk_buff *skb;
95
96 skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
97 shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
98 payload_len, GFP_KERNEL);
99 if (skb)
100 skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
101
102 return skb;
103}
104
/*
 * Frame the skb for transmission: push the length byte (payload + the
 * 2 CRC bytes that follow) and append the inverted CRC-CCITT of
 * everything so far, low byte first.
 */
static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
{
	u16 crc;
	int len;

	len = skb->len + 2;	/* account for the CRC appended below */
	*skb_push(skb, 1) = len;

	crc = crc_ccitt(0xffff, skb->data, skb->len);
	crc = ~crc;
	*skb_put(skb, 1) = crc & 0xff;	/* little-endian on the wire */
	*skb_put(skb, 1) = crc >> 8;
}
118
119/* immediately sends an S frame. */
120static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
121 enum sframe_type sframe_type, int nr)
122{
123 int r;
124 struct sk_buff *skb;
125
126 pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
127
128 skb = nfc_shdlc_alloc_skb(shdlc, 0);
129 if (skb == NULL)
130 return -ENOMEM;
131
132 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
133
134 nfc_shdlc_add_len_crc(skb);
135
136 r = shdlc->ops->xmit(shdlc, skb);
137
138 kfree_skb(skb);
139
140 return r;
141}
142
143/* immediately sends an U frame. skb may contain optional payload */
144static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
145 struct sk_buff *skb,
146 enum uframe_modifier uframe_modifier)
147{
148 int r;
149
150 pr_debug("uframe_modifier=%d\n", uframe_modifier);
151
152 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
153
154 nfc_shdlc_add_len_crc(skb);
155
156 r = shdlc->ops->xmit(shdlc, skb);
157
158 kfree_skb(skb);
159
160 return r;
161}
162
/*
 * Free ack_pending frames until y_nr - 1, and reset t2 according to
 * the remaining oldest ack_pending frame sent time
 */
static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;
	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */

	pr_debug("release ack pending up to frame %d excluded\n", y_nr);

	/* Drop every acked frame still waiting for acknowledgment. */
	while (dnr != y_nr) {
		pr_debug("release ack pending frame %d\n", dnr);

		skb = skb_dequeue(&shdlc->ack_pending_q);
		kfree_skb(skb);

		dnr = (dnr + 1) % 8;	/* sequence numbers are modulo 8 */
	}

	if (skb_queue_empty(&shdlc->ack_pending_q)) {
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;

			pr_debug
			    ("All sent frames acked. Stopped T2(retransmit)\n");
		}
	} else {
		skb = skb_peek(&shdlc->ack_pending_q);

		/* skb->cb apparently stores the frame's send time in
		 * jiffies, set by the TX path (not visible here) —
		 * TODO confirm. */
		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
		shdlc->t2_active = true;

		pr_debug
		    ("Start T2(retransmit) for remaining unacked sent frames\n");
	}
}
202
203/*
204 * Receive validated frames from lower layer. skb contains HCI payload only.
205 * Handle according to algorithm at spec:10.8.2
206 */
207static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
208 struct sk_buff *skb, int ns, int nr)
209{
210 int x_ns = ns;
211 int y_nr = nr;
212
213 pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
214
215 if (shdlc->state != SHDLC_CONNECTED)
216 goto exit;
217
218 if (x_ns != shdlc->nr) {
219 nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
220 goto exit;
221 }
222
223 if (shdlc->t1_active == false) {
224 shdlc->t1_active = true;
225 mod_timer(&shdlc->t1_timer,
226 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
227 pr_debug("(re)Start T1(send ack)\n");
228 }
229
230 if (skb->len) {
231 nfc_hci_recv_frame(shdlc->hdev, skb);
232 skb = NULL;
233 }
234
235 shdlc->nr = (shdlc->nr + 1) % 8;
236
237 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
238 nfc_shdlc_reset_t2(shdlc, y_nr);
239
240 shdlc->dnr = y_nr;
241 }
242
243exit:
244 if (skb)
245 kfree_skb(skb);
246}
247
248static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
249{
250 pr_debug("remote acked up to frame %d excluded\n", y_nr);
251
252 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
253 nfc_shdlc_reset_t2(shdlc, y_nr);
254 shdlc->dnr = y_nr;
255 }
256}
257
/*
 * Move every unacked frame back to the front of the send queue (newest
 * first so order is preserved) and rewind ns so they are resent.  The
 * len+control prefix and CRC suffix added at send time are stripped
 * since the send path will re-add them.
 */
static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
{
	struct sk_buff *skb;

	pr_debug("ns reset to %d\n", shdlc->dnr);

	while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
		skb_pull(skb, 2);		/* remove len+control */
		skb_trim(skb, skb->len - 2);	/* remove crc */
		skb_queue_head(&shdlc->send_q, skb);
	}
	shdlc->ns = shdlc->dnr;
}
271
272static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
273{
274 struct sk_buff *skb;
275
276 pr_debug("remote asks retransmition from frame %d\n", y_nr);
277
278 if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
279 if (shdlc->t2_active) {
280 del_timer_sync(&shdlc->t2_timer);
281 shdlc->t2_active = false;
282 pr_debug("Stopped T2(retransmit)\n");
283 }
284
285 if (shdlc->dnr != y_nr) {
286 while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
287 skb = skb_dequeue(&shdlc->ack_pending_q);
288 kfree_skb(skb);
289 }
290 }
291
292 nfc_shdlc_requeue_ack_pending(shdlc);
293 }
294}
295
/* See spec RR:10.8.3 REJ:10.8.4 */
static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
				  enum sframe_type s_frame_type, int nr)
{
	struct sk_buff *skb;

	/* S-frames only make sense on an established link */
	if (shdlc->state != SHDLC_CONNECTED)
		return;

	switch (s_frame_type) {
	case S_FRAME_RR:
		nfc_shdlc_rcv_ack(shdlc, nr);
		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
			/* Peer is ready again; queue an empty I-frame so
			 * the send path restarts even with nothing queued */
			shdlc->rnr = false;
			if (shdlc->send_q.qlen == 0) {
				skb = nfc_shdlc_alloc_skb(shdlc, 0);
				if (skb)
					skb_queue_tail(&shdlc->send_q, skb);
			}
		}
		break;
	case S_FRAME_REJ:
		nfc_shdlc_rcv_rej(shdlc, nr);
		break;
	case S_FRAME_RNR:
		/* Receiver Not Ready: take the ack, then pause tx */
		nfc_shdlc_rcv_ack(shdlc, nr);
		shdlc->rnr = true;
		break;
	default:
		break;
	}
}
328
/*
 * Finish a connect attempt (r == 0 on success, negative errno otherwise)
 * and wake the thread sleeping in nfc_shdlc_connect().
 */
static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
{
	pr_debug("result=%d\n", r);

	del_timer_sync(&shdlc->connect_timer);

	if (r == 0) {
		/* Fresh link: reset all sequence counters */
		shdlc->ns = 0;
		shdlc->nr = 0;
		shdlc->dnr = 0;

		shdlc->state = SHDLC_CONNECTED;
	} else {
		shdlc->state = SHDLC_DISCONNECTED;

		/*
		 * TODO: Could it be possible that there are pending
		 * executing commands that are waiting for connect to complete
		 * before they can be carried? As connect is a blocking
		 * operation, it would require that the userspace process can
		 * send commands on the same device from a second thread before
		 * the device is up. I don't think that is possible, is it?
		 */
	}

	/* connect_result != 1 is the wait_event wake-up condition */
	shdlc->connect_result = r;

	wake_up(shdlc->connect_wq);
}
358
359static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
360{
361 struct sk_buff *skb;
362
363 pr_debug("\n");
364
365 skb = nfc_shdlc_alloc_skb(shdlc, 2);
366 if (skb == NULL)
367 return -ENOMEM;
368
369 *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
370 *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
371
372 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
373}
374
375static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
376{
377 struct sk_buff *skb;
378
379 pr_debug("\n");
380
381 skb = nfc_shdlc_alloc_skb(shdlc, 0);
382 if (skb == NULL)
383 return -ENOMEM;
384
385 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
386}
387
/*
 * Handle U-frames: RSET carries the peer's proposed link parameters,
 * UA acknowledges ours. Always consumes the skb.
 */
static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier u_frame_modifier)
{
	u8 w = SHDLC_MAX_WINDOW;	/* defaults if peer sends no params */
	bool srej_support = SHDLC_SREJ_SUPPORT;
	int r;

	pr_debug("u_frame_modifier=%d\n", u_frame_modifier);

	switch (u_frame_modifier) {
	case U_FRAME_RSET:
		if (shdlc->state == SHDLC_NEGOCIATING) {
			/* we sent RSET, but chip wants to negociate */
			if (skb->len > 0)
				w = skb->data[0];

			if (skb->len > 1)
				srej_support = skb->data[1] & 0x01 ? true :
					       false;

			/* Accept only parameters within our capabilities;
			 * otherwise stay in NEGOCIATING until timeout */
			if ((w <= SHDLC_MAX_WINDOW) &&
			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
				shdlc->w = w;
				shdlc->srej_support = srej_support;
				r = nfc_shdlc_connect_send_ua(shdlc);
				nfc_shdlc_connect_complete(shdlc, r);
			}
		} else if (shdlc->state > SHDLC_NEGOCIATING) {
			/*
			 * TODO: Chip wants to reset link
			 * send ua, empty skb lists, reset counters
			 * propagate info to HCI layer
			 */
		}
		break;
	case U_FRAME_UA:
		/* Peer accepted our RSET: connection established */
		if ((shdlc->state == SHDLC_CONNECTING &&
		     shdlc->connect_tries > 0) ||
		    (shdlc->state == SHDLC_NEGOCIATING))
			nfc_shdlc_connect_complete(shdlc, 0);
		break;
	default:
		break;
	}

	kfree_skb(skb);
}
436
/*
 * Drain the receive queue, dispatching each frame by its control field.
 * skb ownership: I-frame and U-frame handlers consume the skb; S-frames
 * and unknown frames are freed here.
 */
static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
{
	struct sk_buff *skb;
	u8 control;
	int nr;
	int ns;
	enum sframe_type s_frame_type;
	enum uframe_modifier u_frame_modifier;

	if (shdlc->rcv_q.qlen)
		pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);

	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
		/* First byte is the shdlc control field */
		control = skb->data[0];
		skb_pull(skb, 1);
		switch (control & SHDLC_CONTROL_HEAD_MASK) {
		case SHDLC_CONTROL_HEAD_I:
		case SHDLC_CONTROL_HEAD_I2:
			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
			break;
		case SHDLC_CONTROL_HEAD_S:
			/* S-frames carry no payload: free skb here */
			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
			kfree_skb(skb);
			break;
		case SHDLC_CONTROL_HEAD_U:
			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
			nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
			break;
		default:
			pr_err("UNKNOWN Control=%d\n", control);
			kfree_skb(skb);
			break;
		}
	}
}
476
/*
 * Number of frames sent but not yet acknowledged, accounting for
 * modulo-8 sequence number wraparound.
 */
static int nfc_shdlc_w_used(int ns, int dnr)
{
	return (dnr <= ns) ? (ns - dnr) : (8 - dnr + ns);
}
488
/* Send frames according to algorithm at spec:10.8.1 */
static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
{
	struct sk_buff *skb;
	int r;
	unsigned long time_sent;

	if (shdlc->send_q.qlen)
		pr_debug
		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
		     shdlc->rnr == false ? "false" : "true",
		     shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
		     shdlc->ack_pending_q.qlen);

	/* Send while data is queued, the tx window is open, and the
	 * peer has not signalled Receiver-Not-Ready */
	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
	       (shdlc->rnr == false)) {

		/* Outgoing I-frame piggybacks our nr: no separate ack needed */
		if (shdlc->t1_active) {
			del_timer_sync(&shdlc->t1_timer);
			shdlc->t1_active = false;
			pr_debug("Stopped T1(send ack)\n");
		}

		skb = skb_dequeue(&shdlc->send_q);

		*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
				    shdlc->nr;

		pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
			 shdlc->nr);
		/* SHDLC_DUMP_SKB("shdlc frame written", skb); */

		nfc_shdlc_add_len_crc(skb);

		r = shdlc->ops->xmit(shdlc, skb);
		if (r < 0) {
			/*
			 * TODO: Cannot send, shdlc machine is dead, we
			 * must propagate the information up to HCI.
			 */
			/* NOTE(review): skb is neither freed nor requeued on
			 * this path — looks like a leak; confirm against the
			 * ops->xmit ownership contract */
			shdlc->hard_fault = r;
			break;
		}

		shdlc->ns = (shdlc->ns + 1) % 8;

		/* Stamp send time in cb so T2 can be rearmed relative to
		 * the oldest unacked frame (see nfc_shdlc_reset_t2) */
		time_sent = jiffies;
		*(unsigned long *)skb->cb = time_sent;

		skb_queue_tail(&shdlc->ack_pending_q, skb);

		if (shdlc->t2_active == false) {
			shdlc->t2_active = true;
			mod_timer(&shdlc->t2_timer, time_sent +
				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
			pr_debug("Started T2 (retransmit)\n");
		}
	}
}
549
/* Connect timer callback: defer all handling to the sm worker, which
 * detects expiry via timer_pending() in the NEGOCIATING state */
static void nfc_shdlc_connect_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
558
/* T1 (send-ack) timer callback: kick the sm worker from softirq context */
static void nfc_shdlc_t1_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("SoftIRQ: need to send ack\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
567
/* T2 (retransmit) timer callback: kick the sm worker from softirq context */
static void nfc_shdlc_t2_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("SoftIRQ: need to retransmit\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
576
/*
 * State machine worker: the single place where shdlc state transitions
 * and queue processing happen, serialized by state_mutex.
 */
static void nfc_shdlc_sm_work(struct work_struct *work)
{
	struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
	int r;

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	switch (shdlc->state) {
	case SHDLC_DISCONNECTED:
		/* Link is down: discard everything still queued */
		skb_queue_purge(&shdlc->rcv_q);
		skb_queue_purge(&shdlc->send_q);
		skb_queue_purge(&shdlc->ack_pending_q);
		break;
	case SHDLC_CONNECTING:
		/* Retry RSET up to 5 times before giving up with -ETIME */
		if (shdlc->connect_tries++ < 5)
			r = nfc_shdlc_connect_initiate(shdlc);
		else
			r = -ETIME;
		if (r < 0)
			nfc_shdlc_connect_complete(shdlc, r);
		else {
			mod_timer(&shdlc->connect_timer, jiffies +
				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));

			shdlc->state = SHDLC_NEGOCIATING;
		}
		break;
	case SHDLC_NEGOCIATING:
		/* Timer no longer pending means negotiation timed out:
		 * fall back to CONNECTING and reschedule ourselves */
		if (timer_pending(&shdlc->connect_timer) == 0) {
			shdlc->state = SHDLC_CONNECTING;
			queue_work(shdlc->sm_wq, &shdlc->sm_work);
		}

		nfc_shdlc_handle_rcv_queue(shdlc);
		break;
	case SHDLC_CONNECTED:
		nfc_shdlc_handle_rcv_queue(shdlc);
		nfc_shdlc_handle_send_queue(shdlc);

		/* t1_active set but timer not pending => T1 expired:
		 * explicitly ack received frames with an RR S-frame */
		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
			pr_debug
			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");

			shdlc->t1_active = false;
			r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
						   shdlc->nr);
			if (r < 0)
				shdlc->hard_fault = r;
		}

		/* T2 expired: peer did not ack in time, retransmit */
		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
			pr_debug
			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");

			shdlc->t2_active = false;

			nfc_shdlc_requeue_ack_pending(shdlc);
			nfc_shdlc_handle_send_queue(shdlc);
		}

		if (shdlc->hard_fault) {
			/*
			 * TODO: Handle hard_fault that occured during
			 * this invocation of the shdlc worker
			 */
		}
		break;
	default:
		break;
	}
	mutex_unlock(&shdlc->state_mutex);
}
651
/*
 * Called from syscall context to establish shdlc link. Sleeps until
 * link is ready or failure.
 */
static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
{
	/* On-stack waitqueue is safe: the sm worker wakes us before we
	 * return, and connect_wq is not used afterwards */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	/* 1 is a sentinel: real results are 0 or a negative errno */
	shdlc->connect_result = 1;

	mutex_unlock(&shdlc->state_mutex);

	queue_work(shdlc->sm_wq, &shdlc->sm_work);

	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}
677
/* Tear the link down; the sm worker purges all queues on next run */
static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
{
	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_DISCONNECTED;

	mutex_unlock(&shdlc->state_mutex);

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
690
691/*
692 * Receive an incoming shdlc frame. Frame has already been crc-validated.
693 * skb contains only LLC header and payload.
694 * If skb == NULL, it is a notification that the link below is dead.
695 */
696void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
697{
698 if (skb == NULL) {
699 pr_err("NULL Frame -> link is dead\n");
700 shdlc->hard_fault = -EREMOTEIO;
701 } else {
702 SHDLC_DUMP_SKB("incoming frame", skb);
703 skb_queue_tail(&shdlc->rcv_q, skb);
704 }
705
706 queue_work(shdlc->sm_wq, &shdlc->sm_work);
707}
708EXPORT_SYMBOL(nfc_shdlc_recv_frame);
709
710static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
711{
712 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
713 int r;
714
715 pr_debug("\n");
716
717 if (shdlc->ops->open) {
718 r = shdlc->ops->open(shdlc);
719 if (r < 0)
720 return r;
721 }
722
723 r = nfc_shdlc_connect(shdlc);
724 if (r < 0 && shdlc->ops->close)
725 shdlc->ops->close(shdlc);
726
727 return r;
728}
729
/* HCI close callback: bring the shdlc link down, then the transport */
static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
{
	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);

	pr_debug("\n");

	nfc_shdlc_disconnect(shdlc);

	if (shdlc->ops->close)
		shdlc->ops->close(shdlc);
}
741
742static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
743{
744 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
745 int r = 0;
746
747 pr_debug("\n");
748
749 if (shdlc->ops->hci_ready)
750 r = shdlc->ops->hci_ready(shdlc);
751
752 return r;
753}
754
/* HCI tx entry point: queue the HCP packet and kick the sm worker.
 * skb ownership passes to the send queue. */
static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
{
	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);

	SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);

	skb_queue_tail(&shdlc->send_q, skb);

	queue_work(shdlc->sm_wq, &shdlc->sm_work);

	return 0;
}
767
768static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
769{
770 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
771
772 pr_debug("\n");
773
774 if (shdlc->ops->start_poll)
775 return shdlc->ops->start_poll(shdlc, protocols);
776
777 return 0;
778}
779
780static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
781 struct nfc_target *target)
782{
783 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
784
785 if (shdlc->ops->target_from_gate)
786 return shdlc->ops->target_from_gate(shdlc, gate, target);
787
788 return -EPERM;
789}
790
791static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
792 u8 gate,
793 struct nfc_target *target)
794{
795 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
796
797 pr_debug("\n");
798
799 if (shdlc->ops->complete_target_discovered)
800 return shdlc->ops->complete_target_discovered(shdlc, gate,
801 target);
802
803 return 0;
804}
805
806static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
807 struct nfc_target *target,
808 struct sk_buff *skb,
809 struct sk_buff **res_skb)
810{
811 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
812
813 if (shdlc->ops->data_exchange)
814 return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
815
816 return -EPERM;
817}
818
819static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
820 struct nfc_target *target)
821{
822 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
823
824 if (shdlc->ops->check_presence)
825 return shdlc->ops->check_presence(shdlc, target);
826
827 return 0;
828}
829
/* HCI callbacks: shdlc sits between the HCI core and the driver's ops */
static struct nfc_hci_ops shdlc_ops = {
	.open = nfc_shdlc_open,
	.close = nfc_shdlc_close,
	.hci_ready = nfc_shdlc_hci_ready,
	.xmit = nfc_shdlc_xmit,
	.start_poll = nfc_shdlc_start_poll,
	.target_from_gate = nfc_shdlc_target_from_gate,
	.complete_target_discovered = nfc_shdlc_complete_target_discovered,
	.data_exchange = nfc_shdlc_data_exchange,
	.check_presence = nfc_shdlc_check_presence,
};
841
/*
 * Allocate and register an shdlc instance and its HCI device.
 * Returns NULL on any failure; on success the caller owns the object
 * and must release it with nfc_shdlc_free().
 */
struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
				     struct nfc_hci_init_data *init_data,
				     u32 protocols,
				     int tx_headroom, int tx_tailroom,
				     int max_link_payload, const char *devname)
{
	struct nfc_shdlc *shdlc;
	int r;
	char name[32];

	/* xmit is the only mandatory driver op */
	if (ops->xmit == NULL)
		return NULL;

	shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
	if (shdlc == NULL)
		return NULL;

	mutex_init(&shdlc->state_mutex);
	shdlc->ops = ops;
	shdlc->state = SHDLC_DISCONNECTED;

	init_timer(&shdlc->connect_timer);
	shdlc->connect_timer.data = (unsigned long)shdlc;
	shdlc->connect_timer.function = nfc_shdlc_connect_timeout;

	init_timer(&shdlc->t1_timer);
	shdlc->t1_timer.data = (unsigned long)shdlc;
	shdlc->t1_timer.function = nfc_shdlc_t1_timeout;

	init_timer(&shdlc->t2_timer);
	shdlc->t2_timer.data = (unsigned long)shdlc;
	shdlc->t2_timer.function = nfc_shdlc_t2_timeout;

	shdlc->w = SHDLC_MAX_WINDOW;
	shdlc->srej_support = SHDLC_SREJ_SUPPORT;

	skb_queue_head_init(&shdlc->rcv_q);
	skb_queue_head_init(&shdlc->send_q);
	skb_queue_head_init(&shdlc->ack_pending_q);

	INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
	snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
	/* max_active == 1: serializes the whole state machine */
	shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
				       WQ_MEM_RECLAIM, 1);
	if (shdlc->sm_wq == NULL)
		goto err_allocwq;

	shdlc->client_headroom = tx_headroom;
	shdlc->client_tailroom = tx_tailroom;

	/* Reserve extra room for the shdlc len/control header and crc */
	shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
					      tx_headroom + SHDLC_LLC_HEAD_ROOM,
					      tx_tailroom + SHDLC_LLC_TAIL_ROOM,
					      max_link_payload);
	if (shdlc->hdev == NULL)
		goto err_allocdev;

	nfc_hci_set_clientdata(shdlc->hdev, shdlc);

	r = nfc_hci_register_device(shdlc->hdev);
	if (r < 0)
		goto err_regdev;

	return shdlc;

err_regdev:
	nfc_hci_free_device(shdlc->hdev);

err_allocdev:
	destroy_workqueue(shdlc->sm_wq);

err_allocwq:
	kfree(shdlc);

	return NULL;
}
EXPORT_SYMBOL(nfc_shdlc_allocate);
919
/* Release an shdlc instance created by nfc_shdlc_allocate() */
void nfc_shdlc_free(struct nfc_shdlc *shdlc)
{
	pr_debug("\n");

	/* TODO: Check that this cannot be called while still in use */

	nfc_hci_unregister_device(shdlc->hdev);
	nfc_hci_free_device(shdlc->hdev);

	/* Stop the worker before purging the queues it touches */
	destroy_workqueue(shdlc->sm_wq);

	skb_queue_purge(&shdlc->rcv_q);
	skb_queue_purge(&shdlc->send_q);
	skb_queue_purge(&shdlc->ack_pending_q);

	kfree(shdlc);
}
EXPORT_SYMBOL(nfc_shdlc_free);
938
/* Attach opaque driver-private data to the shdlc instance */
void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
{
	pr_debug("\n");

	shdlc->clientdata = clientdata;
}
EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
946
/* Retrieve the driver-private data set by nfc_shdlc_set_clientdata() */
void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
{
	return shdlc->clientdata;
}
EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
952
/* Access the HCI device backing this shdlc instance */
struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
{
	return shdlc->hdev;
}
EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 151f2ef429c4..bf8ae4f0b90c 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -102,7 +102,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
102 length = llcp_tlv_length[type]; 102 length = llcp_tlv_length[type];
103 if (length == 0 && value_length == 0) 103 if (length == 0 && value_length == 0)
104 return NULL; 104 return NULL;
105 else 105 else if (length == 0)
106 length = value_length; 106 length = value_length;
107 107
108 *tlv_length = 2 + length; 108 *tlv_length = 2 + length;
@@ -118,7 +118,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
118} 118}
119 119
120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
121 u8 *tlv_array, u16 tlv_array_len) 121 u8 *tlv_array, u16 tlv_array_len)
122{ 122{
123 u8 *tlv = tlv_array, type, length, offset = 0; 123 u8 *tlv = tlv_array, type, length, offset = 0;
124 124
@@ -152,6 +152,8 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
152 case LLCP_TLV_RW: 152 case LLCP_TLV_RW:
153 local->remote_rw = llcp_tlv_rw(tlv); 153 local->remote_rw = llcp_tlv_rw(tlv);
154 break; 154 break;
155 case LLCP_TLV_SN:
156 break;
155 default: 157 default:
156 pr_err("Invalid gt tlv value 0x%x\n", type); 158 pr_err("Invalid gt tlv value 0x%x\n", type);
157 break; 159 break;
@@ -162,15 +164,15 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
162 } 164 }
163 165
164 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", 166 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
165 local->remote_version, local->remote_miu, 167 local->remote_version, local->remote_miu,
166 local->remote_lto, local->remote_opt, 168 local->remote_lto, local->remote_opt,
167 local->remote_wks, local->remote_rw); 169 local->remote_wks, local->remote_rw);
168 170
169 return 0; 171 return 0;
170} 172}
171 173
172static struct sk_buff *llcp_add_header(struct sk_buff *pdu, 174static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
173 u8 dsap, u8 ssap, u8 ptype) 175 u8 dsap, u8 ssap, u8 ptype)
174{ 176{
175 u8 header[2]; 177 u8 header[2];
176 178
@@ -186,7 +188,8 @@ static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
186 return pdu; 188 return pdu;
187} 189}
188 190
189static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length) 191static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv,
192 u8 tlv_length)
190{ 193{
191 /* XXX Add an skb length check */ 194 /* XXX Add an skb length check */
192 195
@@ -199,7 +202,7 @@ static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
199} 202}
200 203
201static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, 204static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
202 u8 cmd, u16 size) 205 u8 cmd, u16 size)
203{ 206{
204 struct sk_buff *skb; 207 struct sk_buff *skb;
205 int err; 208 int err;
@@ -208,7 +211,7 @@ static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
208 return NULL; 211 return NULL;
209 212
210 skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, 213 skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
211 size + LLCP_HEADER_SIZE, &err); 214 size + LLCP_HEADER_SIZE, &err);
212 if (skb == NULL) { 215 if (skb == NULL) {
213 pr_err("Could not allocate PDU\n"); 216 pr_err("Could not allocate PDU\n");
214 return NULL; 217 return NULL;
@@ -245,7 +248,7 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
245 248
246 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); 249 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
247 250
248 skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC); 251 skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
249 252
250 skb_queue_tail(&local->tx_queue, skb); 253 skb_queue_tail(&local->tx_queue, skb);
251 254
@@ -276,7 +279,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
276 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); 279 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
277 280
278 return nfc_data_exchange(dev, local->target_idx, skb, 281 return nfc_data_exchange(dev, local->target_idx, skb,
279 nfc_llcp_recv, local); 282 nfc_llcp_recv, local);
280} 283}
281 284
282int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) 285int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
@@ -284,6 +287,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
284 struct nfc_llcp_local *local; 287 struct nfc_llcp_local *local;
285 struct sk_buff *skb; 288 struct sk_buff *skb;
286 u8 *service_name_tlv = NULL, service_name_tlv_length; 289 u8 *service_name_tlv = NULL, service_name_tlv_length;
290 u8 *miux_tlv = NULL, miux_tlv_length;
291 u8 *rw_tlv = NULL, rw_tlv_length, rw;
292 __be16 miux;
287 int err; 293 int err;
288 u16 size = 0; 294 u16 size = 0;
289 295
@@ -295,12 +301,21 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
295 301
296 if (sock->service_name != NULL) { 302 if (sock->service_name != NULL) {
297 service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN, 303 service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
298 sock->service_name, 304 sock->service_name,
299 sock->service_name_len, 305 sock->service_name_len,
300 &service_name_tlv_length); 306 &service_name_tlv_length);
301 size += service_name_tlv_length; 307 size += service_name_tlv_length;
302 } 308 }
303 309
310 miux = cpu_to_be16(LLCP_MAX_MIUX);
311 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
312 &miux_tlv_length);
313 size += miux_tlv_length;
314
315 rw = LLCP_MAX_RW;
316 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
317 size += rw_tlv_length;
318
304 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); 319 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
305 320
306 skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); 321 skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
@@ -311,7 +326,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
311 326
312 if (service_name_tlv != NULL) 327 if (service_name_tlv != NULL)
313 skb = llcp_add_tlv(skb, service_name_tlv, 328 skb = llcp_add_tlv(skb, service_name_tlv,
314 service_name_tlv_length); 329 service_name_tlv_length);
330
331 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
332 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
315 333
316 skb_queue_tail(&local->tx_queue, skb); 334 skb_queue_tail(&local->tx_queue, skb);
317 335
@@ -321,6 +339,8 @@ error_tlv:
321 pr_err("error %d\n", err); 339 pr_err("error %d\n", err);
322 340
323 kfree(service_name_tlv); 341 kfree(service_name_tlv);
342 kfree(miux_tlv);
343 kfree(rw_tlv);
324 344
325 return err; 345 return err;
326} 346}
@@ -329,6 +349,11 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
329{ 349{
330 struct nfc_llcp_local *local; 350 struct nfc_llcp_local *local;
331 struct sk_buff *skb; 351 struct sk_buff *skb;
352 u8 *miux_tlv = NULL, miux_tlv_length;
353 u8 *rw_tlv = NULL, rw_tlv_length, rw;
354 __be16 miux;
355 int err;
356 u16 size = 0;
332 357
333 pr_debug("Sending CC\n"); 358 pr_debug("Sending CC\n");
334 359
@@ -336,13 +361,35 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
336 if (local == NULL) 361 if (local == NULL)
337 return -ENODEV; 362 return -ENODEV;
338 363
339 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0); 364 miux = cpu_to_be16(LLCP_MAX_MIUX);
340 if (skb == NULL) 365 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
341 return -ENOMEM; 366 &miux_tlv_length);
367 size += miux_tlv_length;
368
369 rw = LLCP_MAX_RW;
370 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
371 size += rw_tlv_length;
372
373 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
374 if (skb == NULL) {
375 err = -ENOMEM;
376 goto error_tlv;
377 }
378
379 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
380 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
342 381
343 skb_queue_tail(&local->tx_queue, skb); 382 skb_queue_tail(&local->tx_queue, skb);
344 383
345 return 0; 384 return 0;
385
386error_tlv:
387 pr_err("error %d\n", err);
388
389 kfree(miux_tlv);
390 kfree(rw_tlv);
391
392 return err;
346} 393}
347 394
348int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) 395int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
@@ -369,7 +416,7 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
369 416
370 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); 417 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
371 418
372 skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM); 419 skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
373 420
374 memcpy(skb_put(skb, 1), &reason, 1); 421 memcpy(skb_put(skb, 1), &reason, 1);
375 422
@@ -397,3 +444,87 @@ int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
397 444
398 return 0; 445 return 0;
399} 446}
447
448int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
449 struct msghdr *msg, size_t len)
450{
451 struct sk_buff *pdu;
452 struct sock *sk = &sock->sk;
453 struct nfc_llcp_local *local;
454 size_t frag_len = 0, remaining_len;
455 u8 *msg_data, *msg_ptr;
456
457 pr_debug("Send I frame len %zd\n", len);
458
459 local = sock->local;
460 if (local == NULL)
461 return -ENODEV;
462
463 msg_data = kzalloc(len, GFP_KERNEL);
464 if (msg_data == NULL)
465 return -ENOMEM;
466
467 if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
468 kfree(msg_data);
469 return -EFAULT;
470 }
471
472 remaining_len = len;
473 msg_ptr = msg_data;
474
475 while (remaining_len > 0) {
476
477 frag_len = min_t(size_t, local->remote_miu, remaining_len);
478
479 pr_debug("Fragment %zd bytes remaining %zd",
480 frag_len, remaining_len);
481
482 pdu = llcp_allocate_pdu(sock, LLCP_PDU_I,
483 frag_len + LLCP_SEQUENCE_SIZE);
484 if (pdu == NULL)
485 return -ENOMEM;
486
487 skb_put(pdu, LLCP_SEQUENCE_SIZE);
488
489 memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
490
491 skb_queue_tail(&sock->tx_queue, pdu);
492
493 lock_sock(sk);
494
495 nfc_llcp_queue_i_frames(sock);
496
497 release_sock(sk);
498
499 remaining_len -= frag_len;
500 msg_ptr += frag_len;
501 }
502
503 kfree(msg_data);
504
505 return len;
506}
507
508int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
509{
510 struct sk_buff *skb;
511 struct nfc_llcp_local *local;
512
513 pr_debug("Send rr nr %d\n", sock->recv_n);
514
515 local = sock->local;
516 if (local == NULL)
517 return -ENODEV;
518
519 skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE);
520 if (skb == NULL)
521 return -ENOMEM;
522
523 skb_put(skb, LLCP_SEQUENCE_SIZE);
524
525 skb->data[2] = sock->recv_n;
526
527 skb_queue_head(&local->tx_queue, skb);
528
529 return 0;
530}
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 1d32680807d6..42994fac26d6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -37,7 +37,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
37 struct sock *sk, *parent_sk; 37 struct sock *sk, *parent_sk;
38 int i; 38 int i;
39 39
40
41 mutex_lock(&local->socket_lock); 40 mutex_lock(&local->socket_lock);
42 41
43 for (i = 0; i < LLCP_MAX_SAP; i++) { 42 for (i = 0; i < LLCP_MAX_SAP; i++) {
@@ -47,7 +46,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
47 46
48 /* Release all child sockets */ 47 /* Release all child sockets */
49 list_for_each_entry_safe(s, n, &parent->list, list) { 48 list_for_each_entry_safe(s, n, &parent->list, list) {
50 list_del(&s->list); 49 list_del_init(&s->list);
51 sk = &s->sk; 50 sk = &s->sk;
52 51
53 lock_sock(sk); 52 lock_sock(sk);
@@ -56,9 +55,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
56 nfc_put_device(s->dev); 55 nfc_put_device(s->dev);
57 56
58 sk->sk_state = LLCP_CLOSED; 57 sk->sk_state = LLCP_CLOSED;
59 sock_set_flag(sk, SOCK_DEAD);
60 58
61 release_sock(sk); 59 release_sock(sk);
60
61 sock_orphan(sk);
62
63 s->local = NULL;
62 } 64 }
63 65
64 parent_sk = &parent->sk; 66 parent_sk = &parent->sk;
@@ -70,18 +72,19 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
70 struct sock *accept_sk; 72 struct sock *accept_sk;
71 73
72 list_for_each_entry_safe(lsk, n, &parent->accept_queue, 74 list_for_each_entry_safe(lsk, n, &parent->accept_queue,
73 accept_queue) { 75 accept_queue) {
74 accept_sk = &lsk->sk; 76 accept_sk = &lsk->sk;
75 lock_sock(accept_sk); 77 lock_sock(accept_sk);
76 78
77 nfc_llcp_accept_unlink(accept_sk); 79 nfc_llcp_accept_unlink(accept_sk);
78 80
79 accept_sk->sk_state = LLCP_CLOSED; 81 accept_sk->sk_state = LLCP_CLOSED;
80 sock_set_flag(accept_sk, SOCK_DEAD);
81 82
82 release_sock(accept_sk); 83 release_sock(accept_sk);
83 84
84 sock_orphan(accept_sk); 85 sock_orphan(accept_sk);
86
87 lsk->local = NULL;
85 } 88 }
86 } 89 }
87 90
@@ -89,18 +92,32 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
89 nfc_put_device(parent->dev); 92 nfc_put_device(parent->dev);
90 93
91 parent_sk->sk_state = LLCP_CLOSED; 94 parent_sk->sk_state = LLCP_CLOSED;
92 sock_set_flag(parent_sk, SOCK_DEAD);
93 95
94 release_sock(parent_sk); 96 release_sock(parent_sk);
97
98 sock_orphan(parent_sk);
99
100 parent->local = NULL;
95 } 101 }
96 102
97 mutex_unlock(&local->socket_lock); 103 mutex_unlock(&local->socket_lock);
98} 104}
99 105
106static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
107{
108 mutex_lock(&local->sdp_lock);
109
110 local->local_wks = 0;
111 local->local_sdp = 0;
112 local->local_sap = 0;
113
114 mutex_unlock(&local->sdp_lock);
115}
116
100static void nfc_llcp_timeout_work(struct work_struct *work) 117static void nfc_llcp_timeout_work(struct work_struct *work)
101{ 118{
102 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 119 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
103 timeout_work); 120 timeout_work);
104 121
105 nfc_dep_link_down(local->dev); 122 nfc_dep_link_down(local->dev);
106} 123}
@@ -146,7 +163,7 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
146 163
147 num_wks = ARRAY_SIZE(wks); 164 num_wks = ARRAY_SIZE(wks);
148 165
149 for (sap = 0 ; sap < num_wks; sap++) { 166 for (sap = 0; sap < num_wks; sap++) {
150 if (wks[sap] == NULL) 167 if (wks[sap] == NULL)
151 continue; 168 continue;
152 169
@@ -158,13 +175,13 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
158} 175}
159 176
160u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 177u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
161 struct nfc_llcp_sock *sock) 178 struct nfc_llcp_sock *sock)
162{ 179{
163 mutex_lock(&local->sdp_lock); 180 mutex_lock(&local->sdp_lock);
164 181
165 if (sock->service_name != NULL && sock->service_name_len > 0) { 182 if (sock->service_name != NULL && sock->service_name_len > 0) {
166 int ssap = nfc_llcp_wks_sap(sock->service_name, 183 int ssap = nfc_llcp_wks_sap(sock->service_name,
167 sock->service_name_len); 184 sock->service_name_len);
168 185
169 if (ssap > 0) { 186 if (ssap > 0) {
170 pr_debug("WKS %d\n", ssap); 187 pr_debug("WKS %d\n", ssap);
@@ -176,7 +193,7 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
176 return LLCP_SAP_MAX; 193 return LLCP_SAP_MAX;
177 } 194 }
178 195
179 set_bit(BIT(ssap), &local->local_wks); 196 set_bit(ssap, &local->local_wks);
180 mutex_unlock(&local->sdp_lock); 197 mutex_unlock(&local->sdp_lock);
181 198
182 return ssap; 199 return ssap;
@@ -195,25 +212,25 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
195 212
196 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap); 213 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
197 214
198 set_bit(BIT(ssap), &local->local_sdp); 215 set_bit(ssap, &local->local_sdp);
199 mutex_unlock(&local->sdp_lock); 216 mutex_unlock(&local->sdp_lock);
200 217
201 return LLCP_WKS_NUM_SAP + ssap; 218 return LLCP_WKS_NUM_SAP + ssap;
202 219
203 } else if (sock->ssap != 0) { 220 } else if (sock->ssap != 0) {
204 if (sock->ssap < LLCP_WKS_NUM_SAP) { 221 if (sock->ssap < LLCP_WKS_NUM_SAP) {
205 if (!(local->local_wks & BIT(sock->ssap))) { 222 if (!test_bit(sock->ssap, &local->local_wks)) {
206 set_bit(BIT(sock->ssap), &local->local_wks); 223 set_bit(sock->ssap, &local->local_wks);
207 mutex_unlock(&local->sdp_lock); 224 mutex_unlock(&local->sdp_lock);
208 225
209 return sock->ssap; 226 return sock->ssap;
210 } 227 }
211 228
212 } else if (sock->ssap < LLCP_SDP_NUM_SAP) { 229 } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
213 if (!(local->local_sdp & 230 if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
214 BIT(sock->ssap - LLCP_WKS_NUM_SAP))) { 231 &local->local_sdp)) {
215 set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP), 232 set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
216 &local->local_sdp); 233 &local->local_sdp);
217 mutex_unlock(&local->sdp_lock); 234 mutex_unlock(&local->sdp_lock);
218 235
219 return sock->ssap; 236 return sock->ssap;
@@ -238,7 +255,7 @@ u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local)
238 return LLCP_SAP_MAX; 255 return LLCP_SAP_MAX;
239 } 256 }
240 257
241 set_bit(BIT(local_ssap), &local->local_sap); 258 set_bit(local_ssap, &local->local_sap);
242 259
243 mutex_unlock(&local->sdp_lock); 260 mutex_unlock(&local->sdp_lock);
244 261
@@ -265,12 +282,12 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
265 282
266 mutex_lock(&local->sdp_lock); 283 mutex_lock(&local->sdp_lock);
267 284
268 clear_bit(1 << local_ssap, sdp); 285 clear_bit(local_ssap, sdp);
269 286
270 mutex_unlock(&local->sdp_lock); 287 mutex_unlock(&local->sdp_lock);
271} 288}
272 289
273u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len) 290u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
274{ 291{
275 struct nfc_llcp_local *local; 292 struct nfc_llcp_local *local;
276 293
@@ -290,23 +307,30 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
290 u8 *gb_cur, *version_tlv, version, version_length; 307 u8 *gb_cur, *version_tlv, version, version_length;
291 u8 *lto_tlv, lto, lto_length; 308 u8 *lto_tlv, lto, lto_length;
292 u8 *wks_tlv, wks_length; 309 u8 *wks_tlv, wks_length;
310 u8 *miux_tlv, miux_length;
311 __be16 miux;
293 u8 gb_len = 0; 312 u8 gb_len = 0;
294 313
295 version = LLCP_VERSION_11; 314 version = LLCP_VERSION_11;
296 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, 315 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
297 1, &version_length); 316 1, &version_length);
298 gb_len += version_length; 317 gb_len += version_length;
299 318
300 /* 1500 ms */ 319 /* 1500 ms */
301 lto = 150; 320 lto = 150;
302 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &lto, 1, &lto_length); 321 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
303 gb_len += lto_length; 322 gb_len += lto_length;
304 323
305 pr_debug("Local wks 0x%lx\n", local->local_wks); 324 pr_debug("Local wks 0x%lx\n", local->local_wks);
306 wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2, 325 wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
307 &wks_length); 326 &wks_length);
308 gb_len += wks_length; 327 gb_len += wks_length;
309 328
329 miux = cpu_to_be16(LLCP_MAX_MIUX);
330 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
331 &miux_length);
332 gb_len += miux_length;
333
310 gb_len += ARRAY_SIZE(llcp_magic); 334 gb_len += ARRAY_SIZE(llcp_magic);
311 335
312 if (gb_len > NFC_MAX_GT_LEN) { 336 if (gb_len > NFC_MAX_GT_LEN) {
@@ -328,6 +352,9 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
328 memcpy(gb_cur, wks_tlv, wks_length); 352 memcpy(gb_cur, wks_tlv, wks_length);
329 gb_cur += wks_length; 353 gb_cur += wks_length;
330 354
355 memcpy(gb_cur, miux_tlv, miux_length);
356 gb_cur += miux_length;
357
331 kfree(version_tlv); 358 kfree(version_tlv);
332 kfree(lto_tlv); 359 kfree(lto_tlv);
333 360
@@ -349,8 +376,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
349 memcpy(local->remote_gb, gb, gb_len); 376 memcpy(local->remote_gb, gb, gb_len);
350 local->remote_gb_len = gb_len; 377 local->remote_gb_len = gb_len;
351 378
352 if (local->remote_gb == NULL || 379 if (local->remote_gb == NULL || local->remote_gb_len == 0)
353 local->remote_gb_len == 0)
354 return -ENODEV; 380 return -ENODEV;
355 381
356 if (memcmp(local->remote_gb, llcp_magic, 3)) { 382 if (memcmp(local->remote_gb, llcp_magic, 3)) {
@@ -359,26 +385,30 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
359 } 385 }
360 386
361 return nfc_llcp_parse_tlv(local, 387 return nfc_llcp_parse_tlv(local,
362 &local->remote_gb[3], local->remote_gb_len - 3); 388 &local->remote_gb[3],
389 local->remote_gb_len - 3);
363} 390}
364 391
365static void nfc_llcp_tx_work(struct work_struct *work) 392static void nfc_llcp_tx_work(struct work_struct *work)
366{ 393{
367 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 394 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
368 tx_work); 395 tx_work);
369 struct sk_buff *skb; 396 struct sk_buff *skb;
370 397
371 skb = skb_dequeue(&local->tx_queue); 398 skb = skb_dequeue(&local->tx_queue);
372 if (skb != NULL) { 399 if (skb != NULL) {
373 pr_debug("Sending pending skb\n"); 400 pr_debug("Sending pending skb\n");
401 print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
402 16, 1, skb->data, skb->len, true);
403
374 nfc_data_exchange(local->dev, local->target_idx, 404 nfc_data_exchange(local->dev, local->target_idx,
375 skb, nfc_llcp_recv, local); 405 skb, nfc_llcp_recv, local);
376 } else { 406 } else {
377 nfc_llcp_send_symm(local->dev); 407 nfc_llcp_send_symm(local->dev);
378 } 408 }
379 409
380 mod_timer(&local->link_timer, 410 mod_timer(&local->link_timer,
381 jiffies + msecs_to_jiffies(local->remote_lto)); 411 jiffies + msecs_to_jiffies(local->remote_lto));
382} 412}
383 413
384static u8 nfc_llcp_dsap(struct sk_buff *pdu) 414static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -408,16 +438,18 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu)
408 438
409static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) 439static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
410{ 440{
411 pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16); 441 pdu->data[2] = (sock->send_n << 4) | (sock->recv_n);
412 sock->send_n = (sock->send_n + 1) % 16; 442 sock->send_n = (sock->send_n + 1) % 16;
413 sock->recv_ack_n = (sock->recv_n - 1) % 16; 443 sock->recv_ack_n = (sock->recv_n - 1) % 16;
414} 444}
415 445
416static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, 446static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
417 u8 ssap, u8 dsap) 447 u8 ssap, u8 dsap)
418{ 448{
419 struct nfc_llcp_sock *sock, *llcp_sock, *n; 449 struct nfc_llcp_sock *sock, *llcp_sock, *n;
420 450
451 pr_debug("ssap dsap %d %d\n", ssap, dsap);
452
421 if (ssap == 0 && dsap == 0) 453 if (ssap == 0 && dsap == 0)
422 return NULL; 454 return NULL;
423 455
@@ -438,7 +470,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
438 470
439 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { 471 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
440 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock, 472 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
441 &llcp_sock->sk, llcp_sock->dsap); 473 &llcp_sock->sk, llcp_sock->dsap);
442 if (llcp_sock->dsap == dsap) { 474 if (llcp_sock->dsap == dsap) {
443 sock_hold(&llcp_sock->sk); 475 sock_hold(&llcp_sock->sk);
444 mutex_unlock(&local->socket_lock); 476 mutex_unlock(&local->socket_lock);
@@ -482,7 +514,7 @@ static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
482} 514}
483 515
484static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, 516static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
485 struct sk_buff *skb) 517 struct sk_buff *skb)
486{ 518{
487 struct sock *new_sk, *parent; 519 struct sock *new_sk, *parent;
488 struct nfc_llcp_sock *sock, *new_sock; 520 struct nfc_llcp_sock *sock, *new_sock;
@@ -494,7 +526,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
494 pr_debug("%d %d\n", dsap, ssap); 526 pr_debug("%d %d\n", dsap, ssap);
495 527
496 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 528 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
497 skb->len - LLCP_HEADER_SIZE); 529 skb->len - LLCP_HEADER_SIZE);
498 530
499 if (dsap != LLCP_SAP_SDP) { 531 if (dsap != LLCP_SAP_SDP) {
500 bound_sap = dsap; 532 bound_sap = dsap;
@@ -513,7 +545,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
513 lock_sock(&sock->sk); 545 lock_sock(&sock->sk);
514 546
515 if (sock->dsap == LLCP_SAP_SDP && 547 if (sock->dsap == LLCP_SAP_SDP &&
516 sock->sk.sk_state == LLCP_LISTEN) 548 sock->sk.sk_state == LLCP_LISTEN)
517 goto enqueue; 549 goto enqueue;
518 } else { 550 } else {
519 u8 *sn; 551 u8 *sn;
@@ -529,23 +561,23 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
529 561
530 mutex_lock(&local->socket_lock); 562 mutex_lock(&local->socket_lock);
531 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; 563 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
532 bound_sap++) { 564 bound_sap++) {
533 sock = local->sockets[bound_sap]; 565 sock = local->sockets[bound_sap];
534 if (sock == NULL) 566 if (sock == NULL)
535 continue; 567 continue;
536 568
537 if (sock->service_name == NULL || 569 if (sock->service_name == NULL ||
538 sock->service_name_len == 0) 570 sock->service_name_len == 0)
539 continue; 571 continue;
540 572
541 if (sock->service_name_len != sn_len) 573 if (sock->service_name_len != sn_len)
542 continue; 574 continue;
543 575
544 if (sock->dsap == LLCP_SAP_SDP && 576 if (sock->dsap == LLCP_SAP_SDP &&
545 sock->sk.sk_state == LLCP_LISTEN && 577 sock->sk.sk_state == LLCP_LISTEN &&
546 !memcmp(sn, sock->service_name, sn_len)) { 578 !memcmp(sn, sock->service_name, sn_len)) {
547 pr_debug("Found service name at SAP %d\n", 579 pr_debug("Found service name at SAP %d\n",
548 bound_sap); 580 bound_sap);
549 sock_hold(&sock->sk); 581 sock_hold(&sock->sk);
550 mutex_unlock(&local->socket_lock); 582 mutex_unlock(&local->socket_lock);
551 583
@@ -570,8 +602,7 @@ enqueue:
570 goto fail; 602 goto fail;
571 } 603 }
572 604
573 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, 605 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
574 GFP_ATOMIC);
575 if (new_sk == NULL) { 606 if (new_sk == NULL) {
576 reason = LLCP_DM_REJ; 607 reason = LLCP_DM_REJ;
577 release_sock(&sock->sk); 608 release_sock(&sock->sk);
@@ -616,8 +647,39 @@ fail:
616 647
617} 648}
618 649
650int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
651{
652 int nr_frames = 0;
653 struct nfc_llcp_local *local = sock->local;
654
655 pr_debug("Remote ready %d tx queue len %d remote rw %d",
656 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
657 local->remote_rw);
658
659 /* Try to queue some I frames for transmission */
660 while (sock->remote_ready &&
661 skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) {
662 struct sk_buff *pdu, *pending_pdu;
663
664 pdu = skb_dequeue(&sock->tx_queue);
665 if (pdu == NULL)
666 break;
667
668 /* Update N(S)/N(R) */
669 nfc_llcp_set_nrns(sock, pdu);
670
671 pending_pdu = skb_clone(pdu, GFP_KERNEL);
672
673 skb_queue_tail(&local->tx_queue, pdu);
674 skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
675 nr_frames++;
676 }
677
678 return nr_frames;
679}
680
619static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, 681static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
620 struct sk_buff *skb) 682 struct sk_buff *skb)
621{ 683{
622 struct nfc_llcp_sock *llcp_sock; 684 struct nfc_llcp_sock *llcp_sock;
623 struct sock *sk; 685 struct sock *sk;
@@ -644,15 +706,15 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
644 nfc_llcp_sock_put(llcp_sock); 706 nfc_llcp_sock_put(llcp_sock);
645 } 707 }
646 708
647 if (ns == llcp_sock->recv_n)
648 llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
649 else
650 pr_err("Received out of sequence I PDU\n");
651
652 /* Pass the payload upstream */ 709 /* Pass the payload upstream */
653 if (ptype == LLCP_PDU_I) { 710 if (ptype == LLCP_PDU_I) {
654 pr_debug("I frame, queueing on %p\n", &llcp_sock->sk); 711 pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
655 712
713 if (ns == llcp_sock->recv_n)
714 llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
715 else
716 pr_err("Received out of sequence I PDU\n");
717
656 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE); 718 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
657 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { 719 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
658 pr_err("receive queue is full\n"); 720 pr_err("receive queue is full\n");
@@ -673,30 +735,20 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
673 } 735 }
674 } 736 }
675 737
676 /* Queue some I frames for transmission */ 738 if (ptype == LLCP_PDU_RR)
677 while (llcp_sock->remote_ready && 739 llcp_sock->remote_ready = true;
678 skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) { 740 else if (ptype == LLCP_PDU_RNR)
679 struct sk_buff *pdu, *pending_pdu; 741 llcp_sock->remote_ready = false;
680
681 pdu = skb_dequeue(&llcp_sock->tx_queue);
682 if (pdu == NULL)
683 break;
684
685 /* Update N(S)/N(R) */
686 nfc_llcp_set_nrns(llcp_sock, pdu);
687 742
688 pending_pdu = skb_clone(pdu, GFP_KERNEL); 743 if (nfc_llcp_queue_i_frames(llcp_sock) == 0)
689 744 nfc_llcp_send_rr(llcp_sock);
690 skb_queue_tail(&local->tx_queue, pdu);
691 skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
692 }
693 745
694 release_sock(sk); 746 release_sock(sk);
695 nfc_llcp_sock_put(llcp_sock); 747 nfc_llcp_sock_put(llcp_sock);
696} 748}
697 749
698static void nfc_llcp_recv_disc(struct nfc_llcp_local *local, 750static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
699 struct sk_buff *skb) 751 struct sk_buff *skb)
700{ 752{
701 struct nfc_llcp_sock *llcp_sock; 753 struct nfc_llcp_sock *llcp_sock;
702 struct sock *sk; 754 struct sock *sk;
@@ -718,7 +770,6 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
718 nfc_llcp_sock_put(llcp_sock); 770 nfc_llcp_sock_put(llcp_sock);
719 } 771 }
720 772
721
722 if (sk->sk_state == LLCP_CONNECTED) { 773 if (sk->sk_state == LLCP_CONNECTED) {
723 nfc_put_device(local->dev); 774 nfc_put_device(local->dev);
724 sk->sk_state = LLCP_CLOSED; 775 sk->sk_state = LLCP_CLOSED;
@@ -731,13 +782,12 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
731 nfc_llcp_sock_put(llcp_sock); 782 nfc_llcp_sock_put(llcp_sock);
732} 783}
733 784
734static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, 785static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
735 struct sk_buff *skb)
736{ 786{
737 struct nfc_llcp_sock *llcp_sock; 787 struct nfc_llcp_sock *llcp_sock;
788 struct sock *sk;
738 u8 dsap, ssap; 789 u8 dsap, ssap;
739 790
740
741 dsap = nfc_llcp_dsap(skb); 791 dsap = nfc_llcp_dsap(skb);
742 ssap = nfc_llcp_ssap(skb); 792 ssap = nfc_llcp_ssap(skb);
743 793
@@ -754,9 +804,13 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
754 } 804 }
755 805
756 llcp_sock->dsap = ssap; 806 llcp_sock->dsap = ssap;
807 sk = &llcp_sock->sk;
757 808
758 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 809 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
759 skb->len - LLCP_HEADER_SIZE); 810 skb->len - LLCP_HEADER_SIZE);
811
812 sk->sk_state = LLCP_CONNECTED;
813 sk->sk_state_change(sk);
760 814
761 nfc_llcp_sock_put(llcp_sock); 815 nfc_llcp_sock_put(llcp_sock);
762} 816}
@@ -764,7 +818,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
764static void nfc_llcp_rx_work(struct work_struct *work) 818static void nfc_llcp_rx_work(struct work_struct *work)
765{ 819{
766 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 820 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
767 rx_work); 821 rx_work);
768 u8 dsap, ssap, ptype; 822 u8 dsap, ssap, ptype;
769 struct sk_buff *skb; 823 struct sk_buff *skb;
770 824
@@ -780,6 +834,10 @@ static void nfc_llcp_rx_work(struct work_struct *work)
780 834
781 pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); 835 pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
782 836
837 if (ptype != LLCP_PDU_SYMM)
838 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
839 16, 1, skb->data, skb->len, true);
840
783 switch (ptype) { 841 switch (ptype) {
784 case LLCP_PDU_SYMM: 842 case LLCP_PDU_SYMM:
785 pr_debug("SYMM\n"); 843 pr_debug("SYMM\n");
@@ -802,6 +860,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
802 860
803 case LLCP_PDU_I: 861 case LLCP_PDU_I:
804 case LLCP_PDU_RR: 862 case LLCP_PDU_RR:
863 case LLCP_PDU_RNR:
805 pr_debug("I frame\n"); 864 pr_debug("I frame\n");
806 nfc_llcp_recv_hdlc(local, skb); 865 nfc_llcp_recv_hdlc(local, skb);
807 break; 866 break;
@@ -821,7 +880,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
821 880
822 pr_debug("Received an LLCP PDU\n"); 881 pr_debug("Received an LLCP PDU\n");
823 if (err < 0) { 882 if (err < 0) {
824 pr_err("err %d", err); 883 pr_err("err %d\n", err);
825 return; 884 return;
826 } 885 }
827 886
@@ -840,6 +899,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
840 if (local == NULL) 899 if (local == NULL)
841 return; 900 return;
842 901
902 nfc_llcp_clear_sdp(local);
903
843 /* Close and purge all existing sockets */ 904 /* Close and purge all existing sockets */
844 nfc_llcp_socket_release(local); 905 nfc_llcp_socket_release(local);
845} 906}
@@ -865,7 +926,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
865 queue_work(local->tx_wq, &local->tx_work); 926 queue_work(local->tx_wq, &local->tx_work);
866 } else { 927 } else {
867 mod_timer(&local->link_timer, 928 mod_timer(&local->link_timer,
868 jiffies + msecs_to_jiffies(local->remote_lto)); 929 jiffies + msecs_to_jiffies(local->remote_lto));
869 } 930 }
870} 931}
871 932
@@ -891,8 +952,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
891 skb_queue_head_init(&local->tx_queue); 952 skb_queue_head_init(&local->tx_queue);
892 INIT_WORK(&local->tx_work, nfc_llcp_tx_work); 953 INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
893 snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev)); 954 snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
894 local->tx_wq = alloc_workqueue(name, 955 local->tx_wq =
895 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 956 alloc_workqueue(name,
957 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
958 1);
896 if (local->tx_wq == NULL) { 959 if (local->tx_wq == NULL) {
897 err = -ENOMEM; 960 err = -ENOMEM;
898 goto err_local; 961 goto err_local;
@@ -901,8 +964,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
901 local->rx_pending = NULL; 964 local->rx_pending = NULL;
902 INIT_WORK(&local->rx_work, nfc_llcp_rx_work); 965 INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
903 snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev)); 966 snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
904 local->rx_wq = alloc_workqueue(name, 967 local->rx_wq =
905 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 968 alloc_workqueue(name,
969 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
970 1);
906 if (local->rx_wq == NULL) { 971 if (local->rx_wq == NULL) {
907 err = -ENOMEM; 972 err = -ENOMEM;
908 goto err_tx_wq; 973 goto err_tx_wq;
@@ -910,8 +975,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
910 975
911 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work); 976 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
912 snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev)); 977 snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
913 local->timeout_wq = alloc_workqueue(name, 978 local->timeout_wq =
914 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 979 alloc_workqueue(name,
980 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
981 1);
915 if (local->timeout_wq == NULL) { 982 if (local->timeout_wq == NULL) {
916 err = -ENOMEM; 983 err = -ENOMEM;
917 goto err_rx_wq; 984 goto err_rx_wq;
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0ad2e3361584..50680ce5ae43 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -28,6 +28,10 @@ enum llcp_state {
28#define LLCP_DEFAULT_RW 1 28#define LLCP_DEFAULT_RW 1
29#define LLCP_DEFAULT_MIU 128 29#define LLCP_DEFAULT_MIU 128
30 30
31#define LLCP_MAX_LTO 0xff
32#define LLCP_MAX_RW 15
33#define LLCP_MAX_MIUX 0x7ff
34
31#define LLCP_WKS_NUM_SAP 16 35#define LLCP_WKS_NUM_SAP 16
32#define LLCP_SDP_NUM_SAP 16 36#define LLCP_SDP_NUM_SAP 16
33#define LLCP_LOCAL_NUM_SAP 32 37#define LLCP_LOCAL_NUM_SAP 32
@@ -162,9 +166,10 @@ struct nfc_llcp_sock {
162 166
163struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); 167struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
164u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 168u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
165 struct nfc_llcp_sock *sock); 169 struct nfc_llcp_sock *sock);
166u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); 170u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
167void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap); 171void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
172int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
168 173
169/* Sock API */ 174/* Sock API */
170struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp); 175struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
@@ -175,7 +180,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
175 180
176/* TLV API */ 181/* TLV API */
177int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 182int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
178 u8 *tlv_array, u16 tlv_array_len); 183 u8 *tlv_array, u16 tlv_array_len);
179 184
180/* Commands API */ 185/* Commands API */
181void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 186void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
@@ -187,6 +192,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
187int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); 192int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
188int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); 193int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
189int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); 194int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
195int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
196 struct msghdr *msg, size_t len);
197int nfc_llcp_send_rr(struct nfc_llcp_sock *sock);
190 198
191/* Socket API */ 199/* Socket API */
192int __init nfc_llcp_sock_init(void); 200int __init nfc_llcp_sock_init(void);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index f738ccd535f1..3f339b19d140 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -27,6 +27,42 @@
27#include "../nfc.h" 27#include "../nfc.h"
28#include "llcp.h" 28#include "llcp.h"
29 29
30static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
31{
32 DECLARE_WAITQUEUE(wait, current);
33 int err = 0;
34
35 pr_debug("sk %p", sk);
36
37 add_wait_queue(sk_sleep(sk), &wait);
38 set_current_state(TASK_INTERRUPTIBLE);
39
40 while (sk->sk_state != state) {
41 if (!timeo) {
42 err = -EINPROGRESS;
43 break;
44 }
45
46 if (signal_pending(current)) {
47 err = sock_intr_errno(timeo);
48 break;
49 }
50
51 release_sock(sk);
52 timeo = schedule_timeout(timeo);
53 lock_sock(sk);
54 set_current_state(TASK_INTERRUPTIBLE);
55
56 err = sock_error(sk);
57 if (err)
58 break;
59 }
60
61 __set_current_state(TASK_RUNNING);
62 remove_wait_queue(sk_sleep(sk), &wait);
63 return err;
64}
65
30static struct proto llcp_sock_proto = { 66static struct proto llcp_sock_proto = {
31 .name = "NFC_LLCP", 67 .name = "NFC_LLCP",
32 .owner = THIS_MODULE, 68 .owner = THIS_MODULE,
@@ -78,9 +114,11 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
78 llcp_sock->local = local; 114 llcp_sock->local = local;
79 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; 115 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
80 llcp_sock->service_name_len = min_t(unsigned int, 116 llcp_sock->service_name_len = min_t(unsigned int,
81 llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); 117 llcp_addr.service_name_len,
118 NFC_LLCP_MAX_SERVICE_NAME);
82 llcp_sock->service_name = kmemdup(llcp_addr.service_name, 119 llcp_sock->service_name = kmemdup(llcp_addr.service_name,
83 llcp_sock->service_name_len, GFP_KERNEL); 120 llcp_sock->service_name_len,
121 GFP_KERNEL);
84 122
85 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); 123 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
86 if (llcp_sock->ssap == LLCP_MAX_SAP) 124 if (llcp_sock->ssap == LLCP_MAX_SAP)
@@ -110,7 +148,7 @@ static int llcp_sock_listen(struct socket *sock, int backlog)
110 lock_sock(sk); 148 lock_sock(sk);
111 149
112 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 150 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
113 || sk->sk_state != LLCP_BOUND) { 151 || sk->sk_state != LLCP_BOUND) {
114 ret = -EBADFD; 152 ret = -EBADFD;
115 goto error; 153 goto error;
116 } 154 }
@@ -149,13 +187,13 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
149 sock_hold(sk); 187 sock_hold(sk);
150 188
151 list_add_tail(&llcp_sock->accept_queue, 189 list_add_tail(&llcp_sock->accept_queue,
152 &llcp_sock_parent->accept_queue); 190 &llcp_sock_parent->accept_queue);
153 llcp_sock->parent = parent; 191 llcp_sock->parent = parent;
154 sk_acceptq_added(parent); 192 sk_acceptq_added(parent);
155} 193}
156 194
157struct sock *nfc_llcp_accept_dequeue(struct sock *parent, 195struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
158 struct socket *newsock) 196 struct socket *newsock)
159{ 197{
160 struct nfc_llcp_sock *lsk, *n, *llcp_parent; 198 struct nfc_llcp_sock *lsk, *n, *llcp_parent;
161 struct sock *sk; 199 struct sock *sk;
@@ -163,7 +201,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
163 llcp_parent = nfc_llcp_sock(parent); 201 llcp_parent = nfc_llcp_sock(parent);
164 202
165 list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, 203 list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
166 accept_queue) { 204 accept_queue) {
167 sk = &lsk->sk; 205 sk = &lsk->sk;
168 lock_sock(sk); 206 lock_sock(sk);
169 207
@@ -192,7 +230,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
192} 230}
193 231
194static int llcp_sock_accept(struct socket *sock, struct socket *newsock, 232static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
195 int flags) 233 int flags)
196{ 234{
197 DECLARE_WAITQUEUE(wait, current); 235 DECLARE_WAITQUEUE(wait, current);
198 struct sock *sk = sock->sk, *new_sk; 236 struct sock *sk = sock->sk, *new_sk;
@@ -248,7 +286,7 @@ error:
248static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, 286static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
249 int *len, int peer) 287 int *len, int peer)
250{ 288{
251 struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr; 289 struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
252 struct sock *sk = sock->sk; 290 struct sock *sk = sock->sk;
253 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 291 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
254 292
@@ -262,7 +300,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
262 llcp_addr->ssap = llcp_sock->ssap; 300 llcp_addr->ssap = llcp_sock->ssap;
263 llcp_addr->service_name_len = llcp_sock->service_name_len; 301 llcp_addr->service_name_len = llcp_sock->service_name_len;
264 memcpy(llcp_addr->service_name, llcp_sock->service_name, 302 memcpy(llcp_addr->service_name, llcp_sock->service_name,
265 llcp_addr->service_name_len); 303 llcp_addr->service_name_len);
266 304
267 return 0; 305 return 0;
268} 306}
@@ -275,7 +313,7 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
275 parent_sock = nfc_llcp_sock(parent); 313 parent_sock = nfc_llcp_sock(parent);
276 314
277 list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue, 315 list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
278 accept_queue) { 316 accept_queue) {
279 sk = &llcp_sock->sk; 317 sk = &llcp_sock->sk;
280 318
281 if (sk->sk_state == LLCP_CONNECTED) 319 if (sk->sk_state == LLCP_CONNECTED)
@@ -286,7 +324,7 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
286} 324}
287 325
288static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, 326static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
289 poll_table *wait) 327 poll_table *wait)
290{ 328{
291 struct sock *sk = sock->sk; 329 struct sock *sk = sock->sk;
292 unsigned int mask = 0; 330 unsigned int mask = 0;
@@ -302,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
302 mask |= POLLERR; 340 mask |= POLLERR;
303 341
304 if (!skb_queue_empty(&sk->sk_receive_queue)) 342 if (!skb_queue_empty(&sk->sk_receive_queue))
305 mask |= POLLIN; 343 mask |= POLLIN | POLLRDNORM;
306 344
307 if (sk->sk_state == LLCP_CLOSED) 345 if (sk->sk_state == LLCP_CLOSED)
308 mask |= POLLHUP; 346 mask |= POLLHUP;
309 347
348 if (sk->sk_shutdown & RCV_SHUTDOWN)
349 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
350
351 if (sk->sk_shutdown == SHUTDOWN_MASK)
352 mask |= POLLHUP;
353
354 if (sock_writeable(sk))
355 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
356 else
357 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
358
359 pr_debug("mask 0x%x\n", mask);
360
310 return mask; 361 return mask;
311} 362}
312 363
@@ -315,6 +366,7 @@ static int llcp_sock_release(struct socket *sock)
315 struct sock *sk = sock->sk; 366 struct sock *sk = sock->sk;
316 struct nfc_llcp_local *local; 367 struct nfc_llcp_local *local;
317 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 368 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
369 int err = 0;
318 370
319 if (!sk) 371 if (!sk)
320 return 0; 372 return 0;
@@ -322,25 +374,17 @@ static int llcp_sock_release(struct socket *sock)
322 pr_debug("%p\n", sk); 374 pr_debug("%p\n", sk);
323 375
324 local = llcp_sock->local; 376 local = llcp_sock->local;
325 if (local == NULL) 377 if (local == NULL) {
326 return -ENODEV; 378 err = -ENODEV;
379 goto out;
380 }
327 381
328 mutex_lock(&local->socket_lock); 382 mutex_lock(&local->socket_lock);
329 383
330 if (llcp_sock == local->sockets[llcp_sock->ssap]) { 384 if (llcp_sock == local->sockets[llcp_sock->ssap])
331 local->sockets[llcp_sock->ssap] = NULL; 385 local->sockets[llcp_sock->ssap] = NULL;
332 } else { 386 else
333 struct nfc_llcp_sock *parent, *s, *n; 387 list_del_init(&llcp_sock->list);
334
335 parent = local->sockets[llcp_sock->ssap];
336
337 list_for_each_entry_safe(s, n, &parent->list, list)
338 if (llcp_sock == s) {
339 list_del(&s->list);
340 break;
341 }
342
343 }
344 388
345 mutex_unlock(&local->socket_lock); 389 mutex_unlock(&local->socket_lock);
346 390
@@ -355,7 +399,7 @@ static int llcp_sock_release(struct socket *sock)
355 struct sock *accept_sk; 399 struct sock *accept_sk;
356 400
357 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, 401 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
358 accept_queue) { 402 accept_queue) {
359 accept_sk = &lsk->sk; 403 accept_sk = &lsk->sk;
360 lock_sock(accept_sk); 404 lock_sock(accept_sk);
361 405
@@ -364,31 +408,27 @@ static int llcp_sock_release(struct socket *sock)
364 408
365 release_sock(accept_sk); 409 release_sock(accept_sk);
366 410
367 sock_set_flag(sk, SOCK_DEAD);
368 sock_orphan(accept_sk); 411 sock_orphan(accept_sk);
369 sock_put(accept_sk);
370 } 412 }
371 } 413 }
372 414
373 /* Freeing the SAP */ 415 /* Freeing the SAP */
374 if ((sk->sk_state == LLCP_CONNECTED 416 if ((sk->sk_state == LLCP_CONNECTED
375 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) || 417 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
376 sk->sk_state == LLCP_BOUND || 418 sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
377 sk->sk_state == LLCP_LISTEN)
378 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); 419 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
379 420
380 sock_set_flag(sk, SOCK_DEAD);
381
382 release_sock(sk); 421 release_sock(sk);
383 422
423out:
384 sock_orphan(sk); 424 sock_orphan(sk);
385 sock_put(sk); 425 sock_put(sk);
386 426
387 return 0; 427 return err;
388} 428}
389 429
390static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, 430static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
391 int len, int flags) 431 int len, int flags)
392{ 432{
393 struct sock *sk = sock->sk; 433 struct sock *sk = sock->sk;
394 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 434 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -400,7 +440,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
400 pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); 440 pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
401 441
402 if (!addr || len < sizeof(struct sockaddr_nfc) || 442 if (!addr || len < sizeof(struct sockaddr_nfc) ||
403 addr->sa_family != AF_NFC) { 443 addr->sa_family != AF_NFC) {
404 pr_err("Invalid socket\n"); 444 pr_err("Invalid socket\n");
405 return -EINVAL; 445 return -EINVAL;
406 } 446 }
@@ -411,7 +451,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
411 } 451 }
412 452
413 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, 453 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
414 addr->target_idx, addr->nfc_protocol); 454 addr->target_idx, addr->nfc_protocol);
415 455
416 lock_sock(sk); 456 lock_sock(sk);
417 457
@@ -441,7 +481,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
441 device_unlock(&dev->dev); 481 device_unlock(&dev->dev);
442 482
443 if (local->rf_mode == NFC_RF_INITIATOR && 483 if (local->rf_mode == NFC_RF_INITIATOR &&
444 addr->target_idx != local->target_idx) { 484 addr->target_idx != local->target_idx) {
445 ret = -ENOLINK; 485 ret = -ENOLINK;
446 goto put_dev; 486 goto put_dev;
447 } 487 }
@@ -459,9 +499,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
459 llcp_sock->dsap = LLCP_SAP_SDP; 499 llcp_sock->dsap = LLCP_SAP_SDP;
460 llcp_sock->nfc_protocol = addr->nfc_protocol; 500 llcp_sock->nfc_protocol = addr->nfc_protocol;
461 llcp_sock->service_name_len = min_t(unsigned int, 501 llcp_sock->service_name_len = min_t(unsigned int,
462 addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); 502 addr->service_name_len,
503 NFC_LLCP_MAX_SERVICE_NAME);
463 llcp_sock->service_name = kmemdup(addr->service_name, 504 llcp_sock->service_name = kmemdup(addr->service_name,
464 llcp_sock->service_name_len, GFP_KERNEL); 505 llcp_sock->service_name_len,
506 GFP_KERNEL);
465 507
466 local->sockets[llcp_sock->ssap] = llcp_sock; 508 local->sockets[llcp_sock->ssap] = llcp_sock;
467 509
@@ -469,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
469 if (ret) 511 if (ret)
470 goto put_dev; 512 goto put_dev;
471 513
472 sk->sk_state = LLCP_CONNECTED; 514 ret = sock_wait_state(sk, LLCP_CONNECTED,
515 sock_sndtimeo(sk, flags & O_NONBLOCK));
516 if (ret)
517 goto put_dev;
473 518
474 release_sock(sk); 519 release_sock(sk);
520
475 return 0; 521 return 0;
476 522
477put_dev: 523put_dev:
@@ -482,6 +528,34 @@ error:
482 return ret; 528 return ret;
483} 529}
484 530
531static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
532 struct msghdr *msg, size_t len)
533{
534 struct sock *sk = sock->sk;
535 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
536 int ret;
537
538 pr_debug("sock %p sk %p", sock, sk);
539
540 ret = sock_error(sk);
541 if (ret)
542 return ret;
543
544 if (msg->msg_flags & MSG_OOB)
545 return -EOPNOTSUPP;
546
547 lock_sock(sk);
548
549 if (sk->sk_state != LLCP_CONNECTED) {
550 release_sock(sk);
551 return -ENOTCONN;
552 }
553
554 release_sock(sk);
555
556 return nfc_llcp_send_i_frame(llcp_sock, msg, len);
557}
558
485static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 559static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
486 struct msghdr *msg, size_t len, int flags) 560 struct msghdr *msg, size_t len, int flags)
487{ 561{
@@ -496,7 +570,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
496 lock_sock(sk); 570 lock_sock(sk);
497 571
498 if (sk->sk_state == LLCP_CLOSED && 572 if (sk->sk_state == LLCP_CLOSED &&
499 skb_queue_empty(&sk->sk_receive_queue)) { 573 skb_queue_empty(&sk->sk_receive_queue)) {
500 release_sock(sk); 574 release_sock(sk);
501 return 0; 575 return 0;
502 } 576 }
@@ -509,7 +583,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
509 skb = skb_recv_datagram(sk, flags, noblock, &err); 583 skb = skb_recv_datagram(sk, flags, noblock, &err);
510 if (!skb) { 584 if (!skb) {
511 pr_err("Recv datagram failed state %d %d %d", 585 pr_err("Recv datagram failed state %d %d %d",
512 sk->sk_state, err, sock_error(sk)); 586 sk->sk_state, err, sock_error(sk));
513 587
514 if (sk->sk_shutdown & RCV_SHUTDOWN) 588 if (sk->sk_shutdown & RCV_SHUTDOWN)
515 return 0; 589 return 0;
@@ -517,7 +591,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
517 return err; 591 return err;
518 } 592 }
519 593
520 rlen = skb->len; /* real length of skb */ 594 rlen = skb->len; /* real length of skb */
521 copied = min_t(unsigned int, rlen, len); 595 copied = min_t(unsigned int, rlen, len);
522 596
523 cskb = skb; 597 cskb = skb;
@@ -567,7 +641,7 @@ static const struct proto_ops llcp_sock_ops = {
567 .shutdown = sock_no_shutdown, 641 .shutdown = sock_no_shutdown,
568 .setsockopt = sock_no_setsockopt, 642 .setsockopt = sock_no_setsockopt,
569 .getsockopt = sock_no_getsockopt, 643 .getsockopt = sock_no_getsockopt,
570 .sendmsg = sock_no_sendmsg, 644 .sendmsg = llcp_sock_sendmsg,
571 .recvmsg = llcp_sock_recvmsg, 645 .recvmsg = llcp_sock_recvmsg,
572 .mmap = sock_no_mmap, 646 .mmap = sock_no_mmap,
573}; 647};
@@ -627,6 +701,8 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
627 701
628void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) 702void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
629{ 703{
704 struct nfc_llcp_local *local = sock->local;
705
630 kfree(sock->service_name); 706 kfree(sock->service_name);
631 707
632 skb_queue_purge(&sock->tx_queue); 708 skb_queue_purge(&sock->tx_queue);
@@ -635,11 +711,16 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
635 711
636 list_del_init(&sock->accept_queue); 712 list_del_init(&sock->accept_queue);
637 713
714 if (local != NULL && sock == local->sockets[sock->ssap])
715 local->sockets[sock->ssap] = NULL;
716 else
717 list_del_init(&sock->list);
718
638 sock->parent = NULL; 719 sock->parent = NULL;
639} 720}
640 721
641static int llcp_sock_create(struct net *net, struct socket *sock, 722static int llcp_sock_create(struct net *net, struct socket *sock,
642 const struct nfc_protocol *nfc_proto) 723 const struct nfc_protocol *nfc_proto)
643{ 724{
644 struct sock *sk; 725 struct sock *sk;
645 726
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 7650139a1a05..d560e6f13072 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -66,9 +66,8 @@ static void nci_req_cancel(struct nci_dev *ndev, int err)
66 66
67/* Execute request and wait for completion. */ 67/* Execute request and wait for completion. */
68static int __nci_request(struct nci_dev *ndev, 68static int __nci_request(struct nci_dev *ndev,
69 void (*req)(struct nci_dev *ndev, unsigned long opt), 69 void (*req)(struct nci_dev *ndev, unsigned long opt),
70 unsigned long opt, 70 unsigned long opt, __u32 timeout)
71 __u32 timeout)
72{ 71{
73 int rc = 0; 72 int rc = 0;
74 long completion_rc; 73 long completion_rc;
@@ -77,9 +76,9 @@ static int __nci_request(struct nci_dev *ndev,
77 76
78 init_completion(&ndev->req_completion); 77 init_completion(&ndev->req_completion);
79 req(ndev, opt); 78 req(ndev, opt);
80 completion_rc = wait_for_completion_interruptible_timeout( 79 completion_rc =
81 &ndev->req_completion, 80 wait_for_completion_interruptible_timeout(&ndev->req_completion,
82 timeout); 81 timeout);
83 82
84 pr_debug("wait_for_completion return %ld\n", completion_rc); 83 pr_debug("wait_for_completion return %ld\n", completion_rc);
85 84
@@ -110,8 +109,9 @@ static int __nci_request(struct nci_dev *ndev,
110} 109}
111 110
112static inline int nci_request(struct nci_dev *ndev, 111static inline int nci_request(struct nci_dev *ndev,
113 void (*req)(struct nci_dev *ndev, unsigned long opt), 112 void (*req)(struct nci_dev *ndev,
114 unsigned long opt, __u32 timeout) 113 unsigned long opt),
114 unsigned long opt, __u32 timeout)
115{ 115{
116 int rc; 116 int rc;
117 117
@@ -152,14 +152,14 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */ 152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) { 153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
154 if (ndev->supported_rf_interfaces[i] == 154 if (ndev->supported_rf_interfaces[i] ==
155 NCI_RF_INTERFACE_ISO_DEP) { 155 NCI_RF_INTERFACE_ISO_DEP) {
156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; 156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
157 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | 157 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
158 NCI_DISC_MAP_MODE_LISTEN; 158 NCI_DISC_MAP_MODE_LISTEN;
159 cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP; 159 cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
160 (*num)++; 160 (*num)++;
161 } else if (ndev->supported_rf_interfaces[i] == 161 } else if (ndev->supported_rf_interfaces[i] ==
162 NCI_RF_INTERFACE_NFC_DEP) { 162 NCI_RF_INTERFACE_NFC_DEP) {
163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; 163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
164 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | 164 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
165 NCI_DISC_MAP_MODE_LISTEN; 165 NCI_DISC_MAP_MODE_LISTEN;
@@ -172,8 +172,7 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
172 } 172 }
173 173
174 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD, 174 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
175 (1 + ((*num)*sizeof(struct disc_map_config))), 175 (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
176 &cmd);
177} 176}
178 177
179static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) 178static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
@@ -184,36 +183,68 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
184 cmd.num_disc_configs = 0; 183 cmd.num_disc_configs = 0;
185 184
186 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 185 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
187 (protocols & NFC_PROTO_JEWEL_MASK 186 (protocols & NFC_PROTO_JEWEL_MASK
188 || protocols & NFC_PROTO_MIFARE_MASK 187 || protocols & NFC_PROTO_MIFARE_MASK
189 || protocols & NFC_PROTO_ISO14443_MASK 188 || protocols & NFC_PROTO_ISO14443_MASK
190 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 189 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
191 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 190 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
192 NCI_NFC_A_PASSIVE_POLL_MODE; 191 NCI_NFC_A_PASSIVE_POLL_MODE;
193 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 192 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
194 cmd.num_disc_configs++; 193 cmd.num_disc_configs++;
195 } 194 }
196 195
197 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 196 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
198 (protocols & NFC_PROTO_ISO14443_MASK)) { 197 (protocols & NFC_PROTO_ISO14443_MASK)) {
199 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 198 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
200 NCI_NFC_B_PASSIVE_POLL_MODE; 199 NCI_NFC_B_PASSIVE_POLL_MODE;
201 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 200 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
202 cmd.num_disc_configs++; 201 cmd.num_disc_configs++;
203 } 202 }
204 203
205 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 204 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
206 (protocols & NFC_PROTO_FELICA_MASK 205 (protocols & NFC_PROTO_FELICA_MASK
207 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 206 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
208 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 207 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
209 NCI_NFC_F_PASSIVE_POLL_MODE; 208 NCI_NFC_F_PASSIVE_POLL_MODE;
210 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 209 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
211 cmd.num_disc_configs++; 210 cmd.num_disc_configs++;
212 } 211 }
213 212
214 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, 213 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
215 (1 + (cmd.num_disc_configs*sizeof(struct disc_config))), 214 (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
216 &cmd); 215 &cmd);
216}
217
218struct nci_rf_discover_select_param {
219 __u8 rf_discovery_id;
220 __u8 rf_protocol;
221};
222
223static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
224{
225 struct nci_rf_discover_select_param *param =
226 (struct nci_rf_discover_select_param *)opt;
227 struct nci_rf_discover_select_cmd cmd;
228
229 cmd.rf_discovery_id = param->rf_discovery_id;
230 cmd.rf_protocol = param->rf_protocol;
231
232 switch (cmd.rf_protocol) {
233 case NCI_RF_PROTOCOL_ISO_DEP:
234 cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
235 break;
236
237 case NCI_RF_PROTOCOL_NFC_DEP:
238 cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
239 break;
240
241 default:
242 cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
243 break;
244 }
245
246 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
247 sizeof(struct nci_rf_discover_select_cmd), &cmd);
217} 248}
218 249
219static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt) 250static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
@@ -223,8 +254,7 @@ static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
223 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE; 254 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
224 255
225 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD, 256 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
226 sizeof(struct nci_rf_deactivate_cmd), 257 sizeof(struct nci_rf_deactivate_cmd), &cmd);
227 &cmd);
228} 258}
229 259
230static int nci_open_device(struct nci_dev *ndev) 260static int nci_open_device(struct nci_dev *ndev)
@@ -248,22 +278,24 @@ static int nci_open_device(struct nci_dev *ndev)
248 set_bit(NCI_INIT, &ndev->flags); 278 set_bit(NCI_INIT, &ndev->flags);
249 279
250 rc = __nci_request(ndev, nci_reset_req, 0, 280 rc = __nci_request(ndev, nci_reset_req, 0,
251 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 281 msecs_to_jiffies(NCI_RESET_TIMEOUT));
252 282
253 if (!rc) { 283 if (!rc) {
254 rc = __nci_request(ndev, nci_init_req, 0, 284 rc = __nci_request(ndev, nci_init_req, 0,
255 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 285 msecs_to_jiffies(NCI_INIT_TIMEOUT));
256 } 286 }
257 287
258 if (!rc) { 288 if (!rc) {
259 rc = __nci_request(ndev, nci_init_complete_req, 0, 289 rc = __nci_request(ndev, nci_init_complete_req, 0,
260 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 290 msecs_to_jiffies(NCI_INIT_TIMEOUT));
261 } 291 }
262 292
263 clear_bit(NCI_INIT, &ndev->flags); 293 clear_bit(NCI_INIT, &ndev->flags);
264 294
265 if (!rc) { 295 if (!rc) {
266 set_bit(NCI_UP, &ndev->flags); 296 set_bit(NCI_UP, &ndev->flags);
297 nci_clear_target_list(ndev);
298 atomic_set(&ndev->state, NCI_IDLE);
267 } else { 299 } else {
268 /* Init failed, cleanup */ 300 /* Init failed, cleanup */
269 skb_queue_purge(&ndev->cmd_q); 301 skb_queue_purge(&ndev->cmd_q);
@@ -286,6 +318,7 @@ static int nci_close_device(struct nci_dev *ndev)
286 318
287 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { 319 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
288 del_timer_sync(&ndev->cmd_timer); 320 del_timer_sync(&ndev->cmd_timer);
321 del_timer_sync(&ndev->data_timer);
289 mutex_unlock(&ndev->req_lock); 322 mutex_unlock(&ndev->req_lock);
290 return 0; 323 return 0;
291 } 324 }
@@ -304,7 +337,7 @@ static int nci_close_device(struct nci_dev *ndev)
304 337
305 set_bit(NCI_INIT, &ndev->flags); 338 set_bit(NCI_INIT, &ndev->flags);
306 __nci_request(ndev, nci_reset_req, 0, 339 __nci_request(ndev, nci_reset_req, 0,
307 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 340 msecs_to_jiffies(NCI_RESET_TIMEOUT));
308 clear_bit(NCI_INIT, &ndev->flags); 341 clear_bit(NCI_INIT, &ndev->flags);
309 342
310 /* Flush cmd wq */ 343 /* Flush cmd wq */
@@ -331,6 +364,15 @@ static void nci_cmd_timer(unsigned long arg)
331 queue_work(ndev->cmd_wq, &ndev->cmd_work); 364 queue_work(ndev->cmd_wq, &ndev->cmd_work);
332} 365}
333 366
367/* NCI data exchange timer function */
368static void nci_data_timer(unsigned long arg)
369{
370 struct nci_dev *ndev = (void *) arg;
371
372 set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
373 queue_work(ndev->rx_wq, &ndev->rx_work);
374}
375
334static int nci_dev_up(struct nfc_dev *nfc_dev) 376static int nci_dev_up(struct nfc_dev *nfc_dev)
335{ 377{
336 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 378 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -350,7 +392,8 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
350 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 392 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
351 int rc; 393 int rc;
352 394
353 if (test_bit(NCI_DISCOVERY, &ndev->flags)) { 395 if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
396 (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
354 pr_err("unable to start poll, since poll is already active\n"); 397 pr_err("unable to start poll, since poll is already active\n");
355 return -EBUSY; 398 return -EBUSY;
356 } 399 }
@@ -360,17 +403,18 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
360 return -EBUSY; 403 return -EBUSY;
361 } 404 }
362 405
363 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 406 if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
364 pr_debug("target is active, implicitly deactivate...\n"); 407 (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
408 pr_debug("target active or w4 select, implicitly deactivate\n");
365 409
366 rc = nci_request(ndev, nci_rf_deactivate_req, 0, 410 rc = nci_request(ndev, nci_rf_deactivate_req, 0,
367 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 411 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
368 if (rc) 412 if (rc)
369 return -EBUSY; 413 return -EBUSY;
370 } 414 }
371 415
372 rc = nci_request(ndev, nci_rf_discover_req, protocols, 416 rc = nci_request(ndev, nci_rf_discover_req, protocols,
373 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 417 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
374 418
375 if (!rc) 419 if (!rc)
376 ndev->poll_prots = protocols; 420 ndev->poll_prots = protocols;
@@ -382,23 +426,29 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
382{ 426{
383 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 427 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
384 428
385 if (!test_bit(NCI_DISCOVERY, &ndev->flags)) { 429 if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
430 (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
386 pr_err("unable to stop poll, since poll is not active\n"); 431 pr_err("unable to stop poll, since poll is not active\n");
387 return; 432 return;
388 } 433 }
389 434
390 nci_request(ndev, nci_rf_deactivate_req, 0, 435 nci_request(ndev, nci_rf_deactivate_req, 0,
391 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 436 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
392} 437}
393 438
394static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, 439static int nci_activate_target(struct nfc_dev *nfc_dev,
395 __u32 protocol) 440 struct nfc_target *target, __u32 protocol)
396{ 441{
397 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 442 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
443 struct nci_rf_discover_select_param param;
444 struct nfc_target *nci_target = NULL;
445 int i;
446 int rc = 0;
398 447
399 pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); 448 pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
400 449
401 if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 450 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
451 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
402 pr_err("there is no available target to activate\n"); 452 pr_err("there is no available target to activate\n");
403 return -EINVAL; 453 return -EINVAL;
404 } 454 }
@@ -408,23 +458,55 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
408 return -EBUSY; 458 return -EBUSY;
409 } 459 }
410 460
411 if (!(ndev->target_available_prots & (1 << protocol))) { 461 for (i = 0; i < ndev->n_targets; i++) {
462 if (ndev->targets[i].idx == target->idx) {
463 nci_target = &ndev->targets[i];
464 break;
465 }
466 }
467
468 if (!nci_target) {
469 pr_err("unable to find the selected target\n");
470 return -EINVAL;
471 }
472
473 if (!(nci_target->supported_protocols & (1 << protocol))) {
412 pr_err("target does not support the requested protocol 0x%x\n", 474 pr_err("target does not support the requested protocol 0x%x\n",
413 protocol); 475 protocol);
414 return -EINVAL; 476 return -EINVAL;
415 } 477 }
416 478
417 ndev->target_active_prot = protocol; 479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
418 ndev->target_available_prots = 0; 480 param.rf_discovery_id = nci_target->logical_idx;
419 481
420 return 0; 482 if (protocol == NFC_PROTO_JEWEL)
483 param.rf_protocol = NCI_RF_PROTOCOL_T1T;
484 else if (protocol == NFC_PROTO_MIFARE)
485 param.rf_protocol = NCI_RF_PROTOCOL_T2T;
486 else if (protocol == NFC_PROTO_FELICA)
487 param.rf_protocol = NCI_RF_PROTOCOL_T3T;
488 else if (protocol == NFC_PROTO_ISO14443)
489 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
490 else
491 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
492
493 rc = nci_request(ndev, nci_rf_discover_select_req,
494 (unsigned long)&param,
495 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
496 }
497
498 if (!rc)
499 ndev->target_active_prot = protocol;
500
501 return rc;
421} 502}
422 503
423static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) 504static void nci_deactivate_target(struct nfc_dev *nfc_dev,
505 struct nfc_target *target)
424{ 506{
425 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 507 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
426 508
427 pr_debug("target_idx %d\n", target_idx); 509 pr_debug("target_idx %d\n", target->idx);
428 510
429 if (!ndev->target_active_prot) { 511 if (!ndev->target_active_prot) {
430 pr_err("unable to deactivate target, no active target\n"); 512 pr_err("unable to deactivate target, no active target\n");
@@ -433,21 +515,20 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
433 515
434 ndev->target_active_prot = 0; 516 ndev->target_active_prot = 0;
435 517
436 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 518 if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
437 nci_request(ndev, nci_rf_deactivate_req, 0, 519 nci_request(ndev, nci_rf_deactivate_req, 0,
438 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 520 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
439 } 521 }
440} 522}
441 523
442static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, 524static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
443 struct sk_buff *skb, 525 struct sk_buff *skb,
444 data_exchange_cb_t cb, 526 data_exchange_cb_t cb, void *cb_context)
445 void *cb_context)
446{ 527{
447 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 528 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
448 int rc; 529 int rc;
449 530
450 pr_debug("target_idx %d, len %d\n", target_idx, skb->len); 531 pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
451 532
452 if (!ndev->target_active_prot) { 533 if (!ndev->target_active_prot) {
453 pr_err("unable to exchange data, no active target\n"); 534 pr_err("unable to exchange data, no active target\n");
@@ -487,9 +568,8 @@ static struct nfc_ops nci_nfc_ops = {
487 * @supported_protocols: NFC protocols supported by the device 568 * @supported_protocols: NFC protocols supported by the device
488 */ 569 */
489struct nci_dev *nci_allocate_device(struct nci_ops *ops, 570struct nci_dev *nci_allocate_device(struct nci_ops *ops,
490 __u32 supported_protocols, 571 __u32 supported_protocols,
491 int tx_headroom, 572 int tx_headroom, int tx_tailroom)
492 int tx_tailroom)
493{ 573{
494 struct nci_dev *ndev; 574 struct nci_dev *ndev;
495 575
@@ -510,9 +590,9 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
510 ndev->tx_tailroom = tx_tailroom; 590 ndev->tx_tailroom = tx_tailroom;
511 591
512 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, 592 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
513 supported_protocols, 593 supported_protocols,
514 tx_headroom + NCI_DATA_HDR_SIZE, 594 tx_headroom + NCI_DATA_HDR_SIZE,
515 tx_tailroom); 595 tx_tailroom);
516 if (!ndev->nfc_dev) 596 if (!ndev->nfc_dev)
517 goto free_exit; 597 goto free_exit;
518 598
@@ -584,7 +664,9 @@ int nci_register_device(struct nci_dev *ndev)
584 skb_queue_head_init(&ndev->tx_q); 664 skb_queue_head_init(&ndev->tx_q);
585 665
586 setup_timer(&ndev->cmd_timer, nci_cmd_timer, 666 setup_timer(&ndev->cmd_timer, nci_cmd_timer,
587 (unsigned long) ndev); 667 (unsigned long) ndev);
668 setup_timer(&ndev->data_timer, nci_data_timer,
669 (unsigned long) ndev);
588 670
589 mutex_init(&ndev->req_lock); 671 mutex_init(&ndev->req_lock);
590 672
@@ -633,7 +715,7 @@ int nci_recv_frame(struct sk_buff *skb)
633 pr_debug("len %d\n", skb->len); 715 pr_debug("len %d\n", skb->len);
634 716
635 if (!ndev || (!test_bit(NCI_UP, &ndev->flags) 717 if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
636 && !test_bit(NCI_INIT, &ndev->flags))) { 718 && !test_bit(NCI_INIT, &ndev->flags))) {
637 kfree_skb(skb); 719 kfree_skb(skb);
638 return -ENXIO; 720 return -ENXIO;
639 } 721 }
@@ -713,7 +795,7 @@ static void nci_tx_work(struct work_struct *work)
713 795
714 /* Check if data flow control is used */ 796 /* Check if data flow control is used */
715 if (atomic_read(&ndev->credits_cnt) != 797 if (atomic_read(&ndev->credits_cnt) !=
716 NCI_DATA_FLOW_CONTROL_NOT_USED) 798 NCI_DATA_FLOW_CONTROL_NOT_USED)
717 atomic_dec(&ndev->credits_cnt); 799 atomic_dec(&ndev->credits_cnt);
718 800
719 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", 801 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
@@ -722,6 +804,9 @@ static void nci_tx_work(struct work_struct *work)
722 nci_plen(skb->data)); 804 nci_plen(skb->data));
723 805
724 nci_send_frame(skb); 806 nci_send_frame(skb);
807
808 mod_timer(&ndev->data_timer,
809 jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
725 } 810 }
726} 811}
727 812
@@ -753,6 +838,15 @@ static void nci_rx_work(struct work_struct *work)
753 break; 838 break;
754 } 839 }
755 } 840 }
841
842 /* check if a data exchange timout has occurred */
843 if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
844 /* complete the data exchange transaction, if exists */
845 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
846 nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
847
848 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
849 }
756} 850}
757 851
758/* ----- NCI TX CMD worker thread ----- */ 852/* ----- NCI TX CMD worker thread ----- */
@@ -781,6 +875,6 @@ static void nci_cmd_work(struct work_struct *work)
781 nci_send_frame(skb); 875 nci_send_frame(skb);
782 876
783 mod_timer(&ndev->cmd_timer, 877 mod_timer(&ndev->cmd_timer,
784 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); 878 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
785 } 879 }
786} 880}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5756b30e602..76c48c5324f8 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -35,8 +35,7 @@
35#include <linux/nfc.h> 35#include <linux/nfc.h>
36 36
37/* Complete data exchange transaction and forward skb to nfc core */ 37/* Complete data exchange transaction and forward skb to nfc core */
38void nci_data_exchange_complete(struct nci_dev *ndev, 38void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
39 struct sk_buff *skb,
40 int err) 39 int err)
41{ 40{
42 data_exchange_cb_t cb = ndev->data_exchange_cb; 41 data_exchange_cb_t cb = ndev->data_exchange_cb;
@@ -44,9 +43,13 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
44 43
45 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); 44 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
46 45
46 /* data exchange is complete, stop the data timer */
47 del_timer_sync(&ndev->data_timer);
48 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
49
47 if (cb) { 50 if (cb) {
48 ndev->data_exchange_cb = NULL; 51 ndev->data_exchange_cb = NULL;
49 ndev->data_exchange_cb_context = 0; 52 ndev->data_exchange_cb_context = NULL;
50 53
51 /* forward skb to nfc core */ 54 /* forward skb to nfc core */
52 cb(cb_context, skb, err); 55 cb(cb_context, skb, err);
@@ -63,9 +66,9 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
63/* ----------------- NCI TX Data ----------------- */ 66/* ----------------- NCI TX Data ----------------- */
64 67
65static inline void nci_push_data_hdr(struct nci_dev *ndev, 68static inline void nci_push_data_hdr(struct nci_dev *ndev,
66 __u8 conn_id, 69 __u8 conn_id,
67 struct sk_buff *skb, 70 struct sk_buff *skb,
68 __u8 pbf) 71 __u8 pbf)
69{ 72{
70 struct nci_data_hdr *hdr; 73 struct nci_data_hdr *hdr;
71 int plen = skb->len; 74 int plen = skb->len;
@@ -82,8 +85,8 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev,
82} 85}
83 86
84static int nci_queue_tx_data_frags(struct nci_dev *ndev, 87static int nci_queue_tx_data_frags(struct nci_dev *ndev,
85 __u8 conn_id, 88 __u8 conn_id,
86 struct sk_buff *skb) { 89 struct sk_buff *skb) {
87 int total_len = skb->len; 90 int total_len = skb->len;
88 unsigned char *data = skb->data; 91 unsigned char *data = skb->data;
89 unsigned long flags; 92 unsigned long flags;
@@ -101,8 +104,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
101 min_t(int, total_len, ndev->max_data_pkt_payload_size); 104 min_t(int, total_len, ndev->max_data_pkt_payload_size);
102 105
103 skb_frag = nci_skb_alloc(ndev, 106 skb_frag = nci_skb_alloc(ndev,
104 (NCI_DATA_HDR_SIZE + frag_len), 107 (NCI_DATA_HDR_SIZE + frag_len),
105 GFP_KERNEL); 108 GFP_KERNEL);
106 if (skb_frag == NULL) { 109 if (skb_frag == NULL) {
107 rc = -ENOMEM; 110 rc = -ENOMEM;
108 goto free_exit; 111 goto free_exit;
@@ -114,7 +117,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
114 117
115 /* second, set the header */ 118 /* second, set the header */
116 nci_push_data_hdr(ndev, conn_id, skb_frag, 119 nci_push_data_hdr(ndev, conn_id, skb_frag,
117 ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT))); 120 ((total_len == frag_len) ?
121 (NCI_PBF_LAST) : (NCI_PBF_CONT)));
118 122
119 __skb_queue_tail(&frags_q, skb_frag); 123 __skb_queue_tail(&frags_q, skb_frag);
120 124
@@ -182,8 +186,8 @@ exit:
182/* ----------------- NCI RX Data ----------------- */ 186/* ----------------- NCI RX Data ----------------- */
183 187
184static void nci_add_rx_data_frag(struct nci_dev *ndev, 188static void nci_add_rx_data_frag(struct nci_dev *ndev,
185 struct sk_buff *skb, 189 struct sk_buff *skb,
186 __u8 pbf) 190 __u8 pbf)
187{ 191{
188 int reassembly_len; 192 int reassembly_len;
189 int err = 0; 193 int err = 0;
@@ -196,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
196 pr_err("error adding room for accumulated rx data\n"); 200 pr_err("error adding room for accumulated rx data\n");
197 201
198 kfree_skb(skb); 202 kfree_skb(skb);
199 skb = 0; 203 skb = NULL;
200 204
201 kfree_skb(ndev->rx_data_reassembly); 205 kfree_skb(ndev->rx_data_reassembly);
202 ndev->rx_data_reassembly = 0; 206 ndev->rx_data_reassembly = NULL;
203 207
204 err = -ENOMEM; 208 err = -ENOMEM;
205 goto exit; 209 goto exit;
@@ -207,12 +211,12 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
207 211
208 /* second, combine the two fragments */ 212 /* second, combine the two fragments */
209 memcpy(skb_push(skb, reassembly_len), 213 memcpy(skb_push(skb, reassembly_len),
210 ndev->rx_data_reassembly->data, 214 ndev->rx_data_reassembly->data,
211 reassembly_len); 215 reassembly_len);
212 216
213 /* third, free old reassembly */ 217 /* third, free old reassembly */
214 kfree_skb(ndev->rx_data_reassembly); 218 kfree_skb(ndev->rx_data_reassembly);
215 ndev->rx_data_reassembly = 0; 219 ndev->rx_data_reassembly = NULL;
216 } 220 }
217 221
218 if (pbf == NCI_PBF_CONT) { 222 if (pbf == NCI_PBF_CONT) {
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index 6a63e5eb483d..6b7fd26c68d9 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -31,6 +31,7 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32 32
33#include <net/nfc/nci.h> 33#include <net/nfc/nci.h>
34#include <net/nfc/nci_core.h>
34 35
35/* NCI status codes to Unix errno mapping */ 36/* NCI status codes to Unix errno mapping */
36int nci_to_errno(__u8 code) 37int nci_to_errno(__u8 code)
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index b16a8dc2afbe..cb2646179e5f 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -40,7 +40,7 @@
40/* Handle NCI Notification packets */ 40/* Handle NCI Notification packets */
41 41
42static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, 42static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
43 struct sk_buff *skb) 43 struct sk_buff *skb)
44{ 44{
45 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; 45 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
46 int i; 46 int i;
@@ -62,7 +62,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
62 if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { 62 if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
63 /* found static rf connection */ 63 /* found static rf connection */
64 atomic_add(ntf->conn_entries[i].credits, 64 atomic_add(ntf->conn_entries[i].credits,
65 &ndev->credits_cnt); 65 &ndev->credits_cnt);
66 } 66 }
67 } 67 }
68 68
@@ -71,6 +71,20 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
71 queue_work(ndev->tx_wq, &ndev->tx_work); 71 queue_work(ndev->tx_wq, &ndev->tx_work);
72} 72}
73 73
74static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
75 struct sk_buff *skb)
76{
77 __u8 status = skb->data[0];
78
79 pr_debug("status 0x%x\n", status);
80
81 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
82 /* Activation failed, so complete the request
83 (the state remains the same) */
84 nci_req_complete(ndev, status);
85 }
86}
87
74static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, 88static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
75 struct sk_buff *skb) 89 struct sk_buff *skb)
76{ 90{
@@ -86,12 +100,9 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
86} 100}
87 101
88static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, 102static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
89 struct nci_rf_intf_activated_ntf *ntf, __u8 *data) 103 struct rf_tech_specific_params_nfca_poll *nfca_poll,
104 __u8 *data)
90{ 105{
91 struct rf_tech_specific_params_nfca_poll *nfca_poll;
92
93 nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
94
95 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); 106 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
96 data += 2; 107 data += 2;
97 108
@@ -115,79 +126,267 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
115 return data; 126 return data;
116} 127}
117 128
129static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
130 struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
131 __u8 *data)
132{
133 nfcb_poll->sensb_res_len = *data++;
134
135 pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
136
137 memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len);
138 data += nfcb_poll->sensb_res_len;
139
140 return data;
141}
142
143static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
144 struct rf_tech_specific_params_nfcf_poll *nfcf_poll,
145 __u8 *data)
146{
147 nfcf_poll->bit_rate = *data++;
148 nfcf_poll->sensf_res_len = *data++;
149
150 pr_debug("bit_rate %d, sensf_res_len %d\n",
151 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
152
153 memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len);
154 data += nfcf_poll->sensf_res_len;
155
156 return data;
157}
158
159static int nci_add_new_protocol(struct nci_dev *ndev,
160 struct nfc_target *target,
161 __u8 rf_protocol,
162 __u8 rf_tech_and_mode,
163 void *params)
164{
165 struct rf_tech_specific_params_nfca_poll *nfca_poll;
166 struct rf_tech_specific_params_nfcb_poll *nfcb_poll;
167 struct rf_tech_specific_params_nfcf_poll *nfcf_poll;
168 __u32 protocol;
169
170 if (rf_protocol == NCI_RF_PROTOCOL_T2T)
171 protocol = NFC_PROTO_MIFARE_MASK;
172 else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
173 protocol = NFC_PROTO_ISO14443_MASK;
174 else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
175 protocol = NFC_PROTO_FELICA_MASK;
176 else
177 protocol = 0;
178
179 if (!(protocol & ndev->poll_prots)) {
180 pr_err("the target found does not have the desired protocol\n");
181 return -EPROTO;
182 }
183
184 if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) {
185 nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params;
186
187 target->sens_res = nfca_poll->sens_res;
188 target->sel_res = nfca_poll->sel_res;
189 target->nfcid1_len = nfca_poll->nfcid1_len;
190 if (target->nfcid1_len > 0) {
191 memcpy(target->nfcid1, nfca_poll->nfcid1,
192 target->nfcid1_len);
193 }
194 } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) {
195 nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
196
197 target->sensb_res_len = nfcb_poll->sensb_res_len;
198 if (target->sensb_res_len > 0) {
199 memcpy(target->sensb_res, nfcb_poll->sensb_res,
200 target->sensb_res_len);
201 }
202 } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) {
203 nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
204
205 target->sensf_res_len = nfcf_poll->sensf_res_len;
206 if (target->sensf_res_len > 0) {
207 memcpy(target->sensf_res, nfcf_poll->sensf_res,
208 target->sensf_res_len);
209 }
210 } else {
211 pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode);
212 return -EPROTO;
213 }
214
215 target->supported_protocols |= protocol;
216
217 pr_debug("protocol 0x%x\n", protocol);
218
219 return 0;
220}
221
222static void nci_add_new_target(struct nci_dev *ndev,
223 struct nci_rf_discover_ntf *ntf)
224{
225 struct nfc_target *target;
226 int i, rc;
227
228 for (i = 0; i < ndev->n_targets; i++) {
229 target = &ndev->targets[i];
230 if (target->logical_idx == ntf->rf_discovery_id) {
231 /* This target already exists, add the new protocol */
232 nci_add_new_protocol(ndev, target, ntf->rf_protocol,
233 ntf->rf_tech_and_mode,
234 &ntf->rf_tech_specific_params);
235 return;
236 }
237 }
238
239 /* This is a new target, check if we've enough room */
240 if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) {
241 pr_debug("not enough room, ignoring new target...\n");
242 return;
243 }
244
245 target = &ndev->targets[ndev->n_targets];
246
247 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
248 ntf->rf_tech_and_mode,
249 &ntf->rf_tech_specific_params);
250 if (!rc) {
251 target->logical_idx = ntf->rf_discovery_id;
252 ndev->n_targets++;
253
254 pr_debug("logical idx %d, n_targets %d\n", target->logical_idx,
255 ndev->n_targets);
256 }
257}
258
259void nci_clear_target_list(struct nci_dev *ndev)
260{
261 memset(ndev->targets, 0,
262 (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS));
263
264 ndev->n_targets = 0;
265}
266
267static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
268 struct sk_buff *skb)
269{
270 struct nci_rf_discover_ntf ntf;
271 __u8 *data = skb->data;
272 bool add_target = true;
273
274 ntf.rf_discovery_id = *data++;
275 ntf.rf_protocol = *data++;
276 ntf.rf_tech_and_mode = *data++;
277 ntf.rf_tech_specific_params_len = *data++;
278
279 pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
280 pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
281 pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode);
282 pr_debug("rf_tech_specific_params_len %d\n",
283 ntf.rf_tech_specific_params_len);
284
285 if (ntf.rf_tech_specific_params_len > 0) {
286 switch (ntf.rf_tech_and_mode) {
287 case NCI_NFC_A_PASSIVE_POLL_MODE:
288 data = nci_extract_rf_params_nfca_passive_poll(ndev,
289 &(ntf.rf_tech_specific_params.nfca_poll), data);
290 break;
291
292 case NCI_NFC_B_PASSIVE_POLL_MODE:
293 data = nci_extract_rf_params_nfcb_passive_poll(ndev,
294 &(ntf.rf_tech_specific_params.nfcb_poll), data);
295 break;
296
297 case NCI_NFC_F_PASSIVE_POLL_MODE:
298 data = nci_extract_rf_params_nfcf_passive_poll(ndev,
299 &(ntf.rf_tech_specific_params.nfcf_poll), data);
300 break;
301
302 default:
303 pr_err("unsupported rf_tech_and_mode 0x%x\n",
304 ntf.rf_tech_and_mode);
305 data += ntf.rf_tech_specific_params_len;
306 add_target = false;
307 }
308 }
309
310 ntf.ntf_type = *data++;
311 pr_debug("ntf_type %d\n", ntf.ntf_type);
312
313 if (add_target == true)
314 nci_add_new_target(ndev, &ntf);
315
316 if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) {
317 atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES);
318 } else {
319 atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
320 nfc_targets_found(ndev->nfc_dev, ndev->targets,
321 ndev->n_targets);
322 }
323}
324
118static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, 325static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
119 struct nci_rf_intf_activated_ntf *ntf, __u8 *data) 326 struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
120{ 327{
121 struct activation_params_nfca_poll_iso_dep *nfca_poll; 328 struct activation_params_nfca_poll_iso_dep *nfca_poll;
329 struct activation_params_nfcb_poll_iso_dep *nfcb_poll;
122 330
123 switch (ntf->activation_rf_tech_and_mode) { 331 switch (ntf->activation_rf_tech_and_mode) {
124 case NCI_NFC_A_PASSIVE_POLL_MODE: 332 case NCI_NFC_A_PASSIVE_POLL_MODE:
125 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; 333 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
126 nfca_poll->rats_res_len = *data++; 334 nfca_poll->rats_res_len = *data++;
335 pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
127 if (nfca_poll->rats_res_len > 0) { 336 if (nfca_poll->rats_res_len > 0) {
128 memcpy(nfca_poll->rats_res, 337 memcpy(nfca_poll->rats_res,
129 data, 338 data, nfca_poll->rats_res_len);
130 nfca_poll->rats_res_len); 339 }
340 break;
341
342 case NCI_NFC_B_PASSIVE_POLL_MODE:
343 nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
344 nfcb_poll->attrib_res_len = *data++;
345 pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
346 if (nfcb_poll->attrib_res_len > 0) {
347 memcpy(nfcb_poll->attrib_res,
348 data, nfcb_poll->attrib_res_len);
131 } 349 }
132 break; 350 break;
133 351
134 default: 352 default:
135 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", 353 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
136 ntf->activation_rf_tech_and_mode); 354 ntf->activation_rf_tech_and_mode);
137 return -EPROTO; 355 return NCI_STATUS_RF_PROTOCOL_ERROR;
138 } 356 }
139 357
140 return 0; 358 return NCI_STATUS_OK;
141} 359}
142 360
143static void nci_target_found(struct nci_dev *ndev, 361static void nci_target_auto_activated(struct nci_dev *ndev,
144 struct nci_rf_intf_activated_ntf *ntf) 362 struct nci_rf_intf_activated_ntf *ntf)
145{ 363{
146 struct nfc_target nfc_tgt; 364 struct nfc_target *target;
365 int rc;
147 366
148 if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */ 367 target = &ndev->targets[ndev->n_targets];
149 nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
150 else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
151 nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
152 else
153 nfc_tgt.supported_protocols = 0;
154
155 nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
156 nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
157 nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len;
158 if (nfc_tgt.nfcid1_len > 0) {
159 memcpy(nfc_tgt.nfcid1,
160 ntf->rf_tech_specific_params.nfca_poll.nfcid1,
161 nfc_tgt.nfcid1_len);
162 }
163 368
164 if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) { 369 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
165 pr_debug("the target found does not have the desired protocol\n"); 370 ntf->activation_rf_tech_and_mode,
371 &ntf->rf_tech_specific_params);
372 if (rc)
166 return; 373 return;
167 }
168 374
169 pr_debug("new target found, supported_protocols 0x%x\n", 375 target->logical_idx = ntf->rf_discovery_id;
170 nfc_tgt.supported_protocols); 376 ndev->n_targets++;
171 377
172 ndev->target_available_prots = nfc_tgt.supported_protocols; 378 pr_debug("logical idx %d, n_targets %d\n",
173 ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size; 379 target->logical_idx, ndev->n_targets);
174 ndev->initial_num_credits = ntf->initial_num_credits;
175 380
176 /* set the available credits to initial value */ 381 nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
177 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
178
179 nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
180} 382}
181 383
182static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, 384static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
183 struct sk_buff *skb) 385 struct sk_buff *skb)
184{ 386{
185 struct nci_rf_intf_activated_ntf ntf; 387 struct nci_rf_intf_activated_ntf ntf;
186 __u8 *data = skb->data; 388 __u8 *data = skb->data;
187 int err = 0; 389 int err = NCI_STATUS_OK;
188
189 clear_bit(NCI_DISCOVERY, &ndev->flags);
190 set_bit(NCI_POLL_ACTIVE, &ndev->flags);
191 390
192 ntf.rf_discovery_id = *data++; 391 ntf.rf_discovery_id = *data++;
193 ntf.rf_interface = *data++; 392 ntf.rf_interface = *data++;
@@ -204,7 +403,8 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
204 ntf.activation_rf_tech_and_mode); 403 ntf.activation_rf_tech_and_mode);
205 pr_debug("max_data_pkt_payload_size 0x%x\n", 404 pr_debug("max_data_pkt_payload_size 0x%x\n",
206 ntf.max_data_pkt_payload_size); 405 ntf.max_data_pkt_payload_size);
207 pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); 406 pr_debug("initial_num_credits 0x%x\n",
407 ntf.initial_num_credits);
208 pr_debug("rf_tech_specific_params_len %d\n", 408 pr_debug("rf_tech_specific_params_len %d\n",
209 ntf.rf_tech_specific_params_len); 409 ntf.rf_tech_specific_params_len);
210 410
@@ -212,13 +412,24 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
212 switch (ntf.activation_rf_tech_and_mode) { 412 switch (ntf.activation_rf_tech_and_mode) {
213 case NCI_NFC_A_PASSIVE_POLL_MODE: 413 case NCI_NFC_A_PASSIVE_POLL_MODE:
214 data = nci_extract_rf_params_nfca_passive_poll(ndev, 414 data = nci_extract_rf_params_nfca_passive_poll(ndev,
215 &ntf, data); 415 &(ntf.rf_tech_specific_params.nfca_poll), data);
416 break;
417
418 case NCI_NFC_B_PASSIVE_POLL_MODE:
419 data = nci_extract_rf_params_nfcb_passive_poll(ndev,
420 &(ntf.rf_tech_specific_params.nfcb_poll), data);
421 break;
422
423 case NCI_NFC_F_PASSIVE_POLL_MODE:
424 data = nci_extract_rf_params_nfcf_passive_poll(ndev,
425 &(ntf.rf_tech_specific_params.nfcf_poll), data);
216 break; 426 break;
217 427
218 default: 428 default:
219 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", 429 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
220 ntf.activation_rf_tech_and_mode); 430 ntf.activation_rf_tech_and_mode);
221 return; 431 err = NCI_STATUS_RF_PROTOCOL_ERROR;
432 goto exit;
222 } 433 }
223 } 434 }
224 435
@@ -229,18 +440,15 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
229 440
230 pr_debug("data_exch_rf_tech_and_mode 0x%x\n", 441 pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
231 ntf.data_exch_rf_tech_and_mode); 442 ntf.data_exch_rf_tech_and_mode);
232 pr_debug("data_exch_tx_bit_rate 0x%x\n", 443 pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate);
233 ntf.data_exch_tx_bit_rate); 444 pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate);
234 pr_debug("data_exch_rx_bit_rate 0x%x\n", 445 pr_debug("activation_params_len %d\n", ntf.activation_params_len);
235 ntf.data_exch_rx_bit_rate);
236 pr_debug("activation_params_len %d\n",
237 ntf.activation_params_len);
238 446
239 if (ntf.activation_params_len > 0) { 447 if (ntf.activation_params_len > 0) {
240 switch (ntf.rf_interface) { 448 switch (ntf.rf_interface) {
241 case NCI_RF_INTERFACE_ISO_DEP: 449 case NCI_RF_INTERFACE_ISO_DEP:
242 err = nci_extract_activation_params_iso_dep(ndev, 450 err = nci_extract_activation_params_iso_dep(ndev,
243 &ntf, data); 451 &ntf, data);
244 break; 452 break;
245 453
246 case NCI_RF_INTERFACE_FRAME: 454 case NCI_RF_INTERFACE_FRAME:
@@ -250,36 +458,55 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
250 default: 458 default:
251 pr_err("unsupported rf_interface 0x%x\n", 459 pr_err("unsupported rf_interface 0x%x\n",
252 ntf.rf_interface); 460 ntf.rf_interface);
253 return; 461 err = NCI_STATUS_RF_PROTOCOL_ERROR;
462 break;
254 } 463 }
255 } 464 }
256 465
257 if (!err) 466exit:
258 nci_target_found(ndev, &ntf); 467 if (err == NCI_STATUS_OK) {
468 ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size;
469 ndev->initial_num_credits = ntf.initial_num_credits;
470
471 /* set the available credits to initial value */
472 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
473 }
474
475 if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
476 /* A single target was found and activated automatically */
477 atomic_set(&ndev->state, NCI_POLL_ACTIVE);
478 if (err == NCI_STATUS_OK)
479 nci_target_auto_activated(ndev, &ntf);
480 } else { /* ndev->state == NCI_W4_HOST_SELECT */
481 /* A selected target was activated, so complete the request */
482 atomic_set(&ndev->state, NCI_POLL_ACTIVE);
483 nci_req_complete(ndev, err);
484 }
259} 485}
260 486
261static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, 487static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
262 struct sk_buff *skb) 488 struct sk_buff *skb)
263{ 489{
264 struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; 490 struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
265 491
266 pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); 492 pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
267 493
268 clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
269 ndev->target_active_prot = 0;
270
271 /* drop tx data queue */ 494 /* drop tx data queue */
272 skb_queue_purge(&ndev->tx_q); 495 skb_queue_purge(&ndev->tx_q);
273 496
274 /* drop partial rx data packet */ 497 /* drop partial rx data packet */
275 if (ndev->rx_data_reassembly) { 498 if (ndev->rx_data_reassembly) {
276 kfree_skb(ndev->rx_data_reassembly); 499 kfree_skb(ndev->rx_data_reassembly);
277 ndev->rx_data_reassembly = 0; 500 ndev->rx_data_reassembly = NULL;
278 } 501 }
279 502
280 /* complete the data exchange transaction, if exists */ 503 /* complete the data exchange transaction, if exists */
281 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) 504 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
282 nci_data_exchange_complete(ndev, NULL, -EIO); 505 nci_data_exchange_complete(ndev, NULL, -EIO);
506
507 nci_clear_target_list(ndev);
508 atomic_set(&ndev->state, NCI_IDLE);
509 nci_req_complete(ndev, NCI_STATUS_OK);
283} 510}
284 511
285void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) 512void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -300,10 +527,18 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
300 nci_core_conn_credits_ntf_packet(ndev, skb); 527 nci_core_conn_credits_ntf_packet(ndev, skb);
301 break; 528 break;
302 529
530 case NCI_OP_CORE_GENERIC_ERROR_NTF:
531 nci_core_generic_error_ntf_packet(ndev, skb);
532 break;
533
303 case NCI_OP_CORE_INTF_ERROR_NTF: 534 case NCI_OP_CORE_INTF_ERROR_NTF:
304 nci_core_conn_intf_error_ntf_packet(ndev, skb); 535 nci_core_conn_intf_error_ntf_packet(ndev, skb);
305 break; 536 break;
306 537
538 case NCI_OP_RF_DISCOVER_NTF:
539 nci_rf_discover_ntf_packet(ndev, skb);
540 break;
541
307 case NCI_OP_RF_INTF_ACTIVATED_NTF: 542 case NCI_OP_RF_INTF_ACTIVATED_NTF:
308 nci_rf_intf_activated_ntf_packet(ndev, skb); 543 nci_rf_intf_activated_ntf_packet(ndev, skb);
309 break; 544 break;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 2840ae2f3615..3003c3390e49 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -67,19 +67,18 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
67 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces; 67 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
68 68
69 if (ndev->num_supported_rf_interfaces > 69 if (ndev->num_supported_rf_interfaces >
70 NCI_MAX_SUPPORTED_RF_INTERFACES) { 70 NCI_MAX_SUPPORTED_RF_INTERFACES) {
71 ndev->num_supported_rf_interfaces = 71 ndev->num_supported_rf_interfaces =
72 NCI_MAX_SUPPORTED_RF_INTERFACES; 72 NCI_MAX_SUPPORTED_RF_INTERFACES;
73 } 73 }
74 74
75 memcpy(ndev->supported_rf_interfaces, 75 memcpy(ndev->supported_rf_interfaces,
76 rsp_1->supported_rf_interfaces, 76 rsp_1->supported_rf_interfaces,
77 ndev->num_supported_rf_interfaces); 77 ndev->num_supported_rf_interfaces);
78 78
79 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); 79 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
80 80
81 ndev->max_logical_connections = 81 ndev->max_logical_connections = rsp_2->max_logical_connections;
82 rsp_2->max_logical_connections;
83 ndev->max_routing_table_size = 82 ndev->max_routing_table_size =
84 __le16_to_cpu(rsp_2->max_routing_table_size); 83 __le16_to_cpu(rsp_2->max_routing_table_size);
85 ndev->max_ctrl_pkt_payload_len = 84 ndev->max_ctrl_pkt_payload_len =
@@ -121,7 +120,7 @@ exit:
121} 120}
122 121
123static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, 122static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
124 struct sk_buff *skb) 123 struct sk_buff *skb)
125{ 124{
126 __u8 status = skb->data[0]; 125 __u8 status = skb->data[0];
127 126
@@ -137,21 +136,37 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
137 pr_debug("status 0x%x\n", status); 136 pr_debug("status 0x%x\n", status);
138 137
139 if (status == NCI_STATUS_OK) 138 if (status == NCI_STATUS_OK)
140 set_bit(NCI_DISCOVERY, &ndev->flags); 139 atomic_set(&ndev->state, NCI_DISCOVERY);
141 140
142 nci_req_complete(ndev, status); 141 nci_req_complete(ndev, status);
143} 142}
144 143
145static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, 144static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
146 struct sk_buff *skb) 145 struct sk_buff *skb)
147{ 146{
148 __u8 status = skb->data[0]; 147 __u8 status = skb->data[0];
149 148
150 pr_debug("status 0x%x\n", status); 149 pr_debug("status 0x%x\n", status);
151 150
152 clear_bit(NCI_DISCOVERY, &ndev->flags); 151 /* Complete the request on intf_activated_ntf or generic_error_ntf */
152 if (status != NCI_STATUS_OK)
153 nci_req_complete(ndev, status);
154}
153 155
154 nci_req_complete(ndev, status); 156static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
157 struct sk_buff *skb)
158{
159 __u8 status = skb->data[0];
160
161 pr_debug("status 0x%x\n", status);
162
163 /* If target was active, complete the request only in deactivate_ntf */
164 if ((status != NCI_STATUS_OK) ||
165 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
166 nci_clear_target_list(ndev);
167 atomic_set(&ndev->state, NCI_IDLE);
168 nci_req_complete(ndev, status);
169 }
155} 170}
156 171
157void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) 172void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -187,6 +202,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
187 nci_rf_disc_rsp_packet(ndev, skb); 202 nci_rf_disc_rsp_packet(ndev, skb);
188 break; 203 break;
189 204
205 case NCI_OP_RF_DISCOVER_SELECT_RSP:
206 nci_rf_disc_select_rsp_packet(ndev, skb);
207 break;
208
190 case NCI_OP_RF_DEACTIVATE_RSP: 209 case NCI_OP_RF_DEACTIVATE_RSP:
191 nci_rf_deactivate_rsp_packet(ndev, skb); 210 nci_rf_deactivate_rsp_packet(ndev, skb);
192 break; 211 break;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6989dfa28ee2..581d419083aa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
33 .name = NFC_GENL_MCAST_EVENT_NAME, 33 .name = NFC_GENL_MCAST_EVENT_NAME,
34}; 34};
35 35
36struct genl_family nfc_genl_family = { 36static struct genl_family nfc_genl_family = {
37 .id = GENL_ID_GENERATE, 37 .id = GENL_ID_GENERATE,
38 .hdrsize = 0, 38 .hdrsize = 0,
39 .name = NFC_GENL_NAME, 39 .name = NFC_GENL_NAME,
@@ -48,28 +48,38 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
48 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, 48 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, 49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, 50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
51}; 52};
52 53
53static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 54static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
54 struct netlink_callback *cb, int flags) 55 struct netlink_callback *cb, int flags)
55{ 56{
56 void *hdr; 57 void *hdr;
57 58
58 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 59 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
59 &nfc_genl_family, flags, NFC_CMD_GET_TARGET); 60 &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
60 if (!hdr) 61 if (!hdr)
61 return -EMSGSIZE; 62 return -EMSGSIZE;
62 63
63 genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 64 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
64 65
65 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); 66 if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
66 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, 67 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
67 target->supported_protocols); 68 nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
68 NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); 69 nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
69 NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); 70 goto nla_put_failure;
70 if (target->nfcid1_len > 0) 71 if (target->nfcid1_len > 0 &&
71 NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, 72 nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
72 target->nfcid1); 73 target->nfcid1))
74 goto nla_put_failure;
75 if (target->sensb_res_len > 0 &&
76 nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
77 target->sensb_res))
78 goto nla_put_failure;
79 if (target->sensf_res_len > 0 &&
80 nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
81 target->sensf_res))
82 goto nla_put_failure;
73 83
74 return genlmsg_end(msg, hdr); 84 return genlmsg_end(msg, hdr);
75 85
@@ -85,9 +95,9 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
85 u32 idx; 95 u32 idx;
86 96
87 rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, 97 rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
88 nfc_genl_family.attrbuf, 98 nfc_genl_family.attrbuf,
89 nfc_genl_family.maxattr, 99 nfc_genl_family.maxattr,
90 nfc_genl_policy); 100 nfc_genl_policy);
91 if (rc < 0) 101 if (rc < 0)
92 return ERR_PTR(rc); 102 return ERR_PTR(rc);
93 103
@@ -104,7 +114,7 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
104} 114}
105 115
106static int nfc_genl_dump_targets(struct sk_buff *skb, 116static int nfc_genl_dump_targets(struct sk_buff *skb,
107 struct netlink_callback *cb) 117 struct netlink_callback *cb)
108{ 118{
109 int i = cb->args[0]; 119 int i = cb->args[0];
110 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 120 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -118,20 +128,20 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
118 cb->args[1] = (long) dev; 128 cb->args[1] = (long) dev;
119 } 129 }
120 130
121 spin_lock_bh(&dev->targets_lock); 131 device_lock(&dev->dev);
122 132
123 cb->seq = dev->targets_generation; 133 cb->seq = dev->targets_generation;
124 134
125 while (i < dev->n_targets) { 135 while (i < dev->n_targets) {
126 rc = nfc_genl_send_target(skb, &dev->targets[i], cb, 136 rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
127 NLM_F_MULTI); 137 NLM_F_MULTI);
128 if (rc < 0) 138 if (rc < 0)
129 break; 139 break;
130 140
131 i++; 141 i++;
132 } 142 }
133 143
134 spin_unlock_bh(&dev->targets_lock); 144 device_unlock(&dev->dev);
135 145
136 cb->args[0] = i; 146 cb->args[0] = i;
137 147
@@ -160,11 +170,12 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
160 return -ENOMEM; 170 return -ENOMEM;
161 171
162 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 172 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
163 NFC_EVENT_TARGETS_FOUND); 173 NFC_EVENT_TARGETS_FOUND);
164 if (!hdr) 174 if (!hdr)
165 goto free_msg; 175 goto free_msg;
166 176
167 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 177 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
178 goto nla_put_failure;
168 179
169 genlmsg_end(msg, hdr); 180 genlmsg_end(msg, hdr);
170 181
@@ -177,6 +188,37 @@ free_msg:
177 return -EMSGSIZE; 188 return -EMSGSIZE;
178} 189}
179 190
191int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
192{
193 struct sk_buff *msg;
194 void *hdr;
195
196 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
197 if (!msg)
198 return -ENOMEM;
199
200 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
201 NFC_EVENT_TARGET_LOST);
202 if (!hdr)
203 goto free_msg;
204
205 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
206 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
207 goto nla_put_failure;
208
209 genlmsg_end(msg, hdr);
210
211 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
212
213 return 0;
214
215nla_put_failure:
216 genlmsg_cancel(msg, hdr);
217free_msg:
218 nlmsg_free(msg);
219 return -EMSGSIZE;
220}
221
180int nfc_genl_device_added(struct nfc_dev *dev) 222int nfc_genl_device_added(struct nfc_dev *dev)
181{ 223{
182 struct sk_buff *msg; 224 struct sk_buff *msg;
@@ -187,13 +229,15 @@ int nfc_genl_device_added(struct nfc_dev *dev)
187 return -ENOMEM; 229 return -ENOMEM;
188 230
189 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 231 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
190 NFC_EVENT_DEVICE_ADDED); 232 NFC_EVENT_DEVICE_ADDED);
191 if (!hdr) 233 if (!hdr)
192 goto free_msg; 234 goto free_msg;
193 235
194 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 236 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
195 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 237 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
196 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 238 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
239 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
240 goto nla_put_failure;
197 241
198 genlmsg_end(msg, hdr); 242 genlmsg_end(msg, hdr);
199 243
@@ -218,11 +262,12 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
218 return -ENOMEM; 262 return -ENOMEM;
219 263
220 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 264 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
221 NFC_EVENT_DEVICE_REMOVED); 265 NFC_EVENT_DEVICE_REMOVED);
222 if (!hdr) 266 if (!hdr)
223 goto free_msg; 267 goto free_msg;
224 268
225 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 269 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
270 goto nla_put_failure;
226 271
227 genlmsg_end(msg, hdr); 272 genlmsg_end(msg, hdr);
228 273
@@ -238,23 +283,25 @@ free_msg:
238} 283}
239 284
240static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, 285static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
241 u32 pid, u32 seq, 286 u32 pid, u32 seq,
242 struct netlink_callback *cb, 287 struct netlink_callback *cb,
243 int flags) 288 int flags)
244{ 289{
245 void *hdr; 290 void *hdr;
246 291
247 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags, 292 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
248 NFC_CMD_GET_DEVICE); 293 NFC_CMD_GET_DEVICE);
249 if (!hdr) 294 if (!hdr)
250 return -EMSGSIZE; 295 return -EMSGSIZE;
251 296
252 if (cb) 297 if (cb)
253 genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 298 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
254 299
255 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 300 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
256 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 301 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
257 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 302 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
303 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
304 goto nla_put_failure;
258 305
259 return genlmsg_end(msg, hdr); 306 return genlmsg_end(msg, hdr);
260 307
@@ -264,7 +311,7 @@ nla_put_failure:
264} 311}
265 312
266static int nfc_genl_dump_devices(struct sk_buff *skb, 313static int nfc_genl_dump_devices(struct sk_buff *skb,
267 struct netlink_callback *cb) 314 struct netlink_callback *cb)
268{ 315{
269 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; 316 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
270 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 317 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -291,8 +338,7 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
291 int rc; 338 int rc;
292 339
293 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid, 340 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
294 cb->nlh->nlmsg_seq, 341 cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
295 cb, NLM_F_MULTI);
296 if (rc < 0) 342 if (rc < 0)
297 break; 343 break;
298 344
@@ -317,7 +363,7 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
317} 363}
318 364
319int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, 365int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
320 u8 comm_mode, u8 rf_mode) 366 u8 comm_mode, u8 rf_mode)
321{ 367{
322 struct sk_buff *msg; 368 struct sk_buff *msg;
323 void *hdr; 369 void *hdr;
@@ -328,16 +374,18 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
328 if (!msg) 374 if (!msg)
329 return -ENOMEM; 375 return -ENOMEM;
330 376
331 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 377 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP);
332 NFC_CMD_DEP_LINK_UP);
333 if (!hdr) 378 if (!hdr)
334 goto free_msg; 379 goto free_msg;
335 380
336 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 381 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
337 if (rf_mode == NFC_RF_INITIATOR) 382 goto nla_put_failure;
338 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); 383 if (rf_mode == NFC_RF_INITIATOR &&
339 NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode); 384 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
340 NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode); 385 goto nla_put_failure;
386 if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
387 nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
388 goto nla_put_failure;
341 389
342 genlmsg_end(msg, hdr); 390 genlmsg_end(msg, hdr);
343 391
@@ -366,11 +414,12 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
366 return -ENOMEM; 414 return -ENOMEM;
367 415
368 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 416 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
369 NFC_CMD_DEP_LINK_DOWN); 417 NFC_CMD_DEP_LINK_DOWN);
370 if (!hdr) 418 if (!hdr)
371 goto free_msg; 419 goto free_msg;
372 420
373 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 421 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
422 goto nla_put_failure;
374 423
375 genlmsg_end(msg, hdr); 424 genlmsg_end(msg, hdr);
376 425
@@ -408,7 +457,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
408 } 457 }
409 458
410 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq, 459 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
411 NULL, 0); 460 NULL, 0);
412 if (rc < 0) 461 if (rc < 0)
413 goto out_free; 462 goto out_free;
414 463
@@ -475,7 +524,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
475 pr_debug("Poll start\n"); 524 pr_debug("Poll start\n");
476 525
477 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 526 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
478 !info->attrs[NFC_ATTR_PROTOCOLS]) 527 !info->attrs[NFC_ATTR_PROTOCOLS])
479 return -EINVAL; 528 return -EINVAL;
480 529
481 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 530 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -533,13 +582,12 @@ static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
533 struct nfc_dev *dev; 582 struct nfc_dev *dev;
534 int rc, tgt_idx; 583 int rc, tgt_idx;
535 u32 idx; 584 u32 idx;
536 u8 comm, rf; 585 u8 comm;
537 586
538 pr_debug("DEP link up\n"); 587 pr_debug("DEP link up\n");
539 588
540 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 589 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
541 !info->attrs[NFC_ATTR_COMM_MODE] || 590 !info->attrs[NFC_ATTR_COMM_MODE])
542 !info->attrs[NFC_ATTR_RF_MODE])
543 return -EINVAL; 591 return -EINVAL;
544 592
545 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 593 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -549,19 +597,15 @@ static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
549 tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); 597 tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
550 598
551 comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]); 599 comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
552 rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
553 600
554 if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE) 601 if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
555 return -EINVAL; 602 return -EINVAL;
556 603
557 if (rf != NFC_RF_INITIATOR && comm != NFC_RF_TARGET)
558 return -EINVAL;
559
560 dev = nfc_get_device(idx); 604 dev = nfc_get_device(idx);
561 if (!dev) 605 if (!dev)
562 return -ENODEV; 606 return -ENODEV;
563 607
564 rc = nfc_dep_link_up(dev, tgt_idx, comm, rf); 608 rc = nfc_dep_link_up(dev, tgt_idx, comm);
565 609
566 nfc_put_device(dev); 610 nfc_put_device(dev);
567 611
@@ -636,7 +680,7 @@ static struct genl_ops nfc_genl_ops[] = {
636}; 680};
637 681
638static int nfc_genl_rcv_nl_event(struct notifier_block *this, 682static int nfc_genl_rcv_nl_event(struct notifier_block *this,
639 unsigned long event, void *ptr) 683 unsigned long event, void *ptr)
640{ 684{
641 struct netlink_notify *n = ptr; 685 struct netlink_notify *n = ptr;
642 struct class_dev_iter iter; 686 struct class_dev_iter iter;
@@ -689,7 +733,7 @@ int __init nfc_genl_init(void)
689 int rc; 733 int rc;
690 734
691 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops, 735 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
692 ARRAY_SIZE(nfc_genl_ops)); 736 ARRAY_SIZE(nfc_genl_ops));
693 if (rc) 737 if (rc)
694 return rc; 738 return rc;
695 739
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 6d28d75995b0..3dd4232ae664 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -32,7 +32,7 @@ struct nfc_protocol {
32 struct proto *proto; 32 struct proto *proto;
33 struct module *owner; 33 struct module *owner;
34 int (*create)(struct net *net, struct socket *sock, 34 int (*create)(struct net *net, struct socket *sock,
35 const struct nfc_protocol *nfc_proto); 35 const struct nfc_protocol *nfc_proto);
36}; 36};
37 37
38struct nfc_rawsock { 38struct nfc_rawsock {
@@ -54,7 +54,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
54int nfc_llcp_register_device(struct nfc_dev *dev); 54int nfc_llcp_register_device(struct nfc_dev *dev);
55void nfc_llcp_unregister_device(struct nfc_dev *dev); 55void nfc_llcp_unregister_device(struct nfc_dev *dev);
56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); 56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len); 57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
58int __init nfc_llcp_init(void); 58int __init nfc_llcp_init(void);
59void nfc_llcp_exit(void); 59void nfc_llcp_exit(void);
60 60
@@ -65,7 +65,7 @@ static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev)
65} 65}
66 66
67static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, 67static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
68 u8 comm_mode, u8 rf_mode) 68 u8 comm_mode, u8 rf_mode)
69{ 69{
70} 70}
71 71
@@ -78,12 +78,13 @@ static inline void nfc_llcp_unregister_device(struct nfc_dev *dev)
78{ 78{
79} 79}
80 80
81static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) 81static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
82 u8 *gb, u8 gb_len)
82{ 83{
83 return 0; 84 return 0;
84} 85}
85 86
86static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len) 87static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
87{ 88{
88 *gb_len = 0; 89 *gb_len = 0;
89 return NULL; 90 return NULL;
@@ -118,6 +119,7 @@ void nfc_genl_data_init(struct nfc_genl_data *genl_data);
118void nfc_genl_data_exit(struct nfc_genl_data *genl_data); 119void nfc_genl_data_exit(struct nfc_genl_data *genl_data);
119 120
120int nfc_genl_targets_found(struct nfc_dev *dev); 121int nfc_genl_targets_found(struct nfc_dev *dev);
122int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx);
121 123
122int nfc_genl_device_added(struct nfc_dev *dev); 124int nfc_genl_device_added(struct nfc_dev *dev);
123int nfc_genl_device_removed(struct nfc_dev *dev); 125int nfc_genl_device_removed(struct nfc_dev *dev);
@@ -126,7 +128,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
126 u8 comm_mode, u8 rf_mode); 128 u8 comm_mode, u8 rf_mode);
127int nfc_genl_dep_link_down_event(struct nfc_dev *dev); 129int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
128 130
129struct nfc_dev *nfc_get_device(unsigned idx); 131struct nfc_dev *nfc_get_device(unsigned int idx);
130 132
131static inline void nfc_put_device(struct nfc_dev *dev) 133static inline void nfc_put_device(struct nfc_dev *dev)
132{ 134{
@@ -160,8 +162,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
160 162
161int nfc_stop_poll(struct nfc_dev *dev); 163int nfc_stop_poll(struct nfc_dev *dev);
162 164
163int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, 165int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, u8 comm_mode);
164 u8 comm_mode, u8 rf_mode);
165 166
166int nfc_dep_link_down(struct nfc_dev *dev); 167int nfc_dep_link_down(struct nfc_dev *dev);
167 168
@@ -169,9 +170,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
169 170
170int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx); 171int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
171 172
172int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, 173int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
173 struct sk_buff *skb, 174 data_exchange_cb_t cb, void *cb_context);
174 data_exchange_cb_t cb,
175 void *cb_context);
176 175
177#endif /* __LOCAL_NFC_H */ 176#endif /* __LOCAL_NFC_H */
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 2e2f8c6a61fe..ec1134c9e07f 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -63,7 +63,7 @@ static int rawsock_release(struct socket *sock)
63} 63}
64 64
65static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, 65static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
66 int len, int flags) 66 int len, int flags)
67{ 67{
68 struct sock *sk = sock->sk; 68 struct sock *sk = sock->sk;
69 struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; 69 struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
@@ -73,7 +73,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
73 pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); 73 pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
74 74
75 if (!addr || len < sizeof(struct sockaddr_nfc) || 75 if (!addr || len < sizeof(struct sockaddr_nfc) ||
76 addr->sa_family != AF_NFC) 76 addr->sa_family != AF_NFC)
77 return -EINVAL; 77 return -EINVAL;
78 78
79 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", 79 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
@@ -92,14 +92,8 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
92 goto error; 92 goto error;
93 } 93 }
94 94
95 if (addr->target_idx > dev->target_idx - 1 || 95 if (addr->target_idx > dev->target_next_idx - 1 ||
96 addr->target_idx < dev->target_idx - dev->n_targets) { 96 addr->target_idx < dev->target_next_idx - dev->n_targets) {
97 rc = -EINVAL;
98 goto error;
99 }
100
101 if (addr->target_idx > dev->target_idx - 1 ||
102 addr->target_idx < dev->target_idx - dev->n_targets) {
103 rc = -EINVAL; 97 rc = -EINVAL;
104 goto error; 98 goto error;
105 } 99 }
@@ -132,7 +126,7 @@ static int rawsock_add_header(struct sk_buff *skb)
132} 126}
133 127
134static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, 128static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
135 int err) 129 int err)
136{ 130{
137 struct sock *sk = (struct sock *) context; 131 struct sock *sk = (struct sock *) context;
138 132
@@ -185,7 +179,7 @@ static void rawsock_tx_work(struct work_struct *work)
185 179
186 sock_hold(sk); 180 sock_hold(sk);
187 rc = nfc_data_exchange(dev, target_idx, skb, 181 rc = nfc_data_exchange(dev, target_idx, skb,
188 rawsock_data_exchange_complete, sk); 182 rawsock_data_exchange_complete, sk);
189 if (rc) { 183 if (rc) {
190 rawsock_report_error(sk, rc); 184 rawsock_report_error(sk, rc);
191 sock_put(sk); 185 sock_put(sk);
@@ -193,7 +187,7 @@ static void rawsock_tx_work(struct work_struct *work)
193} 187}
194 188
195static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, 189static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
196 struct msghdr *msg, size_t len) 190 struct msghdr *msg, size_t len)
197{ 191{
198 struct sock *sk = sock->sk; 192 struct sock *sk = sock->sk;
199 struct nfc_dev *dev = nfc_rawsock(sk)->dev; 193 struct nfc_dev *dev = nfc_rawsock(sk)->dev;
@@ -230,7 +224,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
230} 224}
231 225
232static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, 226static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
233 struct msghdr *msg, size_t len, int flags) 227 struct msghdr *msg, size_t len, int flags)
234{ 228{
235 int noblock = flags & MSG_DONTWAIT; 229 int noblock = flags & MSG_DONTWAIT;
236 struct sock *sk = sock->sk; 230 struct sock *sk = sock->sk;
@@ -286,7 +280,7 @@ static void rawsock_destruct(struct sock *sk)
286 280
287 if (sk->sk_state == TCP_ESTABLISHED) { 281 if (sk->sk_state == TCP_ESTABLISHED) {
288 nfc_deactivate_target(nfc_rawsock(sk)->dev, 282 nfc_deactivate_target(nfc_rawsock(sk)->dev,
289 nfc_rawsock(sk)->target_idx); 283 nfc_rawsock(sk)->target_idx);
290 nfc_put_device(nfc_rawsock(sk)->dev); 284 nfc_put_device(nfc_rawsock(sk)->dev);
291 } 285 }
292 286
@@ -299,7 +293,7 @@ static void rawsock_destruct(struct sock *sk)
299} 293}
300 294
301static int rawsock_create(struct net *net, struct socket *sock, 295static int rawsock_create(struct net *net, struct socket *sock,
302 const struct nfc_protocol *nfc_proto) 296 const struct nfc_protocol *nfc_proto)
303{ 297{
304 struct sock *sk; 298 struct sock *sk;
305 299
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2c030505b335..2c74daa5aca5 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -38,7 +38,6 @@
38#include <linux/udp.h> 38#include <linux/udp.h>
39#include <linux/ethtool.h> 39#include <linux/ethtool.h>
40#include <linux/wait.h> 40#include <linux/wait.h>
41#include <asm/system.h>
42#include <asm/div64.h> 41#include <asm/div64.h>
43#include <linux/highmem.h> 42#include <linux/highmem.h>
44#include <linux/netfilter_bridge.h> 43#include <linux/netfilter_bridge.h>
@@ -322,7 +321,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
322 return -ENOMEM; 321 return -ENOMEM;
323 322
324 nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); 323 nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
325 if (!skb) 324 if (!nskb)
326 return -ENOMEM; 325 return -ENOMEM;
327 326
328 nskb->vlan_tci = 0; 327 nskb->vlan_tci = 0;
@@ -422,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
422 return validate_actions(actions, key, depth + 1); 421 return validate_actions(actions, key, depth + 1);
423} 422}
424 423
424static int validate_tp_port(const struct sw_flow_key *flow_key)
425{
426 if (flow_key->eth.type == htons(ETH_P_IP)) {
427 if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
428 return 0;
429 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
430 if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
431 return 0;
432 }
433
434 return -EINVAL;
435}
436
425static int validate_set(const struct nlattr *a, 437static int validate_set(const struct nlattr *a,
426 const struct sw_flow_key *flow_key) 438 const struct sw_flow_key *flow_key)
427{ 439{
@@ -463,18 +475,13 @@ static int validate_set(const struct nlattr *a,
463 if (flow_key->ip.proto != IPPROTO_TCP) 475 if (flow_key->ip.proto != IPPROTO_TCP)
464 return -EINVAL; 476 return -EINVAL;
465 477
466 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 478 return validate_tp_port(flow_key);
467 return -EINVAL;
468
469 break;
470 479
471 case OVS_KEY_ATTR_UDP: 480 case OVS_KEY_ATTR_UDP:
472 if (flow_key->ip.proto != IPPROTO_UDP) 481 if (flow_key->ip.proto != IPPROTO_UDP)
473 return -EINVAL; 482 return -EINVAL;
474 483
475 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 484 return validate_tp_port(flow_key);
476 return -EINVAL;
477 break;
478 485
479 default: 486 default:
480 return -EINVAL; 487 return -EINVAL;
@@ -779,15 +786,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
779 tcp_flags = flow->tcp_flags; 786 tcp_flags = flow->tcp_flags;
780 spin_unlock_bh(&flow->lock); 787 spin_unlock_bh(&flow->lock);
781 788
782 if (used) 789 if (used &&
783 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); 790 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
791 goto nla_put_failure;
784 792
785 if (stats.n_packets) 793 if (stats.n_packets &&
786 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, 794 nla_put(skb, OVS_FLOW_ATTR_STATS,
787 sizeof(struct ovs_flow_stats), &stats); 795 sizeof(struct ovs_flow_stats), &stats))
796 goto nla_put_failure;
788 797
789 if (tcp_flags) 798 if (tcp_flags &&
790 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); 799 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
800 goto nla_put_failure;
791 801
792 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if 802 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
793 * this is the first flow to be dumped into 'skb'. This is unusual for 803 * this is the first flow to be dumped into 'skb'. This is unusual for
@@ -1169,7 +1179,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1169 goto nla_put_failure; 1179 goto nla_put_failure;
1170 1180
1171 get_dp_stats(dp, &dp_stats); 1181 get_dp_stats(dp, &dp_stats);
1172 NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); 1182 if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1183 goto nla_put_failure;
1173 1184
1174 return genlmsg_end(skb, ovs_header); 1185 return genlmsg_end(skb, ovs_header);
1175 1186
@@ -1469,14 +1480,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1469 1480
1470 ovs_header->dp_ifindex = get_dpifindex(vport->dp); 1481 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1471 1482
1472 NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); 1483 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1473 NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); 1484 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1474 NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); 1485 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1475 NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); 1486 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
1487 goto nla_put_failure;
1476 1488
1477 ovs_vport_get_stats(vport, &vport_stats); 1489 ovs_vport_get_stats(vport, &vport_stats);
1478 NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), 1490 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1479 &vport_stats); 1491 &vport_stats))
1492 goto nla_put_failure;
1480 1493
1481 err = ovs_vport_get_options(vport, skb); 1494 err = ovs_vport_get_options(vport, skb);
1482 if (err == -EMSGSIZE) 1495 if (err == -EMSGSIZE)
@@ -1642,10 +1655,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1642 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1655 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1643 OVS_VPORT_CMD_NEW); 1656 OVS_VPORT_CMD_NEW);
1644 if (IS_ERR(reply)) { 1657 if (IS_ERR(reply)) {
1645 err = PTR_ERR(reply);
1646 netlink_set_err(init_net.genl_sock, 0, 1658 netlink_set_err(init_net.genl_sock, 0,
1647 ovs_dp_vport_multicast_group.id, err); 1659 ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
1648 return 0; 1660 goto exit_unlock;
1649 } 1661 }
1650 1662
1651 genl_notify(reply, genl_info_net(info), info->snd_pid, 1663 genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef1..6d4d8097cf96 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
183 u8 tcp_flags = 0; 183 u8 tcp_flags = 0;
184 184
185 if (flow->key.eth.type == htons(ETH_P_IP) && 185 if (flow->key.eth.type == htons(ETH_P_IP) &&
186 flow->key.ip.proto == IPPROTO_TCP) { 186 flow->key.ip.proto == IPPROTO_TCP &&
187 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
187 u8 *tcp = (u8 *)tcp_hdr(skb); 188 u8 *tcp = (u8 *)tcp_hdr(skb);
188 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; 189 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
189 } 190 }
@@ -1174,11 +1175,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1174 struct ovs_key_ethernet *eth_key; 1175 struct ovs_key_ethernet *eth_key;
1175 struct nlattr *nla, *encap; 1176 struct nlattr *nla, *encap;
1176 1177
1177 if (swkey->phy.priority) 1178 if (swkey->phy.priority &&
1178 NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); 1179 nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
1180 goto nla_put_failure;
1179 1181
1180 if (swkey->phy.in_port != USHRT_MAX) 1182 if (swkey->phy.in_port != USHRT_MAX &&
1181 NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); 1183 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
1184 goto nla_put_failure;
1182 1185
1183 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); 1186 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1184 if (!nla) 1187 if (!nla)
@@ -1188,8 +1191,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1188 memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); 1191 memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
1189 1192
1190 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { 1193 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
1191 NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); 1194 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
1192 NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); 1195 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
1196 goto nla_put_failure;
1193 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); 1197 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
1194 if (!swkey->eth.tci) 1198 if (!swkey->eth.tci)
1195 goto unencap; 1199 goto unencap;
@@ -1200,7 +1204,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1200 if (swkey->eth.type == htons(ETH_P_802_2)) 1204 if (swkey->eth.type == htons(ETH_P_802_2))
1201 goto unencap; 1205 goto unencap;
1202 1206
1203 NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); 1207 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
1208 goto nla_put_failure;
1204 1209
1205 if (swkey->eth.type == htons(ETH_P_IP)) { 1210 if (swkey->eth.type == htons(ETH_P_IP)) {
1206 struct ovs_key_ipv4 *ipv4_key; 1211 struct ovs_key_ipv4 *ipv4_key;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 322b8d206693..b6b1d7daa3cb 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -66,6 +66,7 @@ static int internal_dev_mac_addr(struct net_device *dev, void *p)
66 66
67 if (!is_valid_ether_addr(addr->sa_data)) 67 if (!is_valid_ether_addr(addr->sa_data))
68 return -EADDRNOTAVAIL; 68 return -EADDRNOTAVAIL;
69 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
69 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 70 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
70 return 0; 71 return 0;
71} 72}
@@ -145,7 +146,7 @@ static void do_setup(struct net_device *netdev)
145 netdev->vlan_features = netdev->features; 146 netdev->vlan_features = netdev->features;
146 netdev->features |= NETIF_F_HW_VLAN_TX; 147 netdev->features |= NETIF_F_HW_VLAN_TX;
147 netdev->hw_features = netdev->features & ~NETIF_F_LLTX; 148 netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
148 random_ether_addr(netdev->dev_addr); 149 eth_hw_addr_random(netdev);
149} 150}
150 151
151static struct vport *internal_dev_create(const struct vport_parms *parms) 152static struct vport *internal_dev_create(const struct vport_parms *parms)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index c1068aed03d1..3fd6c0d88e12 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -140,9 +140,9 @@ int ovs_netdev_get_ifindex(const struct vport *vport)
140 return netdev_vport->dev->ifindex; 140 return netdev_vport->dev->ifindex;
141} 141}
142 142
143static unsigned packet_length(const struct sk_buff *skb) 143static unsigned int packet_length(const struct sk_buff *skb)
144{ 144{
145 unsigned length = skb->len - ETH_HLEN; 145 unsigned int length = skb->len - ETH_HLEN;
146 146
147 if (skb->protocol == htons(ETH_P_8021Q)) 147 if (skb->protocol == htons(ETH_P_8021Q))
148 length -= VLAN_HLEN; 148 length -= VLAN_HLEN;
@@ -157,9 +157,9 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
157 int len; 157 int len;
158 158
159 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { 159 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
160 if (net_ratelimit()) 160 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
161 pr_warn("%s: dropped over-mtu packet: %d > %d\n", 161 ovs_dp_name(vport->dp),
162 ovs_dp_name(vport->dp), packet_length(skb), mtu); 162 packet_length(skb), mtu);
163 goto error; 163 goto error;
164 } 164 }
165 165
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2dbb32b988c4..0f661745df0f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -73,7 +73,6 @@
73#include <net/sock.h> 73#include <net/sock.h>
74#include <linux/errno.h> 74#include <linux/errno.h>
75#include <linux/timer.h> 75#include <linux/timer.h>
76#include <asm/system.h>
77#include <asm/uaccess.h> 76#include <asm/uaccess.h>
78#include <asm/ioctls.h> 77#include <asm/ioctls.h>
79#include <asm/page.h> 78#include <asm/page.h>
@@ -1459,6 +1458,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1459 struct net_device *dev; 1458 struct net_device *dev;
1460 __be16 proto = 0; 1459 __be16 proto = 0;
1461 int err; 1460 int err;
1461 int extra_len = 0;
1462 1462
1463 /* 1463 /*
1464 * Get and verify the address. 1464 * Get and verify the address.
@@ -1493,8 +1493,16 @@ retry:
1493 * raw protocol and you must do your own fragmentation at this level. 1493 * raw protocol and you must do your own fragmentation at this level.
1494 */ 1494 */
1495 1495
1496 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1497 if (!netif_supports_nofcs(dev)) {
1498 err = -EPROTONOSUPPORT;
1499 goto out_unlock;
1500 }
1501 extra_len = 4; /* We're doing our own CRC */
1502 }
1503
1496 err = -EMSGSIZE; 1504 err = -EMSGSIZE;
1497 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN) 1505 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1498 goto out_unlock; 1506 goto out_unlock;
1499 1507
1500 if (!skb) { 1508 if (!skb) {
@@ -1526,7 +1534,7 @@ retry:
1526 goto retry; 1534 goto retry;
1527 } 1535 }
1528 1536
1529 if (len > (dev->mtu + dev->hard_header_len)) { 1537 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1530 /* Earlier code assumed this would be a VLAN pkt, 1538 /* Earlier code assumed this would be a VLAN pkt,
1531 * double-check this now that we have the actual 1539 * double-check this now that we have the actual
1532 * packet in hand. 1540 * packet in hand.
@@ -1548,6 +1556,9 @@ retry:
1548 if (err < 0) 1556 if (err < 0)
1549 goto out_unlock; 1557 goto out_unlock;
1550 1558
1559 if (unlikely(extra_len == 4))
1560 skb->no_fcs = 1;
1561
1551 dev_queue_xmit(skb); 1562 dev_queue_xmit(skb);
1552 rcu_read_unlock(); 1563 rcu_read_unlock();
1553 return len; 1564 return len;
@@ -1643,7 +1654,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1643 skb->data = skb_head; 1654 skb->data = skb_head;
1644 skb->len = skb_len; 1655 skb->len = skb_len;
1645 } 1656 }
1646 kfree_skb(skb); 1657 consume_skb(skb);
1647 skb = nskb; 1658 skb = nskb;
1648 } 1659 }
1649 1660
@@ -1753,7 +1764,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1753 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 1764 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1754 po->tp_reserve; 1765 po->tp_reserve;
1755 } else { 1766 } else {
1756 unsigned maclen = skb_network_offset(skb); 1767 unsigned int maclen = skb_network_offset(skb);
1757 netoff = TPACKET_ALIGN(po->tp_hdrlen + 1768 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1758 (maclen < 16 ? 16 : maclen)) + 1769 (maclen < 16 ? 16 : maclen)) +
1759 po->tp_reserve; 1770 po->tp_reserve;
@@ -2209,6 +2220,7 @@ static int packet_snd(struct socket *sock,
2209 struct packet_sock *po = pkt_sk(sk); 2220 struct packet_sock *po = pkt_sk(sk);
2210 unsigned short gso_type = 0; 2221 unsigned short gso_type = 0;
2211 int hlen, tlen; 2222 int hlen, tlen;
2223 int extra_len = 0;
2212 2224
2213 /* 2225 /*
2214 * Get and verify the address. 2226 * Get and verify the address.
@@ -2288,8 +2300,16 @@ static int packet_snd(struct socket *sock,
2288 } 2300 }
2289 } 2301 }
2290 2302
2303 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2304 if (!netif_supports_nofcs(dev)) {
2305 err = -EPROTONOSUPPORT;
2306 goto out_unlock;
2307 }
2308 extra_len = 4; /* We're doing our own CRC */
2309 }
2310
2291 err = -EMSGSIZE; 2311 err = -EMSGSIZE;
2292 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN)) 2312 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2293 goto out_unlock; 2313 goto out_unlock;
2294 2314
2295 err = -ENOBUFS; 2315 err = -ENOBUFS;
@@ -2315,7 +2335,7 @@ static int packet_snd(struct socket *sock,
2315 if (err < 0) 2335 if (err < 0)
2316 goto out_free; 2336 goto out_free;
2317 2337
2318 if (!gso_type && (len > dev->mtu + reserve)) { 2338 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2319 /* Earlier code assumed this would be a VLAN pkt, 2339 /* Earlier code assumed this would be a VLAN pkt,
2320 * double-check this now that we have the actual 2340 * double-check this now that we have the actual
2321 * packet in hand. 2341 * packet in hand.
@@ -2353,6 +2373,9 @@ static int packet_snd(struct socket *sock,
2353 len += vnet_hdr_len; 2373 len += vnet_hdr_len;
2354 } 2374 }
2355 2375
2376 if (unlikely(extra_len == 4))
2377 skb->no_fcs = 1;
2378
2356 /* 2379 /*
2357 * Now send it 2380 * Now send it
2358 */ 2381 */
@@ -3201,10 +3224,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3201 char __user *optval, int __user *optlen) 3224 char __user *optval, int __user *optlen)
3202{ 3225{
3203 int len; 3226 int len;
3204 int val; 3227 int val, lv = sizeof(val);
3205 struct sock *sk = sock->sk; 3228 struct sock *sk = sock->sk;
3206 struct packet_sock *po = pkt_sk(sk); 3229 struct packet_sock *po = pkt_sk(sk);
3207 void *data; 3230 void *data = &val;
3208 struct tpacket_stats st; 3231 struct tpacket_stats st;
3209 union tpacket_stats_u st_u; 3232 union tpacket_stats_u st_u;
3210 3233
@@ -3219,21 +3242,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3219 3242
3220 switch (optname) { 3243 switch (optname) {
3221 case PACKET_STATISTICS: 3244 case PACKET_STATISTICS:
3222 if (po->tp_version == TPACKET_V3) {
3223 len = sizeof(struct tpacket_stats_v3);
3224 } else {
3225 if (len > sizeof(struct tpacket_stats))
3226 len = sizeof(struct tpacket_stats);
3227 }
3228 spin_lock_bh(&sk->sk_receive_queue.lock); 3245 spin_lock_bh(&sk->sk_receive_queue.lock);
3229 if (po->tp_version == TPACKET_V3) { 3246 if (po->tp_version == TPACKET_V3) {
3247 lv = sizeof(struct tpacket_stats_v3);
3230 memcpy(&st_u.stats3, &po->stats, 3248 memcpy(&st_u.stats3, &po->stats,
3231 sizeof(struct tpacket_stats)); 3249 sizeof(struct tpacket_stats));
3232 st_u.stats3.tp_freeze_q_cnt = 3250 st_u.stats3.tp_freeze_q_cnt =
3233 po->stats_u.stats3.tp_freeze_q_cnt; 3251 po->stats_u.stats3.tp_freeze_q_cnt;
3234 st_u.stats3.tp_packets += po->stats.tp_drops; 3252 st_u.stats3.tp_packets += po->stats.tp_drops;
3235 data = &st_u.stats3; 3253 data = &st_u.stats3;
3236 } else { 3254 } else {
3255 lv = sizeof(struct tpacket_stats);
3237 st = po->stats; 3256 st = po->stats;
3238 st.tp_packets += st.tp_drops; 3257 st.tp_packets += st.tp_drops;
3239 data = &st; 3258 data = &st;
@@ -3242,31 +3261,16 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3242 spin_unlock_bh(&sk->sk_receive_queue.lock); 3261 spin_unlock_bh(&sk->sk_receive_queue.lock);
3243 break; 3262 break;
3244 case PACKET_AUXDATA: 3263 case PACKET_AUXDATA:
3245 if (len > sizeof(int))
3246 len = sizeof(int);
3247 val = po->auxdata; 3264 val = po->auxdata;
3248
3249 data = &val;
3250 break; 3265 break;
3251 case PACKET_ORIGDEV: 3266 case PACKET_ORIGDEV:
3252 if (len > sizeof(int))
3253 len = sizeof(int);
3254 val = po->origdev; 3267 val = po->origdev;
3255
3256 data = &val;
3257 break; 3268 break;
3258 case PACKET_VNET_HDR: 3269 case PACKET_VNET_HDR:
3259 if (len > sizeof(int))
3260 len = sizeof(int);
3261 val = po->has_vnet_hdr; 3270 val = po->has_vnet_hdr;
3262
3263 data = &val;
3264 break; 3271 break;
3265 case PACKET_VERSION: 3272 case PACKET_VERSION:
3266 if (len > sizeof(int))
3267 len = sizeof(int);
3268 val = po->tp_version; 3273 val = po->tp_version;
3269 data = &val;
3270 break; 3274 break;
3271 case PACKET_HDRLEN: 3275 case PACKET_HDRLEN:
3272 if (len > sizeof(int)) 3276 if (len > sizeof(int))
@@ -3286,39 +3290,28 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3286 default: 3290 default:
3287 return -EINVAL; 3291 return -EINVAL;
3288 } 3292 }
3289 data = &val;
3290 break; 3293 break;
3291 case PACKET_RESERVE: 3294 case PACKET_RESERVE:
3292 if (len > sizeof(unsigned int))
3293 len = sizeof(unsigned int);
3294 val = po->tp_reserve; 3295 val = po->tp_reserve;
3295 data = &val;
3296 break; 3296 break;
3297 case PACKET_LOSS: 3297 case PACKET_LOSS:
3298 if (len > sizeof(unsigned int))
3299 len = sizeof(unsigned int);
3300 val = po->tp_loss; 3298 val = po->tp_loss;
3301 data = &val;
3302 break; 3299 break;
3303 case PACKET_TIMESTAMP: 3300 case PACKET_TIMESTAMP:
3304 if (len > sizeof(int))
3305 len = sizeof(int);
3306 val = po->tp_tstamp; 3301 val = po->tp_tstamp;
3307 data = &val;
3308 break; 3302 break;
3309 case PACKET_FANOUT: 3303 case PACKET_FANOUT:
3310 if (len > sizeof(int))
3311 len = sizeof(int);
3312 val = (po->fanout ? 3304 val = (po->fanout ?
3313 ((u32)po->fanout->id | 3305 ((u32)po->fanout->id |
3314 ((u32)po->fanout->type << 16)) : 3306 ((u32)po->fanout->type << 16)) :
3315 0); 3307 0);
3316 data = &val;
3317 break; 3308 break;
3318 default: 3309 default:
3319 return -ENOPROTOOPT; 3310 return -ENOPROTOOPT;
3320 } 3311 }
3321 3312
3313 if (len > lv)
3314 len = lv;
3322 if (put_user(len, optlen)) 3315 if (put_user(len, optlen))
3323 return -EFAULT; 3316 return -EFAULT;
3324 if (copy_to_user(optval, data, len)) 3317 if (copy_to_user(optval, data, len))
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699fbf34..779ce4ff92ec 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,7 +129,7 @@ static const struct net_proto_family phonet_proto_family = {
129/* Phonet device header operations */ 129/* Phonet device header operations */
130static int pn_header_create(struct sk_buff *skb, struct net_device *dev, 130static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
131 unsigned short type, const void *daddr, 131 unsigned short type, const void *daddr,
132 const void *saddr, unsigned len) 132 const void *saddr, unsigned int len)
133{ 133{
134 u8 *media = skb_push(skb, 1); 134 u8 *media = skb_push(skb, 1);
135 135
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9f60008740e3..9dd4f926f7d1 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -273,7 +273,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
273 hdr = pnp_hdr(skb); 273 hdr = pnp_hdr(skb);
274 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 274 if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
275 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 275 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
276 (unsigned)hdr->data[0]); 276 (unsigned int)hdr->data[0]);
277 return -EOPNOTSUPP; 277 return -EOPNOTSUPP;
278 } 278 }
279 279
@@ -305,7 +305,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
305 305
306 default: 306 default:
307 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", 307 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
308 (unsigned)hdr->data[1]); 308 (unsigned int)hdr->data[1]);
309 return -EOPNOTSUPP; 309 return -EOPNOTSUPP;
310 } 310 }
311 if (wake) 311 if (wake)
@@ -478,9 +478,9 @@ static void pipe_destruct(struct sock *sk)
478 skb_queue_purge(&pn->ctrlreq_queue); 478 skb_queue_purge(&pn->ctrlreq_queue);
479} 479}
480 480
481static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n) 481static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
482{ 482{
483 unsigned i; 483 unsigned int i;
484 u8 final_fc = PN_NO_FLOW_CONTROL; 484 u8 final_fc = PN_NO_FLOW_CONTROL;
485 485
486 for (i = 0; i < n; i++) { 486 for (i = 0; i < n; i++) {
@@ -1130,6 +1130,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
1130 int flags = msg->msg_flags; 1130 int flags = msg->msg_flags;
1131 int err, done; 1131 int err, done;
1132 1132
1133 if (len > USHRT_MAX)
1134 return -EMSGSIZE;
1135
1133 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| 1136 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1134 MSG_CMSG_COMPAT)) || 1137 MSG_CMSG_COMPAT)) ||
1135 !(msg->msg_flags & MSG_EOR)) 1138 !(msg->msg_flags & MSG_EOR))
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b9a85ecc4c7..36f75a9e2c3d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -44,7 +44,7 @@ struct phonet_net {
44 struct phonet_routes routes; 44 struct phonet_routes routes;
45}; 45};
46 46
47int phonet_net_id __read_mostly; 47static int phonet_net_id __read_mostly;
48 48
49static struct phonet_net *phonet_pernet(struct net *net) 49static struct phonet_net *phonet_pernet(struct net *net)
50{ 50{
@@ -268,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
268static void phonet_route_autodel(struct net_device *dev) 268static void phonet_route_autodel(struct net_device *dev)
269{ 269{
270 struct phonet_net *pnn = phonet_pernet(dev_net(dev)); 270 struct phonet_net *pnn = phonet_pernet(dev_net(dev));
271 unsigned i; 271 unsigned int i;
272 DECLARE_BITMAP(deleted, 64); 272 DECLARE_BITMAP(deleted, 64);
273 273
274 /* Remove left-over Phonet routes */ 274 /* Remove left-over Phonet routes */
@@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net)
331 331
332static void __net_exit phonet_exit_net(struct net *net) 332static void __net_exit phonet_exit_net(struct net *net)
333{ 333{
334 struct phonet_net *pnn = phonet_pernet(net);
335 struct net_device *dev;
336 unsigned i;
337
338 rtnl_lock();
339 for_each_netdev(net, dev)
340 phonet_device_destroy(dev);
341
342 for (i = 0; i < 64; i++) {
343 dev = pnn->routes.table[i];
344 if (dev) {
345 rtm_phonet_notify(RTM_DELROUTE, dev, i);
346 dev_put(dev);
347 }
348 }
349 rtnl_unlock();
350
351 proc_net_remove(net, "phonet"); 334 proc_net_remove(net, "phonet");
352} 335}
353 336
@@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = {
361/* Initialize Phonet devices list */ 344/* Initialize Phonet devices list */
362int __init phonet_device_init(void) 345int __init phonet_device_init(void)
363{ 346{
364 int err = register_pernet_device(&phonet_net_ops); 347 int err = register_pernet_subsys(&phonet_net_ops);
365 if (err) 348 if (err)
366 return err; 349 return err;
367 350
@@ -377,7 +360,7 @@ void phonet_device_exit(void)
377{ 360{
378 rtnl_unregister_all(PF_PHONET); 361 rtnl_unregister_all(PF_PHONET);
379 unregister_netdevice_notifier(&phonet_device_notifier); 362 unregister_netdevice_notifier(&phonet_device_notifier);
380 unregister_pernet_device(&phonet_net_ops); 363 unregister_pernet_subsys(&phonet_net_ops);
381 proc_net_remove(&init_net, "pnresource"); 364 proc_net_remove(&init_net, "pnresource");
382} 365}
383 366
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f6761777d..cfdf135fcd69 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
116 ifm->ifa_flags = IFA_F_PERMANENT; 116 ifm->ifa_flags = IFA_F_PERMANENT;
117 ifm->ifa_scope = RT_SCOPE_LINK; 117 ifm->ifa_scope = RT_SCOPE_LINK;
118 ifm->ifa_index = dev->ifindex; 118 ifm->ifa_index = dev->ifindex;
119 NLA_PUT_U8(skb, IFA_LOCAL, addr); 119 if (nla_put_u8(skb, IFA_LOCAL, addr))
120 goto nla_put_failure;
120 return nlmsg_end(skb, nlh); 121 return nlmsg_end(skb, nlh);
121 122
122nla_put_failure: 123nla_put_failure:
@@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
183 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 184 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
184 rtm->rtm_type = RTN_UNICAST; 185 rtm->rtm_type = RTN_UNICAST;
185 rtm->rtm_flags = 0; 186 rtm->rtm_flags = 0;
186 NLA_PUT_U8(skb, RTA_DST, dst); 187 if (nla_put_u8(skb, RTA_DST, dst) ||
187 NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); 188 nla_put_u32(skb, RTA_OIF, dev->ifindex))
189 goto nla_put_failure;
188 return nlmsg_end(skb, nlh); 190 return nlmsg_end(skb, nlh);
189 191
190nla_put_failure: 192nla_put_failure:
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff30dfa9..89cfa9ce4939 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -58,7 +58,7 @@ static struct {
58 58
59void __init pn_sock_init(void) 59void __init pn_sock_init(void)
60{ 60{
61 unsigned i; 61 unsigned int i;
62 62
63 for (i = 0; i < PN_HASHSIZE; i++) 63 for (i = 0; i < PN_HASHSIZE; i++)
64 INIT_HLIST_HEAD(pnsocks.hlist + i); 64 INIT_HLIST_HEAD(pnsocks.hlist + i);
@@ -116,7 +116,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
116void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) 116void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
117{ 117{
118 struct hlist_head *hlist = pnsocks.hlist; 118 struct hlist_head *hlist = pnsocks.hlist;
119 unsigned h; 119 unsigned int h;
120 120
121 rcu_read_lock(); 121 rcu_read_lock();
122 for (h = 0; h < PN_HASHSIZE; h++) { 122 for (h = 0; h < PN_HASHSIZE; h++) {
@@ -545,7 +545,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
545 struct hlist_head *hlist = pnsocks.hlist; 545 struct hlist_head *hlist = pnsocks.hlist;
546 struct hlist_node *node; 546 struct hlist_node *node;
547 struct sock *sknode; 547 struct sock *sknode;
548 unsigned h; 548 unsigned int h;
549 549
550 for (h = 0; h < PN_HASHSIZE; h++) { 550 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each_rcu(sknode, node, hlist) { 551 sk_for_each_rcu(sknode, node, hlist) {
@@ -710,7 +710,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
710 710
711void pn_sock_unbind_all_res(struct sock *sk) 711void pn_sock_unbind_all_res(struct sock *sk)
712{ 712{
713 unsigned res, match = 0; 713 unsigned int res, match = 0;
714 714
715 mutex_lock(&resource_mutex); 715 mutex_lock(&resource_mutex);
716 for (res = 0; res < 256; res++) { 716 for (res = 0; res < 256; res++) {
@@ -732,7 +732,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
732static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) 732static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
733{ 733{
734 struct net *net = seq_file_net(seq); 734 struct net *net = seq_file_net(seq);
735 unsigned i; 735 unsigned int i;
736 736
737 if (!net_eq(net, &init_net)) 737 if (!net_eq(net, &init_net))
738 return NULL; 738 return NULL;
@@ -750,7 +750,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
750static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) 750static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
751{ 751{
752 struct net *net = seq_file_net(seq); 752 struct net *net = seq_file_net(seq);
753 unsigned i; 753 unsigned int i;
754 754
755 BUG_ON(!net_eq(net, &init_net)); 755 BUG_ON(!net_eq(net, &init_net));
756 756
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7dbdae2..696348fd31a1 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,10 @@
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/init.h> 28#include <linux/init.h>
29 29
30#include <net/sock.h>
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33
30#define DYNAMIC_PORT_MIN 0x40 34#define DYNAMIC_PORT_MIN 0x40
31#define DYNAMIC_PORT_MAX 0x7f 35#define DYNAMIC_PORT_MAX 0x7f
32 36
@@ -46,7 +50,8 @@ static void set_local_port_range(int range[2])
46 50
47void phonet_get_local_port_range(int *min, int *max) 51void phonet_get_local_port_range(int *min, int *max)
48{ 52{
49 unsigned seq; 53 unsigned int seq;
54
50 do { 55 do {
51 seq = read_seqbegin(&local_port_range_lock); 56 seq = read_seqbegin(&local_port_range_lock);
52 if (min) 57 if (min)
@@ -93,19 +98,13 @@ static struct ctl_table phonet_table[] = {
93 { } 98 { }
94}; 99};
95 100
96static struct ctl_path phonet_ctl_path[] = {
97 { .procname = "net", },
98 { .procname = "phonet", },
99 { },
100};
101
102int __init phonet_sysctl_init(void) 101int __init phonet_sysctl_init(void)
103{ 102{
104 phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); 103 phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
105 return phonet_table_hrd == NULL ? -ENOMEM : 0; 104 return phonet_table_hrd == NULL ? -ENOMEM : 0;
106} 105}
107 106
108void phonet_sysctl_exit(void) 107void phonet_sysctl_exit(void)
109{ 108{
110 unregister_sysctl_table(phonet_table_hrd); 109 unregister_net_sysctl_table(phonet_table_hrd);
111} 110}
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf164eb..8d2b3d5a7c21 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@ struct rds_ib_device {
186 struct work_struct free_work; 186 struct work_struct free_work;
187}; 187};
188 188
189#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus) 189#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
190#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
191#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) 190#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
192 191
193/* bits for i_ack_flags */ 192/* bits for i_ack_flags */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 51c868923f64..a1e116277477 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -749,7 +749,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
749 int ret; 749 int ret;
750 750
751 /* XXX too lazy? */ 751 /* XXX too lazy? */
752 ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL); 752 ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
753 if (!ic) 753 if (!ic)
754 return -ENOMEM; 754 return -ENOMEM;
755 755
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e29e0ca32f74..8d194912c695 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -763,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
763 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 763 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
764 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ 764 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
765 765
766 addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0); 766 addr = kmap_atomic(sg_page(&frag->f_sg));
767 767
768 src = addr + frag_off; 768 src = addr + frag_off;
769 dst = (void *)map->m_page_addrs[map_page] + map_off; 769 dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -773,7 +773,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
773 uncongested |= ~(*src) & *dst; 773 uncongested |= ~(*src) & *dst;
774 *dst++ = *src++; 774 *dst++ = *src++;
775 } 775 }
776 kunmap_atomic(addr, KM_SOFTIRQ0); 776 kunmap_atomic(addr);
777 777
778 copied += to_copy; 778 copied += to_copy;
779 779
@@ -826,7 +826,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
826 826
827 if (data_len < sizeof(struct rds_header)) { 827 if (data_len < sizeof(struct rds_header)) {
828 rds_ib_conn_error(conn, "incoming message " 828 rds_ib_conn_error(conn, "incoming message "
829 "from %pI4 didn't inclue a " 829 "from %pI4 didn't include a "
830 "header, disconnecting and " 830 "header, disconnecting and "
831 "reconnecting\n", 831 "reconnecting\n",
832 &conn->c_faddr); 832 &conn->c_faddr);
@@ -919,8 +919,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
919 rds_ib_cong_recv(conn, ibinc); 919 rds_ib_cong_recv(conn, ibinc);
920 else { 920 else {
921 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, 921 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
922 &ibinc->ii_inc, GFP_ATOMIC, 922 &ibinc->ii_inc, GFP_ATOMIC);
923 KM_SOFTIRQ0);
924 state->ack_next = be64_to_cpu(hdr->h_sequence); 923 state->ack_next = be64_to_cpu(hdr->h_sequence);
925 state->ack_next_valid = 1; 924 state->ack_next_valid = 1;
926 } 925 }
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 1253b006efdb..7e643bafb4af 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -106,22 +106,15 @@ static ctl_table rds_ib_sysctl_table[] = {
106 { } 106 { }
107}; 107};
108 108
109static struct ctl_path rds_ib_sysctl_path[] = {
110 { .procname = "net", },
111 { .procname = "rds", },
112 { .procname = "ib", },
113 { }
114};
115
116void rds_ib_sysctl_exit(void) 109void rds_ib_sysctl_exit(void)
117{ 110{
118 if (rds_ib_sysctl_hdr) 111 if (rds_ib_sysctl_hdr)
119 unregister_sysctl_table(rds_ib_sysctl_hdr); 112 unregister_net_sysctl_table(rds_ib_sysctl_hdr);
120} 113}
121 114
122int rds_ib_sysctl_init(void) 115int rds_ib_sysctl_init(void)
123{ 116{
124 rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); 117 rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
125 if (!rds_ib_sysctl_hdr) 118 if (!rds_ib_sysctl_hdr)
126 return -ENOMEM; 119 return -ENOMEM;
127 return 0; 120 return 0;
diff --git a/net/rds/info.c b/net/rds/info.c
index f1c016c4146e..9a6b4f66187c 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -104,7 +104,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
104void rds_info_iter_unmap(struct rds_info_iterator *iter) 104void rds_info_iter_unmap(struct rds_info_iterator *iter)
105{ 105{
106 if (iter->addr) { 106 if (iter->addr) {
107 kunmap_atomic(iter->addr, KM_USER0); 107 kunmap_atomic(iter->addr);
108 iter->addr = NULL; 108 iter->addr = NULL;
109 } 109 }
110} 110}
@@ -119,7 +119,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
119 119
120 while (bytes) { 120 while (bytes) {
121 if (!iter->addr) 121 if (!iter->addr)
122 iter->addr = kmap_atomic(*iter->pages, KM_USER0); 122 iter->addr = kmap_atomic(*iter->pages);
123 123
124 this = min(bytes, PAGE_SIZE - iter->offset); 124 this = min(bytes, PAGE_SIZE - iter->offset);
125 125
@@ -134,7 +134,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
134 iter->offset += this; 134 iter->offset += this;
135 135
136 if (iter->offset == PAGE_SIZE) { 136 if (iter->offset == PAGE_SIZE) {
137 kunmap_atomic(iter->addr, KM_USER0); 137 kunmap_atomic(iter->addr);
138 iter->addr = NULL; 138 iter->addr = NULL;
139 iter->offset = 0; 139 iter->offset = 0;
140 iter->pages++; 140 iter->pages++;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 9556d2895f7a..a91e1db62ee6 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -694,7 +694,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
694 unsigned long flags; 694 unsigned long flags;
695 695
696 /* XXX too lazy? */ 696 /* XXX too lazy? */
697 ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL); 697 ic = kzalloc(sizeof(struct rds_iw_connection), gfp);
698 if (!ic) 698 if (!ic)
699 return -ENOMEM; 699 return -ENOMEM;
700 700
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 5e57347f49ff..45033358358e 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -598,7 +598,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
598 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 598 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
599 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ 599 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
600 600
601 addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0); 601 addr = kmap_atomic(frag->f_page);
602 602
603 src = addr + frag_off; 603 src = addr + frag_off;
604 dst = (void *)map->m_page_addrs[map_page] + map_off; 604 dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -608,7 +608,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
608 uncongested |= ~(*src) & *dst; 608 uncongested |= ~(*src) & *dst;
609 *dst++ = *src++; 609 *dst++ = *src++;
610 } 610 }
611 kunmap_atomic(addr, KM_SOFTIRQ0); 611 kunmap_atomic(addr);
612 612
613 copied += to_copy; 613 copied += to_copy;
614 614
@@ -661,7 +661,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
661 661
662 if (byte_len < sizeof(struct rds_header)) { 662 if (byte_len < sizeof(struct rds_header)) {
663 rds_iw_conn_error(conn, "incoming message " 663 rds_iw_conn_error(conn, "incoming message "
664 "from %pI4 didn't inclue a " 664 "from %pI4 didn't include a "
665 "header, disconnecting and " 665 "header, disconnecting and "
666 "reconnecting\n", 666 "reconnecting\n",
667 &conn->c_faddr); 667 &conn->c_faddr);
@@ -754,8 +754,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
754 rds_iw_cong_recv(conn, iwinc); 754 rds_iw_cong_recv(conn, iwinc);
755 else { 755 else {
756 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, 756 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
757 &iwinc->ii_inc, GFP_ATOMIC, 757 &iwinc->ii_inc, GFP_ATOMIC);
758 KM_SOFTIRQ0);
759 state->ack_next = be64_to_cpu(hdr->h_sequence); 758 state->ack_next = be64_to_cpu(hdr->h_sequence);
760 state->ack_next_valid = 1; 759 state->ack_next_valid = 1;
761 } 760 }
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index e2e47176e729..5d5ebd576f3f 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -109,22 +109,15 @@ static ctl_table rds_iw_sysctl_table[] = {
109 { } 109 { }
110}; 110};
111 111
112static struct ctl_path rds_iw_sysctl_path[] = {
113 { .procname = "net", },
114 { .procname = "rds", },
115 { .procname = "iw", },
116 { }
117};
118
119void rds_iw_sysctl_exit(void) 112void rds_iw_sysctl_exit(void)
120{ 113{
121 if (rds_iw_sysctl_hdr) 114 if (rds_iw_sysctl_hdr)
122 unregister_sysctl_table(rds_iw_sysctl_hdr); 115 unregister_net_sysctl_table(rds_iw_sysctl_hdr);
123} 116}
124 117
125int rds_iw_sysctl_init(void) 118int rds_iw_sysctl_init(void)
126{ 119{
127 rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); 120 rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
128 if (!rds_iw_sysctl_hdr) 121 if (!rds_iw_sysctl_hdr)
129 return -ENOMEM; 122 return -ENOMEM;
130 return 0; 123 return 0;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index bca6761a3ca2..6b12b68541ae 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -79,7 +79,7 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
79 rds_message_addref(rm); 79 rds_message_addref(rm);
80 80
81 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc, 81 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
82 GFP_KERNEL, KM_USER0); 82 GFP_KERNEL);
83 83
84 rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence), 84 rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
85 NULL); 85 NULL);
@@ -121,7 +121,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
121 struct rds_loop_connection *lc; 121 struct rds_loop_connection *lc;
122 unsigned long flags; 122 unsigned long flags;
123 123
124 lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); 124 lc = kzalloc(sizeof(struct rds_loop_connection), gfp);
125 if (!lc) 125 if (!lc)
126 return -ENOMEM; 126 return -ENOMEM;
127 127
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 7eaba1831f0d..ec1d731ecff0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -704,7 +704,7 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
704 __be32 saddr); 704 __be32 saddr);
705void rds_inc_put(struct rds_incoming *inc); 705void rds_inc_put(struct rds_incoming *inc);
706void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 706void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
707 struct rds_incoming *inc, gfp_t gfp, enum km_type km); 707 struct rds_incoming *inc, gfp_t gfp);
708int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 708int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
709 size_t size, int msg_flags); 709 size_t size, int msg_flags);
710void rds_clear_recv_queue(struct rds_sock *rs); 710void rds_clear_recv_queue(struct rds_sock *rs);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index bc3f8cd6d070..5c6e9f132026 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -155,7 +155,7 @@ static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock
155 * tell us which roles the addrs in the conn are playing for this message. 155 * tell us which roles the addrs in the conn are playing for this message.
156 */ 156 */
157void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 157void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
158 struct rds_incoming *inc, gfp_t gfp, enum km_type km) 158 struct rds_incoming *inc, gfp_t gfp)
159{ 159{
160 struct rds_sock *rs = NULL; 160 struct rds_sock *rs = NULL;
161 struct sock *sk; 161 struct sock *sk;
diff --git a/net/rds/send.c b/net/rds/send.c
index e2d63c59e7c2..96531d4033a2 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -935,7 +935,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
935 /* Mirror Linux UDP mirror of BSD error message compatibility */ 935 /* Mirror Linux UDP mirror of BSD error message compatibility */
936 /* XXX: Perhaps MSG_MORE someday */ 936 /* XXX: Perhaps MSG_MORE someday */
937 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) { 937 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
938 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
939 ret = -EOPNOTSUPP; 938 ret = -EOPNOTSUPP;
940 goto out; 939 goto out;
941 } 940 }
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 25ad0c77a26c..907214b4c4d0 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -92,17 +92,10 @@ static ctl_table rds_sysctl_rds_table[] = {
92 { } 92 { }
93}; 93};
94 94
95static struct ctl_path rds_sysctl_path[] = {
96 { .procname = "net", },
97 { .procname = "rds", },
98 { }
99};
100
101
102void rds_sysctl_exit(void) 95void rds_sysctl_exit(void)
103{ 96{
104 if (rds_sysctl_reg_table) 97 if (rds_sysctl_reg_table)
105 unregister_sysctl_table(rds_sysctl_reg_table); 98 unregister_net_sysctl_table(rds_sysctl_reg_table);
106} 99}
107 100
108int rds_sysctl_init(void) 101int rds_sysctl_init(void)
@@ -110,7 +103,7 @@ int rds_sysctl_init(void)
110 rds_sysctl_reconnect_min = msecs_to_jiffies(1); 103 rds_sysctl_reconnect_min = msecs_to_jiffies(1);
111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; 104 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
112 105
113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); 106 rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
114 if (!rds_sysctl_reg_table) 107 if (!rds_sysctl_reg_table)
115 return -ENOMEM; 108 return -ENOMEM;
116 return 0; 109 return 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 8b5cc4aa8868..72981375f47c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -145,7 +145,7 @@ int rds_tcp_listen_init(void)
145 if (ret < 0) 145 if (ret < 0)
146 goto out; 146 goto out;
147 147
148 sock->sk->sk_reuse = 1; 148 sock->sk->sk_reuse = SK_CAN_REUSE;
149 rds_tcp_nonagle(sock); 149 rds_tcp_nonagle(sock);
150 150
151 write_lock_bh(&sock->sk->sk_callback_lock); 151 write_lock_bh(&sock->sk->sk_callback_lock);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 78205e25500a..6243258f840f 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -169,7 +169,6 @@ static void rds_tcp_cong_recv(struct rds_connection *conn,
169struct rds_tcp_desc_arg { 169struct rds_tcp_desc_arg {
170 struct rds_connection *conn; 170 struct rds_connection *conn;
171 gfp_t gfp; 171 gfp_t gfp;
172 enum km_type km;
173}; 172};
174 173
175static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, 174static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
@@ -255,7 +254,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
255 else 254 else
256 rds_recv_incoming(conn, conn->c_faddr, 255 rds_recv_incoming(conn, conn->c_faddr,
257 conn->c_laddr, &tinc->ti_inc, 256 conn->c_laddr, &tinc->ti_inc,
258 arg->gfp, arg->km); 257 arg->gfp);
259 258
260 tc->t_tinc_hdr_rem = sizeof(struct rds_header); 259 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
261 tc->t_tinc_data_rem = 0; 260 tc->t_tinc_data_rem = 0;
@@ -272,8 +271,7 @@ out:
272} 271}
273 272
274/* the caller has to hold the sock lock */ 273/* the caller has to hold the sock lock */
275static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, 274static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
276 enum km_type km)
277{ 275{
278 struct rds_tcp_connection *tc = conn->c_transport_data; 276 struct rds_tcp_connection *tc = conn->c_transport_data;
279 struct socket *sock = tc->t_sock; 277 struct socket *sock = tc->t_sock;
@@ -283,7 +281,6 @@ static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp,
283 /* It's like glib in the kernel! */ 281 /* It's like glib in the kernel! */
284 arg.conn = conn; 282 arg.conn = conn;
285 arg.gfp = gfp; 283 arg.gfp = gfp;
286 arg.km = km;
287 desc.arg.data = &arg; 284 desc.arg.data = &arg;
288 desc.error = 0; 285 desc.error = 0;
289 desc.count = 1; /* give more than one skb per call */ 286 desc.count = 1; /* give more than one skb per call */
@@ -311,7 +308,7 @@ int rds_tcp_recv(struct rds_connection *conn)
311 rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock); 308 rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);
312 309
313 lock_sock(sock->sk); 310 lock_sock(sock->sk);
314 ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0); 311 ret = rds_tcp_read_sock(conn, GFP_KERNEL);
315 release_sock(sock->sk); 312 release_sock(sock->sk);
316 313
317 return ret; 314 return ret;
@@ -336,7 +333,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
336 ready = tc->t_orig_data_ready; 333 ready = tc->t_orig_data_ready;
337 rds_tcp_stats_inc(s_tcp_data_ready_calls); 334 rds_tcp_stats_inc(s_tcp_data_ready_calls);
338 335
339 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 336 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
340 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
341out: 338out:
342 read_unlock_bh(&sk->sk_callback_lock); 339 read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 354760ebbbd2..f974961754ca 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -29,6 +29,7 @@
29#include <linux/rfkill.h> 29#include <linux/rfkill.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/device.h>
32#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
33#include <linux/wait.h> 34#include <linux/wait.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f9ea925ad9cb..c4719ce604c2 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -34,7 +34,6 @@
34#include <linux/if_arp.h> 34#include <linux/if_arp.h>
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <net/sock.h> 36#include <net/sock.h>
37#include <asm/system.h>
38#include <asm/uaccess.h> 37#include <asm/uaccess.h>
39#include <linux/fcntl.h> 38#include <linux/fcntl.h>
40#include <linux/termios.h> 39#include <linux/termios.h>
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 178ff4f73c85..28dbdb911b85 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -21,7 +21,6 @@
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#include <asm/system.h>
25#include <asm/io.h> 24#include <asm/io.h>
26 25
27#include <linux/inet.h> 26#include <linux/inet.h>
@@ -38,7 +37,7 @@
38 37
39static int rose_header(struct sk_buff *skb, struct net_device *dev, 38static int rose_header(struct sk_buff *skb, struct net_device *dev,
40 unsigned short type, 39 unsigned short type,
41 const void *daddr, const void *saddr, unsigned len) 40 const void *daddr, const void *saddr, unsigned int len)
42{ 41{
43 unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); 42 unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
44 43
@@ -96,11 +95,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
96 struct sockaddr *sa = addr; 95 struct sockaddr *sa = addr;
97 int err; 96 int err;
98 97
99 if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len)) 98 if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
100 return 0; 99 return 0;
101 100
102 if (dev->flags & IFF_UP) { 101 if (dev->flags & IFF_UP) {
103 err = rose_add_loopback_node((rose_address *)dev->dev_addr); 102 err = rose_add_loopback_node((rose_address *)sa->sa_data);
104 if (err) 103 if (err)
105 return err; 104 return err;
106 105
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 7f7fcb46b4fa..79c4abcfa6b4 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -26,7 +26,6 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <net/sock.h> 27#include <net/sock.h>
28#include <net/tcp_states.h> 28#include <net/tcp_states.h>
29#include <asm/system.h>
30#include <linux/fcntl.h> 29#include <linux/fcntl.h>
31#include <linux/mm.h> 30#include <linux/mm.h>
32#include <linux/interrupt.h> 31#include <linux/interrupt.h>
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index 7a02bd1cc5a0..bc5514211b0c 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -22,7 +22,6 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <asm/system.h>
26#include <linux/fcntl.h> 25#include <linux/fcntl.h>
27#include <linux/mm.h> 26#include <linux/mm.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
diff --git a/net/rose/rose_out.c b/net/rose/rose_out.c
index 4ebf33afbe47..9ad98b524646 100644
--- a/net/rose/rose_out.c
+++ b/net/rose/rose_out.c
@@ -21,7 +21,6 @@
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <asm/system.h>
25#include <linux/fcntl.h> 24#include <linux/fcntl.h>
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index cd9b7ee60f3e..40148932c8a4 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -25,7 +25,6 @@
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/tcp_states.h> 27#include <net/tcp_states.h>
28#include <asm/system.h>
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
30#include <linux/fcntl.h> 29#include <linux/fcntl.h>
31#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 30#include <linux/termios.h> /* For TIOCINQ/OUTQ */
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index f6c71caa94b9..7ca57741b2fb 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -22,7 +22,6 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <asm/system.h>
26#include <linux/fcntl.h> 25#include <linux/fcntl.h>
27#include <linux/mm.h> 26#include <linux/mm.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
@@ -400,7 +399,7 @@ int rose_parse_facilities(unsigned char *p, unsigned packet_len,
400 399
401 facilities_len = *p++; 400 facilities_len = *p++;
402 401
403 if (facilities_len == 0 || (unsigned)facilities_len > packet_len) 402 if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
404 return 0; 403 return 0;
405 404
406 while (facilities_len >= 3 && *p == 0x00) { 405 while (facilities_len >= 3 && *p == 0x00) {
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index b6c8f38cc26c..bc5469d6d9cb 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -23,7 +23,6 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp_states.h> 25#include <net/tcp_states.h>
26#include <asm/system.h>
27#include <linux/fcntl.h> 26#include <linux/fcntl.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index df6d9dac2186..94ca9c2ccd69 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -118,18 +118,12 @@ static ctl_table rose_table[] = {
118 { } 118 { }
119}; 119};
120 120
121static struct ctl_path rose_path[] = {
122 { .procname = "net", },
123 { .procname = "rose", },
124 { }
125};
126
127void __init rose_register_sysctl(void) 121void __init rose_register_sysctl(void)
128{ 122{
129 rose_table_header = register_sysctl_paths(rose_path, rose_table); 123 rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
130} 124}
131 125
132void rose_unregister_sysctl(void) 126void rose_unregister_sysctl(void)
133{ 127{
134 unregister_sysctl_table(rose_table_header); 128 unregister_net_sysctl_table(rose_table_header);
135} 129}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 74c064c0dfdd..05996d0dd828 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Red Hat, Inc.");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27MODULE_ALIAS_NETPROTO(PF_RXRPC); 27MODULE_ALIAS_NETPROTO(PF_RXRPC);
28 28
29unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; 29unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
30module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 30module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
31MODULE_PARM_DESC(debug, "RxRPC debugging mask"); 31MODULE_PARM_DESC(debug, "RxRPC debugging mask");
32 32
@@ -513,7 +513,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
513 char __user *optval, unsigned int optlen) 513 char __user *optval, unsigned int optlen)
514{ 514{
515 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 515 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
516 unsigned min_sec_level; 516 unsigned int min_sec_level;
517 int ret; 517 int ret;
518 518
519 _enter(",%d,%d,,%d", level, optname, optlen); 519 _enter(",%d,%d,,%d", level, optname, optlen);
@@ -555,13 +555,13 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
555 555
556 case RXRPC_MIN_SECURITY_LEVEL: 556 case RXRPC_MIN_SECURITY_LEVEL:
557 ret = -EINVAL; 557 ret = -EINVAL;
558 if (optlen != sizeof(unsigned)) 558 if (optlen != sizeof(unsigned int))
559 goto error; 559 goto error;
560 ret = -EISCONN; 560 ret = -EISCONN;
561 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 561 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
562 goto error; 562 goto error;
563 ret = get_user(min_sec_level, 563 ret = get_user(min_sec_level,
564 (unsigned __user *) optval); 564 (unsigned int __user *) optval);
565 if (ret < 0) 565 if (ret < 0)
566 goto error; 566 goto error;
567 ret = -EINVAL; 567 ret = -EINVAL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c3126e864f3c..e4d9cbcff402 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,7 @@
19#include <net/af_rxrpc.h> 19#include <net/af_rxrpc.h>
20#include "ar-internal.h" 20#include "ar-internal.h"
21 21
22static unsigned rxrpc_ack_defer = 1; 22static unsigned int rxrpc_ack_defer = 1;
23 23
24static const char *const rxrpc_acks[] = { 24static const char *const rxrpc_acks[] = {
25 "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", 25 "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
@@ -548,11 +548,11 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
548 * process the extra information that may be appended to an ACK packet 548 * process the extra information that may be appended to an ACK packet
549 */ 549 */
550static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, 550static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
551 unsigned latest, int nAcks) 551 unsigned int latest, int nAcks)
552{ 552{
553 struct rxrpc_ackinfo ackinfo; 553 struct rxrpc_ackinfo ackinfo;
554 struct rxrpc_peer *peer; 554 struct rxrpc_peer *peer;
555 unsigned mtu; 555 unsigned int mtu;
556 556
557 if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) { 557 if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
558 _leave(" [no ackinfo]"); 558 _leave(" [no ackinfo]");
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bf656c230ba9..a3bbb360a3f9 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -38,8 +38,8 @@ const char *const rxrpc_call_states[] = {
38struct kmem_cache *rxrpc_call_jar; 38struct kmem_cache *rxrpc_call_jar;
39LIST_HEAD(rxrpc_calls); 39LIST_HEAD(rxrpc_calls);
40DEFINE_RWLOCK(rxrpc_call_lock); 40DEFINE_RWLOCK(rxrpc_call_lock);
41static unsigned rxrpc_call_max_lifetime = 60; 41static unsigned int rxrpc_call_max_lifetime = 60;
42static unsigned rxrpc_dead_call_timeout = 2; 42static unsigned int rxrpc_dead_call_timeout = 2;
43 43
44static void rxrpc_destroy_call(struct work_struct *work); 44static void rxrpc_destroy_call(struct work_struct *work);
45static void rxrpc_call_life_expired(unsigned long _call); 45static void rxrpc_call_life_expired(unsigned long _call);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 1a2b0633fece..529572f18d1f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -76,7 +76,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
76 * --ANK */ 76 * --ANK */
77// ret = -ENOBUFS; 77// ret = -ENOBUFS;
78// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 78// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
79// (unsigned) sk->sk_rcvbuf) 79// (unsigned int) sk->sk_rcvbuf)
80// goto out; 80// goto out;
81 81
82 ret = sk_filter(sk, skb); 82 ret = sk_filter(sk, skb);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 8e22bd345e71..a693aca2ae2e 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -83,7 +83,7 @@ struct rxrpc_skb_priv {
83 struct rxrpc_call *call; /* call with which associated */ 83 struct rxrpc_call *call; /* call with which associated */
84 unsigned long resend_at; /* time in jiffies at which to resend */ 84 unsigned long resend_at; /* time in jiffies at which to resend */
85 union { 85 union {
86 unsigned offset; /* offset into buffer of next read */ 86 unsigned int offset; /* offset into buffer of next read */
87 int remain; /* amount of space remaining for next write */ 87 int remain; /* amount of space remaining for next write */
88 u32 error; /* network error code */ 88 u32 error; /* network error code */
89 bool need_resend; /* T if needs resending */ 89 bool need_resend; /* T if needs resending */
@@ -176,9 +176,9 @@ struct rxrpc_peer {
176 struct list_head error_targets; /* targets for net error distribution */ 176 struct list_head error_targets; /* targets for net error distribution */
177 spinlock_t lock; /* access lock */ 177 spinlock_t lock; /* access lock */
178 atomic_t usage; 178 atomic_t usage;
179 unsigned if_mtu; /* interface MTU for this peer */ 179 unsigned int if_mtu; /* interface MTU for this peer */
180 unsigned mtu; /* network MTU for this peer */ 180 unsigned int mtu; /* network MTU for this peer */
181 unsigned maxdata; /* data size (MTU - hdrsize) */ 181 unsigned int maxdata; /* data size (MTU - hdrsize) */
182 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ 182 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
183 int debug_id; /* debug ID for printks */ 183 int debug_id; /* debug ID for printks */
184 int net_error; /* network error distributed */ 184 int net_error; /* network error distributed */
@@ -187,8 +187,8 @@ struct rxrpc_peer {
187 /* calculated RTT cache */ 187 /* calculated RTT cache */
188#define RXRPC_RTT_CACHE_SIZE 32 188#define RXRPC_RTT_CACHE_SIZE 32
189 suseconds_t rtt; /* current RTT estimate (in uS) */ 189 suseconds_t rtt; /* current RTT estimate (in uS) */
190 unsigned rtt_point; /* next entry at which to insert */ 190 unsigned int rtt_point; /* next entry at which to insert */
191 unsigned rtt_usage; /* amount of cache actually used */ 191 unsigned int rtt_usage; /* amount of cache actually used */
192 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ 192 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
193}; 193};
194 194
@@ -271,7 +271,7 @@ struct rxrpc_connection {
271 } state; 271 } state;
272 int error; /* error code for local abort */ 272 int error; /* error code for local abort */
273 int debug_id; /* debug ID for printks */ 273 int debug_id; /* debug ID for printks */
274 unsigned call_counter; /* call ID counter */ 274 unsigned int call_counter; /* call ID counter */
275 atomic_t serial; /* packet serial number counter */ 275 atomic_t serial; /* packet serial number counter */
276 atomic_t hi_serial; /* highest serial number received */ 276 atomic_t hi_serial; /* highest serial number received */
277 u8 avail_calls; /* number of calls available */ 277 u8 avail_calls; /* number of calls available */
@@ -592,7 +592,7 @@ extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
592/* 592/*
593 * debug tracing 593 * debug tracing
594 */ 594 */
595extern unsigned rxrpc_debug; 595extern unsigned int rxrpc_debug;
596 596
597#define dbgprintk(FMT,...) \ 597#define dbgprintk(FMT,...) \
598 printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) 598 printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ae3a035f5390..8b1f9f49960f 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -82,7 +82,7 @@ static int rxrpc_vet_description_s(const char *desc)
82 * - the caller guarantees we have at least 4 words 82 * - the caller guarantees we have at least 4 words
83 */ 83 */
84static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, 84static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
85 unsigned toklen) 85 unsigned int toklen)
86{ 86{
87 struct rxrpc_key_token *token, **pptoken; 87 struct rxrpc_key_token *token, **pptoken;
88 size_t plen; 88 size_t plen;
@@ -210,10 +210,10 @@ static void rxrpc_rxk5_free(struct rxk5_key *rxk5)
210 */ 210 */
211static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, 211static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
212 const __be32 **_xdr, 212 const __be32 **_xdr,
213 unsigned *_toklen) 213 unsigned int *_toklen)
214{ 214{
215 const __be32 *xdr = *_xdr; 215 const __be32 *xdr = *_xdr;
216 unsigned toklen = *_toklen, n_parts, loop, tmp; 216 unsigned int toklen = *_toklen, n_parts, loop, tmp;
217 217
218 /* there must be at least one name, and at least #names+1 length 218 /* there must be at least one name, and at least #names+1 length
219 * words */ 219 * words */
@@ -286,10 +286,10 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
286static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, 286static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
287 size_t max_data_size, 287 size_t max_data_size,
288 const __be32 **_xdr, 288 const __be32 **_xdr,
289 unsigned *_toklen) 289 unsigned int *_toklen)
290{ 290{
291 const __be32 *xdr = *_xdr; 291 const __be32 *xdr = *_xdr;
292 unsigned toklen = *_toklen, len; 292 unsigned int toklen = *_toklen, len;
293 293
294 /* there must be at least one tag and one length word */ 294 /* there must be at least one tag and one length word */
295 if (toklen <= 8) 295 if (toklen <= 8)
@@ -330,11 +330,11 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
330 u8 max_n_elem, 330 u8 max_n_elem,
331 size_t max_elem_size, 331 size_t max_elem_size,
332 const __be32 **_xdr, 332 const __be32 **_xdr,
333 unsigned *_toklen) 333 unsigned int *_toklen)
334{ 334{
335 struct krb5_tagged_data *td; 335 struct krb5_tagged_data *td;
336 const __be32 *xdr = *_xdr; 336 const __be32 *xdr = *_xdr;
337 unsigned toklen = *_toklen, n_elem, loop; 337 unsigned int toklen = *_toklen, n_elem, loop;
338 int ret; 338 int ret;
339 339
340 /* there must be at least one count */ 340 /* there must be at least one count */
@@ -380,10 +380,10 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
380 * extract a krb5 ticket 380 * extract a krb5 ticket
381 */ 381 */
382static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, 382static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
383 const __be32 **_xdr, unsigned *_toklen) 383 const __be32 **_xdr, unsigned int *_toklen)
384{ 384{
385 const __be32 *xdr = *_xdr; 385 const __be32 *xdr = *_xdr;
386 unsigned toklen = *_toklen, len; 386 unsigned int toklen = *_toklen, len;
387 387
388 /* there must be at least one length word */ 388 /* there must be at least one length word */
389 if (toklen <= 4) 389 if (toklen <= 4)
@@ -419,7 +419,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
419 * - the caller guarantees we have at least 4 words 419 * - the caller guarantees we have at least 4 words
420 */ 420 */
421static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr, 421static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
422 unsigned toklen) 422 unsigned int toklen)
423{ 423{
424 struct rxrpc_key_token *token, **pptoken; 424 struct rxrpc_key_token *token, **pptoken;
425 struct rxk5_key *rxk5; 425 struct rxk5_key *rxk5;
@@ -549,7 +549,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
549{ 549{
550 const __be32 *xdr = data, *token; 550 const __be32 *xdr = data, *token;
551 const char *cp; 551 const char *cp;
552 unsigned len, tmp, loop, ntoken, toklen, sec_ix; 552 unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
553 int ret; 553 int ret;
554 554
555 _enter(",{%x,%x,%x,%x},%zu", 555 _enter(",{%x,%x,%x,%x},%zu",
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 7635107726ce..f226709ebd8f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -31,7 +31,7 @@
31#define REALM_SZ 40 /* size of principal's auth domain */ 31#define REALM_SZ 40 /* size of principal's auth domain */
32#define SNAME_SZ 40 /* size of service name */ 32#define SNAME_SZ 40 /* size of service name */
33 33
34unsigned rxrpc_debug; 34unsigned int rxrpc_debug;
35module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 35module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
36MODULE_PARM_DESC(debug, "rxkad debugging mask"); 36MODULE_PARM_DESC(debug, "rxkad debugging mask");
37 37
@@ -207,7 +207,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
207 struct rxrpc_crypt iv; 207 struct rxrpc_crypt iv;
208 struct scatterlist sg[16]; 208 struct scatterlist sg[16];
209 struct sk_buff *trailer; 209 struct sk_buff *trailer;
210 unsigned len; 210 unsigned int len;
211 u16 check; 211 u16 check;
212 int nsg; 212 int nsg;
213 213
@@ -826,7 +826,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
826 struct rxrpc_crypt iv, key; 826 struct rxrpc_crypt iv, key;
827 struct scatterlist sg[1]; 827 struct scatterlist sg[1];
828 struct in_addr addr; 828 struct in_addr addr;
829 unsigned life; 829 unsigned int life;
830 time_t issue, now; 830 time_t issue, now;
831 bool little_endian; 831 bool little_endian;
832 int ret; 832 int ret;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2590e91b3289..e7a8976bf25c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,28 @@ config NET_SCH_QFQ
250 250
251 If unsure, say N. 251 If unsure, say N.
252 252
253config NET_SCH_CODEL
254 tristate "Controlled Delay AQM (CODEL)"
255 help
256 Say Y here if you want to use the Controlled Delay (CODEL)
257 packet scheduling algorithm.
258
259 To compile this driver as a module, choose M here: the module
260 will be called sch_codel.
261
262 If unsure, say N.
263
264config NET_SCH_FQ_CODEL
265 tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
266 help
267 Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
268 packet scheduling algorithm.
269
270 To compile this driver as a module, choose M here: the module
271 will be called sch_fq_codel.
272
273 If unsure, say N.
274
253config NET_SCH_INGRESS 275config NET_SCH_INGRESS
254 tristate "Ingress Qdisc" 276 tristate "Ingress Qdisc"
255 depends on NET_CLS_ACT 277 depends on NET_CLS_ACT
@@ -260,6 +282,32 @@ config NET_SCH_INGRESS
260 To compile this code as a module, choose M here: the 282 To compile this code as a module, choose M here: the
261 module will be called sch_ingress. 283 module will be called sch_ingress.
262 284
285config NET_SCH_PLUG
286 tristate "Plug network traffic until release (PLUG)"
287 ---help---
288
289 This queuing discipline allows userspace to plug/unplug a network
290 output queue, using the netlink interface. When it receives an
291 enqueue command it inserts a plug into the outbound queue that
292 causes following packets to enqueue until a dequeue command arrives
293 over netlink, causing the plug to be removed and resuming the normal
294 packet flow.
295
296 This module also provides a generic "network output buffering"
297 functionality (aka output commit), wherein upon arrival of a dequeue
298 command, only packets up to the first plug are released for delivery.
299 The Remus HA project uses this module to enable speculative execution
300 of virtual machines by allowing the generated network output to be rolled
301 back if needed.
302
303 For more information, please refer to http://wiki.xensource.com/xenwiki/Remus
304
305 Say Y here if you are using this kernel for Xen dom0 and
306 want to protect Xen guests with Remus.
307
308 To compile this code as a module, choose M here: the
309 module will be called sch_plug.
310
263comment "Classification" 311comment "Classification"
264 312
265config NET_CLS 313config NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index dc5889c0a15a..5940a1992f0d 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,9 +33,12 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
33obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o 33obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
34obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o 34obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 37obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 38obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o 39obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
40obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
41obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
39 42
40obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 43obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
41obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o 44obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 93fdf131bd75..5cfb160df063 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
127 nest = nla_nest_start(skb, a->order); 127 nest = nla_nest_start(skb, a->order);
128 if (nest == NULL) 128 if (nest == NULL)
129 goto nla_put_failure; 129 goto nla_put_failure;
130 NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); 130 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
131 goto nla_put_failure;
131 for (i = 0; i < (hinfo->hmask + 1); i++) { 132 for (i = 0; i < (hinfo->hmask + 1); i++) {
132 p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; 133 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
133 134
@@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
139 p = s_p; 140 p = s_p;
140 } 141 }
141 } 142 }
142 NLA_PUT_U32(skb, TCA_FCNT, n_i); 143 if (nla_put_u32(skb, TCA_FCNT, n_i))
144 goto nla_put_failure;
143 nla_nest_end(skb, nest); 145 nla_nest_end(skb, nest);
144 146
145 return n_i; 147 return n_i;
@@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
437 if (a->ops == NULL || a->ops->dump == NULL) 439 if (a->ops == NULL || a->ops->dump == NULL)
438 return err; 440 return err;
439 441
440 NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); 442 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
443 goto nla_put_failure;
441 if (tcf_action_copy_stats(skb, a, 0)) 444 if (tcf_action_copy_stats(skb, a, 0))
442 goto nla_put_failure; 445 goto nla_put_failure;
443 nest = nla_nest_start(skb, TCA_OPTIONS); 446 nest = nla_nest_start(skb, TCA_OPTIONS);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a73431ac4..2c8ad7c86e43 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -397,7 +397,7 @@ static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
397 397
398 while (len > 1) { 398 while (len > 1) {
399 switch (xh[off]) { 399 switch (xh[off]) {
400 case IPV6_TLV_PAD0: 400 case IPV6_TLV_PAD1:
401 optlen = 1; 401 optlen = 1;
402 break; 402 break;
403 case IPV6_TLV_JUMBO: 403 case IPV6_TLV_JUMBO:
@@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb,
550 }; 550 };
551 struct tcf_t t; 551 struct tcf_t t;
552 552
553 NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt); 553 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
554 goto nla_put_failure;
554 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 555 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
555 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 556 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
556 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 557 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
557 NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t); 558 if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
559 goto nla_put_failure;
558 560
559 return skb->len; 561 return skb->len;
560 562
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b77f5a06a658..f10fb8256442 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
162 }; 162 };
163 struct tcf_t t; 163 struct tcf_t t;
164 164
165 NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); 165 if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
166 goto nla_put_failure;
166#ifdef CONFIG_GACT_PROB 167#ifdef CONFIG_GACT_PROB
167 if (gact->tcfg_ptype) { 168 if (gact->tcfg_ptype) {
168 struct tc_gact_p p_opt = { 169 struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
171 .ptype = gact->tcfg_ptype, 172 .ptype = gact->tcfg_ptype,
172 }; 173 };
173 174
174 NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); 175 if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
176 goto nla_put_failure;
175 } 177 }
176#endif 178#endif
177 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); 179 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
178 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); 180 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
179 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); 181 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
180 NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); 182 if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
183 goto nla_put_failure;
181 return skb->len; 184 return skb->len;
182 185
183nla_put_failure: 186nla_put_failure:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60f8f616e8fa..60e281ad0f07 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/sched/ipt.c iptables target interface 2 * net/sched/ipt.c iptables target interface
3 * 3 *
4 *TODO: Add other tables. For now we only support the ipv4 table targets 4 *TODO: Add other tables. For now we only support the ipv4 table targets
5 * 5 *
@@ -235,9 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
235 result = TC_ACT_PIPE; 235 result = TC_ACT_PIPE;
236 break; 236 break;
237 default: 237 default:
238 if (net_ratelimit()) 238 net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
239 pr_notice("tc filter: Bogus netfilter code" 239 ret);
240 " %d assume ACCEPT\n", ret);
241 result = TC_POLICE_OK; 240 result = TC_POLICE_OK;
242 break; 241 break;
243 } 242 }
@@ -267,15 +266,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
267 c.refcnt = ipt->tcf_refcnt - ref; 266 c.refcnt = ipt->tcf_refcnt - ref;
268 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); 267 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
269 268
270 NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); 269 if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
271 NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index); 270 nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
272 NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook); 271 nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
273 NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); 272 nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
274 NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname); 273 nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
274 goto nla_put_failure;
275 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); 275 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
276 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); 276 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
277 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); 277 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
278 NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); 278 if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
279 goto nla_put_failure;
279 kfree(t); 280 kfree(t);
280 return skb->len; 281 return skb->len;
281 282
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e051398fdf6b..fe81cc18e9e0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -174,9 +174,8 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
174 } 174 }
175 175
176 if (!(dev->flags & IFF_UP)) { 176 if (!(dev->flags & IFF_UP)) {
177 if (net_ratelimit()) 177 net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
178 pr_notice("tc mirred to Houston: device %s is down\n", 178 dev->name);
179 dev->name);
180 goto out; 179 goto out;
181 } 180 }
182 181
@@ -227,11 +226,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
227 }; 226 };
228 struct tcf_t t; 227 struct tcf_t t;
229 228
230 NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); 229 if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
230 goto nla_put_failure;
231 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); 231 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
232 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); 232 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
233 t.expires = jiffies_to_clock_t(m->tcf_tm.expires); 233 t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
234 NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); 234 if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
235 goto nla_put_failure;
235 return skb->len; 236 return skb->len;
236 237
237nla_put_failure: 238nla_put_failure:
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b354869..b5d029eb44f2 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
284 }; 284 };
285 struct tcf_t t; 285 struct tcf_t t;
286 286
287 NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); 287 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
288 goto nla_put_failure;
288 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 289 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
289 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 290 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
290 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 291 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
291 NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); 292 if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
293 goto nla_put_failure;
292 294
293 return skb->len; 295 return skb->len;
294 296
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 10d3aed86560..26aa2f6ce257 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
215 opt->refcnt = p->tcf_refcnt - ref; 215 opt->refcnt = p->tcf_refcnt - ref;
216 opt->bindcnt = p->tcf_bindcnt - bind; 216 opt->bindcnt = p->tcf_bindcnt - bind;
217 217
218 NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt); 218 if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
219 goto nla_put_failure;
219 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 220 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
220 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 221 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
221 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 222 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
222 NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 223 if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
224 goto nla_put_failure;
223 kfree(opt); 225 kfree(opt);
224 return skb->len; 226 return skb->len;
225 227
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6fb3f5af0f85..a9de23297d47 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
356 opt.rate = police->tcfp_R_tab->rate; 356 opt.rate = police->tcfp_R_tab->rate;
357 if (police->tcfp_P_tab) 357 if (police->tcfp_P_tab)
358 opt.peakrate = police->tcfp_P_tab->rate; 358 opt.peakrate = police->tcfp_P_tab->rate;
359 NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 359 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
360 if (police->tcfp_result) 360 goto nla_put_failure;
361 NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); 361 if (police->tcfp_result &&
362 if (police->tcfp_ewma_rate) 362 nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
363 NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate); 363 goto nla_put_failure;
364 if (police->tcfp_ewma_rate &&
365 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
366 goto nla_put_failure;
364 return skb->len; 367 return skb->len;
365 368
366nla_put_failure: 369nla_put_failure:
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 73e0a3ab4d55..3922f2a2821b 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
172 }; 172 };
173 struct tcf_t t; 173 struct tcf_t t;
174 174
175 NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); 175 if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
176 NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); 176 nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
177 goto nla_put_failure;
177 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 178 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
178 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 179 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
179 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 180 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
180 NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); 181 if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
182 goto nla_put_failure;
181 return skb->len; 183 return skb->len;
182 184
183nla_put_failure: 185nla_put_failure:
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35dbbe91027e..476e0fac6712 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
166 }; 166 };
167 struct tcf_t t; 167 struct tcf_t t;
168 168
169 NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); 169 if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
170 if (d->flags & SKBEDIT_F_PRIORITY) 170 goto nla_put_failure;
171 NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), 171 if ((d->flags & SKBEDIT_F_PRIORITY) &&
172 &d->priority); 172 nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
173 if (d->flags & SKBEDIT_F_QUEUE_MAPPING) 173 &d->priority))
174 NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, 174 goto nla_put_failure;
175 sizeof(d->queue_mapping), &d->queue_mapping); 175 if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
176 if (d->flags & SKBEDIT_F_MARK) 176 nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
177 NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), 177 sizeof(d->queue_mapping), &d->queue_mapping))
178 &d->mark); 178 goto nla_put_failure;
179 if ((d->flags & SKBEDIT_F_MARK) &&
180 nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
181 &d->mark))
182 goto nla_put_failure;
179 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 183 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
180 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 184 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
181 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 185 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
182 NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); 186 if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
187 goto nla_put_failure;
183 return skb->len; 188 return skb->len;
184 189
185nla_put_failure: 190nla_put_failure:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a69d44f1dac5..f452f696b4b3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
357 tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; 357 tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
358 tcm->tcm_parent = tp->classid; 358 tcm->tcm_parent = tp->classid;
359 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 359 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
360 NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); 360 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
361 goto nla_put_failure;
361 tcm->tcm_handle = fh; 362 tcm->tcm_handle = fh;
362 if (RTM_DELTFILTER != event) { 363 if (RTM_DELTFILTER != event) {
363 tcm->tcm_handle = 0; 364 tcm->tcm_handle = 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index ea1f70b5a5f4..590960a22a77 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
257 if (nest == NULL) 257 if (nest == NULL)
258 goto nla_put_failure; 258 goto nla_put_failure;
259 259
260 if (f->res.classid) 260 if (f->res.classid &&
261 NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid); 261 nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
262 goto nla_put_failure;
262 263
263 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || 264 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
264 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) 265 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index f84fdc3a7f27..7743ea8d1d38 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -22,23 +22,6 @@
22#include <net/sock.h> 22#include <net/sock.h>
23#include <net/cls_cgroup.h> 23#include <net/cls_cgroup.h>
24 24
25static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
26 struct cgroup *cgrp);
27static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
28static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
29
30struct cgroup_subsys net_cls_subsys = {
31 .name = "net_cls",
32 .create = cgrp_create,
33 .destroy = cgrp_destroy,
34 .populate = cgrp_populate,
35#ifdef CONFIG_NET_CLS_CGROUP
36 .subsys_id = net_cls_subsys_id,
37#endif
38 .module = THIS_MODULE,
39};
40
41
42static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 25static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
43{ 26{
44 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 27 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -51,8 +34,7 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
51 struct cgroup_cls_state, css); 34 struct cgroup_cls_state, css);
52} 35}
53 36
54static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 37static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
55 struct cgroup *cgrp)
56{ 38{
57 struct cgroup_cls_state *cs; 39 struct cgroup_cls_state *cs;
58 40
@@ -66,7 +48,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
66 return &cs->css; 48 return &cs->css;
67} 49}
68 50
69static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) 51static void cgrp_destroy(struct cgroup *cgrp)
70{ 52{
71 kfree(cgrp_cls_state(cgrp)); 53 kfree(cgrp_cls_state(cgrp));
72} 54}
@@ -88,12 +70,19 @@ static struct cftype ss_files[] = {
88 .read_u64 = read_classid, 70 .read_u64 = read_classid,
89 .write_u64 = write_classid, 71 .write_u64 = write_classid,
90 }, 72 },
73 { } /* terminate */
91}; 74};
92 75
93static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) 76struct cgroup_subsys net_cls_subsys = {
94{ 77 .name = "net_cls",
95 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 78 .create = cgrp_create,
96} 79 .destroy = cgrp_destroy,
80#ifdef CONFIG_NET_CLS_CGROUP
81 .subsys_id = net_cls_subsys_id,
82#endif
83 .base_cftypes = ss_files,
84 .module = THIS_MODULE,
85};
97 86
98struct cls_cgroup_head { 87struct cls_cgroup_head {
99 u32 handle; 88 u32 handle;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 1d8bd0dbcd1f..ccd08c8dc6a7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
572 if (nest == NULL) 572 if (nest == NULL)
573 goto nla_put_failure; 573 goto nla_put_failure;
574 574
575 NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask); 575 if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
576 NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode); 576 nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
577 goto nla_put_failure;
577 578
578 if (f->mask != ~0 || f->xor != 0) { 579 if (f->mask != ~0 || f->xor != 0) {
579 NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask); 580 if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
580 NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor); 581 nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
582 goto nla_put_failure;
581 } 583 }
582 if (f->rshift) 584 if (f->rshift &&
583 NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift); 585 nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
584 if (f->addend) 586 goto nla_put_failure;
585 NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend); 587 if (f->addend &&
588 nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
589 goto nla_put_failure;
586 590
587 if (f->divisor) 591 if (f->divisor &&
588 NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor); 592 nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
589 if (f->baseclass) 593 goto nla_put_failure;
590 NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); 594 if (f->baseclass &&
595 nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
596 goto nla_put_failure;
591 597
592 if (f->perturb_period) 598 if (f->perturb_period &&
593 NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); 599 nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
600 goto nla_put_failure;
594 601
595 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) 602 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
596 goto nla_put_failure; 603 goto nla_put_failure;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 389af152ec45..8384a4797240 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
346 if (nest == NULL) 346 if (nest == NULL)
347 goto nla_put_failure; 347 goto nla_put_failure;
348 348
349 if (f->res.classid) 349 if (f->res.classid &&
350 NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid); 350 nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
351 goto nla_put_failure;
351#ifdef CONFIG_NET_CLS_IND 352#ifdef CONFIG_NET_CLS_IND
352 if (strlen(f->indev)) 353 if (strlen(f->indev) &&
353 NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev); 354 nla_put_string(skb, TCA_FW_INDEV, f->indev))
355 goto nla_put_failure;
354#endif /* CONFIG_NET_CLS_IND */ 356#endif /* CONFIG_NET_CLS_IND */
355 if (head->mask != 0xFFFFFFFF) 357 if (head->mask != 0xFFFFFFFF &&
356 NLA_PUT_U32(skb, TCA_FW_MASK, head->mask); 358 nla_put_u32(skb, TCA_FW_MASK, head->mask))
359 goto nla_put_failure;
357 360
358 if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) 361 if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
359 goto nla_put_failure; 362 goto nla_put_failure;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 13ab66e9df58..36fec4227401 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
571 571
572 if (!(f->handle & 0x8000)) { 572 if (!(f->handle & 0x8000)) {
573 id = f->id & 0xFF; 573 id = f->id & 0xFF;
574 NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); 574 if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
575 goto nla_put_failure;
575 } 576 }
576 if (f->handle & 0x80000000) { 577 if (f->handle & 0x80000000) {
577 if ((f->handle >> 16) != 0xFFFF) 578 if ((f->handle >> 16) != 0xFFFF &&
578 NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); 579 nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
580 goto nla_put_failure;
579 } else { 581 } else {
580 id = f->id >> 16; 582 id = f->id >> 16;
581 NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); 583 if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
584 goto nla_put_failure;
582 } 585 }
583 if (f->res.classid) 586 if (f->res.classid &&
584 NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid); 587 nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
588 goto nla_put_failure;
585 589
586 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) 590 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
587 goto nla_put_failure; 591 goto nla_put_failure;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b01427924f81..18ab93ec8d7e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
615 if (nest == NULL) 615 if (nest == NULL)
616 goto nla_put_failure; 616 goto nla_put_failure;
617 617
618 NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst); 618 if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
619 goto nla_put_failure;
619 pinfo.dpi = s->dpi; 620 pinfo.dpi = s->dpi;
620 pinfo.spi = f->spi; 621 pinfo.spi = f->spi;
621 pinfo.protocol = s->protocol; 622 pinfo.protocol = s->protocol;
622 pinfo.tunnelid = s->tunnelid; 623 pinfo.tunnelid = s->tunnelid;
623 pinfo.tunnelhdr = f->tunnelhdr; 624 pinfo.tunnelhdr = f->tunnelhdr;
624 pinfo.pad = 0; 625 pinfo.pad = 0;
625 NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); 626 if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
626 if (f->res.classid) 627 goto nla_put_failure;
627 NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); 628 if (f->res.classid &&
628 if (((f->handle >> 8) & 0xFF) != 16) 629 nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
629 NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); 630 goto nla_put_failure;
631 if (((f->handle >> 8) & 0xFF) != 16 &&
632 nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
633 goto nla_put_failure;
630 634
631 if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) 635 if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
632 goto nla_put_failure; 636 goto nla_put_failure;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index dbe199234c63..fe29420d0b0e 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
438 438
439 if (!fh) { 439 if (!fh) {
440 t->tcm_handle = ~0; /* whatever ... */ 440 t->tcm_handle = ~0; /* whatever ... */
441 NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash); 441 if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
442 NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask); 442 nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
443 NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift); 443 nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
444 NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through); 444 nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
445 goto nla_put_failure;
445 nla_nest_end(skb, nest); 446 nla_nest_end(skb, nest);
446 } else { 447 } else {
447 if (p->perfect) { 448 if (p->perfect) {
@@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
460 } 461 }
461 } 462 }
462 pr_debug("handle = %d\n", t->tcm_handle); 463 pr_debug("handle = %d\n", t->tcm_handle);
463 if (r->res.class) 464 if (r->res.class &&
464 NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid); 465 nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
466 goto nla_put_failure;
465 467
466 if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) 468 if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
467 goto nla_put_failure; 469 goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 939b627b4795..d45373fb00b9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -234,8 +234,7 @@ out:
234 return -1; 234 return -1;
235 235
236deadloop: 236deadloop:
237 if (net_ratelimit()) 237 net_warn_ratelimited("cls_u32: dead loop\n");
238 pr_warning("cls_u32: dead loop\n");
239 return -1; 238 return -1;
240} 239}
241 240
@@ -733,36 +732,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
733 struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; 732 struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
734 u32 divisor = ht->divisor + 1; 733 u32 divisor = ht->divisor + 1;
735 734
736 NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); 735 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
736 goto nla_put_failure;
737 } else { 737 } else {
738 NLA_PUT(skb, TCA_U32_SEL, 738 if (nla_put(skb, TCA_U32_SEL,
739 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), 739 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
740 &n->sel); 740 &n->sel))
741 goto nla_put_failure;
741 if (n->ht_up) { 742 if (n->ht_up) {
742 u32 htid = n->handle & 0xFFFFF000; 743 u32 htid = n->handle & 0xFFFFF000;
743 NLA_PUT_U32(skb, TCA_U32_HASH, htid); 744 if (nla_put_u32(skb, TCA_U32_HASH, htid))
745 goto nla_put_failure;
744 } 746 }
745 if (n->res.classid) 747 if (n->res.classid &&
746 NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid); 748 nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
747 if (n->ht_down) 749 goto nla_put_failure;
748 NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle); 750 if (n->ht_down &&
751 nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
752 goto nla_put_failure;
749 753
750#ifdef CONFIG_CLS_U32_MARK 754#ifdef CONFIG_CLS_U32_MARK
751 if (n->mark.val || n->mark.mask) 755 if ((n->mark.val || n->mark.mask) &&
752 NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark); 756 nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
757 goto nla_put_failure;
753#endif 758#endif
754 759
755 if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0) 760 if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
756 goto nla_put_failure; 761 goto nla_put_failure;
757 762
758#ifdef CONFIG_NET_CLS_IND 763#ifdef CONFIG_NET_CLS_IND
759 if (strlen(n->indev)) 764 if (strlen(n->indev) &&
760 NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); 765 nla_put_string(skb, TCA_U32_INDEV, n->indev))
766 goto nla_put_failure;
761#endif 767#endif
762#ifdef CONFIG_CLS_U32_PERF 768#ifdef CONFIG_CLS_U32_PERF
763 NLA_PUT(skb, TCA_U32_PCNT, 769 if (nla_put(skb, TCA_U32_PCNT,
764 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), 770 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
765 n->pf); 771 n->pf))
772 goto nla_put_failure;
766#endif 773#endif
767 } 774 }
768 775
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 1363bf14e61b..4790c696cbce 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v,
585 585
586static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv) 586static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
587{ 587{
588 if (v->val && v->len) 588 if (v->val && v->len &&
589 NLA_PUT(skb, tlv, v->len, (void *) v->val); 589 nla_put(skb, tlv, v->len, (void *) v->val))
590 goto nla_put_failure;
590 return 0; 591 return 0;
591 592
592nla_put_failure: 593nla_put_failure:
@@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v,
636 637
637static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) 638static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
638{ 639{
639 if (v->len == sizeof(unsigned long)) 640 if (v->len == sizeof(unsigned long)) {
640 NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); 641 if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
641 else if (v->len == sizeof(u32)) 642 goto nla_put_failure;
642 NLA_PUT_U32(skb, tlv, v->val); 643 } else if (v->len == sizeof(u32)) {
644 if (nla_put_u32(skb, tlv, v->val))
645 goto nla_put_failure;
646 }
643 647
644 return 0; 648 return 0;
645 649
@@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
831 memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left)); 835 memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
832 memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right)); 836 memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
833 837
834 NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr); 838 if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
839 goto nla_put_failure;
835 840
836 ops = meta_type_ops(&meta->lvalue); 841 ops = meta_type_ops(&meta->lvalue);
837 if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 || 842 if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 88d93eb92507..3a633debb6df 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
441 if (top_start == NULL) 441 if (top_start == NULL)
442 goto nla_put_failure; 442 goto nla_put_failure;
443 443
444 NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); 444 if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
445 goto nla_put_failure;
445 446
446 list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); 447 list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
447 if (list_start == NULL) 448 if (list_start == NULL)
@@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
457 .flags = em->flags 458 .flags = em->flags
458 }; 459 };
459 460
460 NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); 461 if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
462 goto nla_put_failure;
461 463
462 if (em->ops && em->ops->dump) { 464 if (em->ops && em->ops->dump) {
463 if (em->ops->dump(skb, em) < 0) 465 if (em->ops->dump(skb, em) < 0)
@@ -535,9 +537,7 @@ pop_stack:
535 return res; 537 return res;
536 538
537stack_overflow: 539stack_overflow:
538 if (net_ratelimit()) 540 net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
539 pr_warning("tc ematch: local stack overflow,"
540 " increase NET_EMATCH_STACK\n");
541 return -1; 541 return -1;
542} 542}
543EXPORT_SYMBOL(__tcf_em_tree_match); 543EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3d8981fde301..085ce53d570a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
426 nest = nla_nest_start(skb, TCA_STAB); 426 nest = nla_nest_start(skb, TCA_STAB);
427 if (nest == NULL) 427 if (nest == NULL)
428 goto nla_put_failure; 428 goto nla_put_failure;
429 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); 429 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
430 goto nla_put_failure;
430 nla_nest_end(skb, nest); 431 nla_nest_end(skb, nest);
431 432
432 return skb->len; 433 return skb->len;
@@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1201 tcm->tcm_parent = clid; 1202 tcm->tcm_parent = clid;
1202 tcm->tcm_handle = q->handle; 1203 tcm->tcm_handle = q->handle;
1203 tcm->tcm_info = atomic_read(&q->refcnt); 1204 tcm->tcm_info = atomic_read(&q->refcnt);
1204 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); 1205 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1206 goto nla_put_failure;
1205 if (q->ops->dump && q->ops->dump(q, skb) < 0) 1207 if (q->ops->dump && q->ops->dump(q, skb) < 0)
1206 goto nla_put_failure; 1208 goto nla_put_failure;
1207 q->qstats.qlen = q->q.qlen; 1209 q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1505 tcm->tcm_parent = q->handle; 1507 tcm->tcm_parent = q->handle;
1506 tcm->tcm_handle = q->handle; 1508 tcm->tcm_handle = q->handle;
1507 tcm->tcm_info = 0; 1509 tcm->tcm_info = 0;
1508 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); 1510 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1511 goto nla_put_failure;
1509 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1512 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1510 goto nla_put_failure; 1513 goto nla_put_failure;
1511 1514
@@ -1688,12 +1691,10 @@ reclassify:
1688 tp = otp; 1691 tp = otp;
1689 1692
1690 if (verd++ >= MAX_REC_LOOP) { 1693 if (verd++ >= MAX_REC_LOOP) {
1691 if (net_ratelimit()) 1694 net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
1692 pr_notice("%s: packet reclassify loop" 1695 tp->q->ops->id,
1693 " rule prio %u protocol %02x\n", 1696 tp->prio & 0xffff,
1694 tp->q->ops->id, 1697 ntohs(tp->protocol));
1695 tp->prio & 0xffff,
1696 ntohs(tp->protocol));
1697 return TC_ACT_SHOT; 1698 return TC_ACT_SHOT;
1698 } 1699 }
1699 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); 1700 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e49061a0d..ca8e0a57d945 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -16,8 +16,6 @@
16#include <net/netlink.h> 16#include <net/netlink.h>
17#include <net/pkt_sched.h> 17#include <net/pkt_sched.h>
18 18
19extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
20
21/* 19/*
22 * The ATM queuing discipline provides a framework for invoking classifiers 20 * The ATM queuing discipline provides a framework for invoking classifiers
23 * (aka "filters"), which in turn select classes of this queuing discipline. 21 * (aka "filters"), which in turn select classes of this queuing discipline.
@@ -423,8 +421,6 @@ drop: __maybe_unused
423 } 421 }
424 return ret; 422 return ret;
425 } 423 }
426 qdisc_bstats_update(sch, skb);
427 bstats_update(&flow->bstats, skb);
428 /* 424 /*
429 * Okay, this may seem weird. We pretend we've dropped the packet if 425 * Okay, this may seem weird. We pretend we've dropped the packet if
430 * it goes via ATM. The reason for this is that the outer qdisc 426 * it goes via ATM. The reason for this is that the outer qdisc
@@ -472,6 +468,8 @@ static void sch_atm_dequeue(unsigned long data)
472 if (unlikely(!skb)) 468 if (unlikely(!skb))
473 break; 469 break;
474 470
471 qdisc_bstats_update(sch, skb);
472 bstats_update(&flow->bstats, skb);
475 pr_debug("atm_tc_dequeue: sending on class %p\n", flow); 473 pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
476 /* remove any LL header somebody else has attached */ 474 /* remove any LL header somebody else has attached */
477 skb_pull(skb, skb_network_offset(skb)); 475 skb_pull(skb, skb_network_offset(skb));
@@ -601,7 +599,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
601 if (nest == NULL) 599 if (nest == NULL)
602 goto nla_put_failure; 600 goto nla_put_failure;
603 601
604 NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); 602 if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
603 goto nla_put_failure;
605 if (flow->vcc) { 604 if (flow->vcc) {
606 struct sockaddr_atmpvc pvc; 605 struct sockaddr_atmpvc pvc;
607 int state; 606 int state;
@@ -610,15 +609,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
610 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; 609 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
611 pvc.sap_addr.vpi = flow->vcc->vpi; 610 pvc.sap_addr.vpi = flow->vcc->vpi;
612 pvc.sap_addr.vci = flow->vcc->vci; 611 pvc.sap_addr.vci = flow->vcc->vci;
613 NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); 612 if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
613 goto nla_put_failure;
614 state = ATM_VF2VS(flow->vcc->flags); 614 state = ATM_VF2VS(flow->vcc->flags);
615 NLA_PUT_U32(skb, TCA_ATM_STATE, state); 615 if (nla_put_u32(skb, TCA_ATM_STATE, state))
616 goto nla_put_failure;
617 }
618 if (flow->excess) {
619 if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
620 goto nla_put_failure;
621 } else {
622 if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
623 goto nla_put_failure;
616 } 624 }
617 if (flow->excess)
618 NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
619 else
620 NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
621
622 nla_nest_end(skb, nest); 625 nla_nest_end(skb, nest);
623 return skb->len; 626 return skb->len;
624 627
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 24d94c097b35..6aabd77d1cfd 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1425{ 1425{
1426 unsigned char *b = skb_tail_pointer(skb); 1426 unsigned char *b = skb_tail_pointer(skb);
1427 1427
1428 NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); 1428 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1429 goto nla_put_failure;
1429 return skb->len; 1430 return skb->len;
1430 1431
1431nla_put_failure: 1432nla_put_failure:
@@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1450 opt.minidle = (u32)(-cl->minidle); 1451 opt.minidle = (u32)(-cl->minidle);
1451 opt.offtime = cl->offtime; 1452 opt.offtime = cl->offtime;
1452 opt.change = ~0; 1453 opt.change = ~0;
1453 NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); 1454 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1455 goto nla_put_failure;
1454 return skb->len; 1456 return skb->len;
1455 1457
1456nla_put_failure: 1458nla_put_failure:
@@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1468 opt.priority = cl->priority + 1; 1470 opt.priority = cl->priority + 1;
1469 opt.cpriority = cl->cpriority + 1; 1471 opt.cpriority = cl->cpriority + 1;
1470 opt.weight = cl->weight; 1472 opt.weight = cl->weight;
1471 NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); 1473 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1474 goto nla_put_failure;
1472 return skb->len; 1475 return skb->len;
1473 1476
1474nla_put_failure: 1477nla_put_failure:
@@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1485 opt.priority2 = cl->priority2 + 1; 1488 opt.priority2 = cl->priority2 + 1;
1486 opt.pad = 0; 1489 opt.pad = 0;
1487 opt.penalty = cl->penalty; 1490 opt.penalty = cl->penalty;
1488 NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); 1491 if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
1492 goto nla_put_failure;
1489 return skb->len; 1493 return skb->len;
1490 1494
1491nla_put_failure: 1495nla_put_failure:
@@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1502 opt.split = cl->split ? cl->split->common.classid : 0; 1506 opt.split = cl->split ? cl->split->common.classid : 0;
1503 opt.defmap = cl->defmap; 1507 opt.defmap = cl->defmap;
1504 opt.defchange = ~0; 1508 opt.defchange = ~0;
1505 NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); 1509 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1510 goto nla_put_failure;
1506 } 1511 }
1507 return skb->len; 1512 return skb->len;
1508 1513
@@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1521 opt.police = cl->police; 1526 opt.police = cl->police;
1522 opt.__res1 = 0; 1527 opt.__res1 = 0;
1523 opt.__res2 = 0; 1528 opt.__res2 = 0;
1524 NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); 1529 if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
1530 goto nla_put_failure;
1525 } 1531 }
1526 return skb->len; 1532 return skb->len;
1527 1533
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 7e267d7b9c75..cc37dd52ecf9 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
332 } 332 }
333 333
334 q->stats.pdrop++; 334 q->stats.pdrop++;
335 sch->qstats.drops++; 335 return qdisc_drop(skb, sch);
336 kfree_skb(skb);
337 return NET_XMIT_DROP;
338 336
339 congestion_drop: 337congestion_drop:
340 qdisc_drop(skb, sch); 338 qdisc_drop(skb, sch);
341 return NET_XMIT_CN; 339 return NET_XMIT_CN;
342 340
343 other_drop: 341other_drop:
344 if (ret & __NET_XMIT_BYPASS) 342 if (ret & __NET_XMIT_BYPASS)
345 sch->qstats.drops++; 343 sch->qstats.drops++;
346 kfree_skb(skb); 344 kfree_skb(skb);
@@ -515,8 +513,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
515 if (opts == NULL) 513 if (opts == NULL)
516 goto nla_put_failure; 514 goto nla_put_failure;
517 515
518 NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); 516 if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
519 NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); 517 nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
518 goto nla_put_failure;
520 return nla_nest_end(skb, opts); 519 return nla_nest_end(skb, opts);
521 520
522nla_put_failure: 521nla_put_failure:
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 000000000000..2f9ab17db85a
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,276 @@
1/*
2 * Codel - The Controlled-Delay Active Queue Management algorithm
3 *
4 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
5 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
6 *
7 * Implemented on linux by :
8 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
9 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The names of the authors may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * Alternatively, provided that this notice is retained in full, this
24 * software may be distributed under the terms of the GNU General
25 * Public License ("GPL") version 2, in which case the provisions of the
26 * GPL apply INSTEAD OF those given above.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43#include <linux/module.h>
44#include <linux/slab.h>
45#include <linux/types.h>
46#include <linux/kernel.h>
47#include <linux/errno.h>
48#include <linux/skbuff.h>
49#include <linux/prefetch.h>
50#include <net/pkt_sched.h>
51#include <net/codel.h>
52
53
54#define DEFAULT_CODEL_LIMIT 1000
55
56struct codel_sched_data {
57 struct codel_params params;
58 struct codel_vars vars;
59 struct codel_stats stats;
60 u32 drop_overlimit;
61};
62
63/* This is the specific function called from codel_dequeue()
64 * to dequeue a packet from queue. Note: backlog is handled in
65 * codel, we dont need to reduce it here.
66 */
67static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
68{
69 struct sk_buff *skb = __skb_dequeue(&sch->q);
70
71 prefetch(&skb->end); /* we'll need skb_shinfo() */
72 return skb;
73}
74
75static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
76{
77 struct codel_sched_data *q = qdisc_priv(sch);
78 struct sk_buff *skb;
79
80 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
81
82 /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
83 * or HTB crashes. Defer it for next round.
84 */
85 if (q->stats.drop_count && sch->q.qlen) {
86 qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
87 q->stats.drop_count = 0;
88 }
89 if (skb)
90 qdisc_bstats_update(sch, skb);
91 return skb;
92}
93
94static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
95{
96 struct codel_sched_data *q;
97
98 if (likely(qdisc_qlen(sch) < sch->limit)) {
99 codel_set_enqueue_time(skb);
100 return qdisc_enqueue_tail(skb, sch);
101 }
102 q = qdisc_priv(sch);
103 q->drop_overlimit++;
104 return qdisc_drop(skb, sch);
105}
106
107static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
108 [TCA_CODEL_TARGET] = { .type = NLA_U32 },
109 [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
110 [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
111 [TCA_CODEL_ECN] = { .type = NLA_U32 },
112};
113
114static int codel_change(struct Qdisc *sch, struct nlattr *opt)
115{
116 struct codel_sched_data *q = qdisc_priv(sch);
117 struct nlattr *tb[TCA_CODEL_MAX + 1];
118 unsigned int qlen;
119 int err;
120
121 if (!opt)
122 return -EINVAL;
123
124 err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
125 if (err < 0)
126 return err;
127
128 sch_tree_lock(sch);
129
130 if (tb[TCA_CODEL_TARGET]) {
131 u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
132
133 q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
134 }
135
136 if (tb[TCA_CODEL_INTERVAL]) {
137 u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
138
139 q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
140 }
141
142 if (tb[TCA_CODEL_LIMIT])
143 sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
144
145 if (tb[TCA_CODEL_ECN])
146 q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
147
148 qlen = sch->q.qlen;
149 while (sch->q.qlen > sch->limit) {
150 struct sk_buff *skb = __skb_dequeue(&sch->q);
151
152 sch->qstats.backlog -= qdisc_pkt_len(skb);
153 qdisc_drop(skb, sch);
154 }
155 qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
156
157 sch_tree_unlock(sch);
158 return 0;
159}
160
161static int codel_init(struct Qdisc *sch, struct nlattr *opt)
162{
163 struct codel_sched_data *q = qdisc_priv(sch);
164
165 sch->limit = DEFAULT_CODEL_LIMIT;
166
167 codel_params_init(&q->params);
168 codel_vars_init(&q->vars);
169 codel_stats_init(&q->stats);
170
171 if (opt) {
172 int err = codel_change(sch, opt);
173
174 if (err)
175 return err;
176 }
177
178 if (sch->limit >= 1)
179 sch->flags |= TCQ_F_CAN_BYPASS;
180 else
181 sch->flags &= ~TCQ_F_CAN_BYPASS;
182
183 return 0;
184}
185
186static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
187{
188 struct codel_sched_data *q = qdisc_priv(sch);
189 struct nlattr *opts;
190
191 opts = nla_nest_start(skb, TCA_OPTIONS);
192 if (opts == NULL)
193 goto nla_put_failure;
194
195 if (nla_put_u32(skb, TCA_CODEL_TARGET,
196 codel_time_to_us(q->params.target)) ||
197 nla_put_u32(skb, TCA_CODEL_LIMIT,
198 sch->limit) ||
199 nla_put_u32(skb, TCA_CODEL_INTERVAL,
200 codel_time_to_us(q->params.interval)) ||
201 nla_put_u32(skb, TCA_CODEL_ECN,
202 q->params.ecn))
203 goto nla_put_failure;
204
205 return nla_nest_end(skb, opts);
206
207nla_put_failure:
208 nla_nest_cancel(skb, opts);
209 return -1;
210}
211
212static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
213{
214 const struct codel_sched_data *q = qdisc_priv(sch);
215 struct tc_codel_xstats st = {
216 .maxpacket = q->stats.maxpacket,
217 .count = q->vars.count,
218 .lastcount = q->vars.lastcount,
219 .drop_overlimit = q->drop_overlimit,
220 .ldelay = codel_time_to_us(q->vars.ldelay),
221 .dropping = q->vars.dropping,
222 .ecn_mark = q->stats.ecn_mark,
223 };
224
225 if (q->vars.dropping) {
226 codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
227
228 if (delta >= 0)
229 st.drop_next = codel_time_to_us(delta);
230 else
231 st.drop_next = -codel_time_to_us(-delta);
232 }
233
234 return gnet_stats_copy_app(d, &st, sizeof(st));
235}
236
237static void codel_reset(struct Qdisc *sch)
238{
239 struct codel_sched_data *q = qdisc_priv(sch);
240
241 qdisc_reset_queue(sch);
242 codel_vars_init(&q->vars);
243}
244
245static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
246 .id = "codel",
247 .priv_size = sizeof(struct codel_sched_data),
248
249 .enqueue = codel_qdisc_enqueue,
250 .dequeue = codel_qdisc_dequeue,
251 .peek = qdisc_peek_dequeued,
252 .init = codel_init,
253 .reset = codel_reset,
254 .change = codel_change,
255 .dump = codel_dump,
256 .dump_stats = codel_dump_stats,
257 .owner = THIS_MODULE,
258};
259
260static int __init codel_module_init(void)
261{
262 return register_qdisc(&codel_qdisc_ops);
263}
264
265static void __exit codel_module_exit(void)
266{
267 unregister_qdisc(&codel_qdisc_ops);
268}
269
270module_init(codel_module_init)
271module_exit(codel_module_exit)
272
273MODULE_DESCRIPTION("Controlled Delay queue discipline");
274MODULE_AUTHOR("Dave Taht");
275MODULE_AUTHOR("Eric Dumazet");
276MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 6b7fe4a84f13..9ce0b4fe23ff 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
260 nest = nla_nest_start(skb, TCA_OPTIONS); 260 nest = nla_nest_start(skb, TCA_OPTIONS);
261 if (nest == NULL) 261 if (nest == NULL)
262 goto nla_put_failure; 262 goto nla_put_failure;
263 NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum); 263 if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
264 goto nla_put_failure;
264 return nla_nest_end(skb, nest); 265 return nla_nest_end(skb, nest);
265 266
266nla_put_failure: 267nla_put_failure:
@@ -375,8 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
375 cl->deficit = cl->quantum; 376 cl->deficit = cl->quantum;
376 } 377 }
377 378
378 bstats_update(&cl->bstats, skb);
379
380 sch->q.qlen++; 379 sch->q.qlen++;
381 return err; 380 return err;
382} 381}
@@ -402,6 +401,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
402 skb = qdisc_dequeue_peeked(cl->qdisc); 401 skb = qdisc_dequeue_peeked(cl->qdisc);
403 if (cl->qdisc->q.qlen == 0) 402 if (cl->qdisc->q.qlen == 0)
404 list_del(&cl->alist); 403 list_del(&cl->alist);
404
405 bstats_update(&cl->bstats, skb);
405 qdisc_bstats_update(sch, skb); 406 qdisc_bstats_update(sch, skb);
406 sch->q.qlen--; 407 sch->q.qlen--;
407 return skb; 408 return skb;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c790204d042..3886365cc207 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
265 return NET_XMIT_SUCCESS; 265 return NET_XMIT_SUCCESS;
266 266
267drop: 267drop:
268 kfree_skb(skb); 268 qdisc_drop(skb, sch);
269 sch->qstats.drops++;
270 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 269 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
271} 270}
272 271
@@ -429,8 +428,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
429 opts = nla_nest_start(skb, TCA_OPTIONS); 428 opts = nla_nest_start(skb, TCA_OPTIONS);
430 if (opts == NULL) 429 if (opts == NULL)
431 goto nla_put_failure; 430 goto nla_put_failure;
432 NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); 431 if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
433 NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); 432 nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
433 goto nla_put_failure;
434 434
435 return nla_nest_end(skb, opts); 435 return nla_nest_end(skb, opts);
436 436
@@ -447,13 +447,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
447 opts = nla_nest_start(skb, TCA_OPTIONS); 447 opts = nla_nest_start(skb, TCA_OPTIONS);
448 if (opts == NULL) 448 if (opts == NULL)
449 goto nla_put_failure; 449 goto nla_put_failure;
450 NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); 450 if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
451 goto nla_put_failure;
451 452
452 if (p->default_index != NO_DEFAULT_INDEX) 453 if (p->default_index != NO_DEFAULT_INDEX &&
453 NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); 454 nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
455 goto nla_put_failure;
454 456
455 if (p->set_tc_index) 457 if (p->set_tc_index &&
456 NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); 458 nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
459 goto nla_put_failure;
457 460
458 return nla_nest_end(skb, opts); 461 return nla_nest_end(skb, opts);
459 462
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 66effe2da8e0..e15a9eb29087 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
85{ 85{
86 struct tc_fifo_qopt opt = { .limit = sch->limit }; 86 struct tc_fifo_qopt opt = { .limit = sch->limit };
87 87
88 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 88 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
89 goto nla_put_failure;
89 return skb->len; 90 return skb->len;
90 91
91nla_put_failure: 92nla_put_failure:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
new file mode 100644
index 000000000000..9fc1c62ec80e
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,626 @@
1/*
2 * Fair Queue CoDel discipline
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/jiffies.h>
16#include <linux/string.h>
17#include <linux/in.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/skbuff.h>
21#include <linux/jhash.h>
22#include <linux/slab.h>
23#include <linux/vmalloc.h>
24#include <net/netlink.h>
25#include <net/pkt_sched.h>
26#include <net/flow_keys.h>
27#include <net/codel.h>
28
29/* Fair Queue CoDel.
30 *
31 * Principles :
32 * Packets are classified (internal classifier or external) on flows.
33 * This is a Stochastic model (as we use a hash, several flows
34 * might be hashed on same slot)
35 * Each flow has a CoDel managed queue.
36 * Flows are linked onto two (Round Robin) lists,
37 * so that new flows have priority on old ones.
38 *
39 * For a given flow, packets are not reordered (CoDel uses a FIFO)
40 * head drops only.
41 * ECN capability is on by default.
42 * Low memory footprint (64 bytes per flow)
43 */
44
45struct fq_codel_flow {
46 struct sk_buff *head;
47 struct sk_buff *tail;
48 struct list_head flowchain;
49 int deficit;
50 u32 dropped; /* number of drops (or ECN marks) on this flow */
51 struct codel_vars cvars;
52}; /* please try to keep this structure <= 64 bytes */
53
54struct fq_codel_sched_data {
55 struct tcf_proto *filter_list; /* optional external classifier */
56 struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
57 u32 *backlogs; /* backlog table [flows_cnt] */
58 u32 flows_cnt; /* number of flows */
59 u32 perturbation; /* hash perturbation */
60 u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
61 struct codel_params cparams;
62 struct codel_stats cstats;
63 u32 drop_overlimit;
64 u32 new_flow_count;
65
66 struct list_head new_flows; /* list of new flows */
67 struct list_head old_flows; /* list of old flows */
68};
69
70static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
71 const struct sk_buff *skb)
72{
73 struct flow_keys keys;
74 unsigned int hash;
75
76 skb_flow_dissect(skb, &keys);
77 hash = jhash_3words((__force u32)keys.dst,
78 (__force u32)keys.src ^ keys.ip_proto,
79 (__force u32)keys.ports, q->perturbation);
80 return ((u64)hash * q->flows_cnt) >> 32;
81}
82
83static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
84 int *qerr)
85{
86 struct fq_codel_sched_data *q = qdisc_priv(sch);
87 struct tcf_result res;
88 int result;
89
90 if (TC_H_MAJ(skb->priority) == sch->handle &&
91 TC_H_MIN(skb->priority) > 0 &&
92 TC_H_MIN(skb->priority) <= q->flows_cnt)
93 return TC_H_MIN(skb->priority);
94
95 if (!q->filter_list)
96 return fq_codel_hash(q, skb) + 1;
97
98 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
99 result = tc_classify(skb, q->filter_list, &res);
100 if (result >= 0) {
101#ifdef CONFIG_NET_CLS_ACT
102 switch (result) {
103 case TC_ACT_STOLEN:
104 case TC_ACT_QUEUED:
105 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
106 case TC_ACT_SHOT:
107 return 0;
108 }
109#endif
110 if (TC_H_MIN(res.classid) <= q->flows_cnt)
111 return TC_H_MIN(res.classid);
112 }
113 return 0;
114}
115
116/* helper functions : might be changed when/if skb use a standard list_head */
117
118/* remove one skb from head of slot queue */
119static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
120{
121 struct sk_buff *skb = flow->head;
122
123 flow->head = skb->next;
124 skb->next = NULL;
125 return skb;
126}
127
128/* add skb to flow queue (tail add) */
129static inline void flow_queue_add(struct fq_codel_flow *flow,
130 struct sk_buff *skb)
131{
132 if (flow->head == NULL)
133 flow->head = skb;
134 else
135 flow->tail->next = skb;
136 flow->tail = skb;
137 skb->next = NULL;
138}
139
140static unsigned int fq_codel_drop(struct Qdisc *sch)
141{
142 struct fq_codel_sched_data *q = qdisc_priv(sch);
143 struct sk_buff *skb;
144 unsigned int maxbacklog = 0, idx = 0, i, len;
145 struct fq_codel_flow *flow;
146
147 /* Queue is full! Find the fat flow and drop packet from it.
148 * This might sound expensive, but with 1024 flows, we scan
149 * 4KB of memory, and we dont need to handle a complex tree
150 * in fast path (packet queue/enqueue) with many cache misses.
151 */
152 for (i = 0; i < q->flows_cnt; i++) {
153 if (q->backlogs[i] > maxbacklog) {
154 maxbacklog = q->backlogs[i];
155 idx = i;
156 }
157 }
158 flow = &q->flows[idx];
159 skb = dequeue_head(flow);
160 len = qdisc_pkt_len(skb);
161 q->backlogs[idx] -= len;
162 kfree_skb(skb);
163 sch->q.qlen--;
164 sch->qstats.drops++;
165 sch->qstats.backlog -= len;
166 flow->dropped++;
167 return idx;
168}
169
170static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
171{
172 struct fq_codel_sched_data *q = qdisc_priv(sch);
173 unsigned int idx;
174 struct fq_codel_flow *flow;
175 int uninitialized_var(ret);
176
177 idx = fq_codel_classify(skb, sch, &ret);
178 if (idx == 0) {
179 if (ret & __NET_XMIT_BYPASS)
180 sch->qstats.drops++;
181 kfree_skb(skb);
182 return ret;
183 }
184 idx--;
185
186 codel_set_enqueue_time(skb);
187 flow = &q->flows[idx];
188 flow_queue_add(flow, skb);
189 q->backlogs[idx] += qdisc_pkt_len(skb);
190 sch->qstats.backlog += qdisc_pkt_len(skb);
191
192 if (list_empty(&flow->flowchain)) {
193 list_add_tail(&flow->flowchain, &q->new_flows);
194 codel_vars_init(&flow->cvars);
195 q->new_flow_count++;
196 flow->deficit = q->quantum;
197 flow->dropped = 0;
198 }
199 if (++sch->q.qlen < sch->limit)
200 return NET_XMIT_SUCCESS;
201
202 q->drop_overlimit++;
203 /* Return Congestion Notification only if we dropped a packet
204 * from this flow.
205 */
206 if (fq_codel_drop(sch) == idx)
207 return NET_XMIT_CN;
208
209 /* As we dropped a packet, better let upper stack know this */
210 qdisc_tree_decrease_qlen(sch, 1);
211 return NET_XMIT_SUCCESS;
212}
213
214/* This is the specific function called from codel_dequeue()
215 * to dequeue a packet from queue. Note: backlog is handled in
216 * codel, we dont need to reduce it here.
217 */
218static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
219{
220 struct fq_codel_sched_data *q = qdisc_priv(sch);
221 struct fq_codel_flow *flow;
222 struct sk_buff *skb = NULL;
223
224 flow = container_of(vars, struct fq_codel_flow, cvars);
225 if (flow->head) {
226 skb = dequeue_head(flow);
227 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
228 sch->q.qlen--;
229 }
230 return skb;
231}
232
233static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
234{
235 struct fq_codel_sched_data *q = qdisc_priv(sch);
236 struct sk_buff *skb;
237 struct fq_codel_flow *flow;
238 struct list_head *head;
239 u32 prev_drop_count, prev_ecn_mark;
240
241begin:
242 head = &q->new_flows;
243 if (list_empty(head)) {
244 head = &q->old_flows;
245 if (list_empty(head))
246 return NULL;
247 }
248 flow = list_first_entry(head, struct fq_codel_flow, flowchain);
249
250 if (flow->deficit <= 0) {
251 flow->deficit += q->quantum;
252 list_move_tail(&flow->flowchain, &q->old_flows);
253 goto begin;
254 }
255
256 prev_drop_count = q->cstats.drop_count;
257 prev_ecn_mark = q->cstats.ecn_mark;
258
259 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
260 dequeue);
261
262 flow->dropped += q->cstats.drop_count - prev_drop_count;
263 flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
264
265 if (!skb) {
266 /* force a pass through old_flows to prevent starvation */
267 if ((head == &q->new_flows) && !list_empty(&q->old_flows))
268 list_move_tail(&flow->flowchain, &q->old_flows);
269 else
270 list_del_init(&flow->flowchain);
271 goto begin;
272 }
273 qdisc_bstats_update(sch, skb);
274 flow->deficit -= qdisc_pkt_len(skb);
275 /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
276 * or HTB crashes. Defer it for next round.
277 */
278 if (q->cstats.drop_count && sch->q.qlen) {
279 qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
280 q->cstats.drop_count = 0;
281 }
282 return skb;
283}
284
285static void fq_codel_reset(struct Qdisc *sch)
286{
287 struct sk_buff *skb;
288
289 while ((skb = fq_codel_dequeue(sch)) != NULL)
290 kfree_skb(skb);
291}
292
293static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
294 [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 },
295 [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 },
296 [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
297 [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
298 [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
299 [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
300};
301
302static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
303{
304 struct fq_codel_sched_data *q = qdisc_priv(sch);
305 struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
306 int err;
307
308 if (!opt)
309 return -EINVAL;
310
311 err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
312 if (err < 0)
313 return err;
314 if (tb[TCA_FQ_CODEL_FLOWS]) {
315 if (q->flows)
316 return -EINVAL;
317 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
318 if (!q->flows_cnt ||
319 q->flows_cnt > 65536)
320 return -EINVAL;
321 }
322 sch_tree_lock(sch);
323
324 if (tb[TCA_FQ_CODEL_TARGET]) {
325 u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
326
327 q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
328 }
329
330 if (tb[TCA_FQ_CODEL_INTERVAL]) {
331 u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
332
333 q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
334 }
335
336 if (tb[TCA_FQ_CODEL_LIMIT])
337 sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
338
339 if (tb[TCA_FQ_CODEL_ECN])
340 q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
341
342 if (tb[TCA_FQ_CODEL_QUANTUM])
343 q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
344
345 while (sch->q.qlen > sch->limit) {
346 struct sk_buff *skb = fq_codel_dequeue(sch);
347
348 kfree_skb(skb);
349 q->cstats.drop_count++;
350 }
351 qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
352 q->cstats.drop_count = 0;
353
354 sch_tree_unlock(sch);
355 return 0;
356}
357
358static void *fq_codel_zalloc(size_t sz)
359{
360 void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
361
362 if (!ptr)
363 ptr = vzalloc(sz);
364 return ptr;
365}
366
367static void fq_codel_free(void *addr)
368{
369 if (addr) {
370 if (is_vmalloc_addr(addr))
371 vfree(addr);
372 else
373 kfree(addr);
374 }
375}
376
377static void fq_codel_destroy(struct Qdisc *sch)
378{
379 struct fq_codel_sched_data *q = qdisc_priv(sch);
380
381 tcf_destroy_chain(&q->filter_list);
382 fq_codel_free(q->backlogs);
383 fq_codel_free(q->flows);
384}
385
386static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
387{
388 struct fq_codel_sched_data *q = qdisc_priv(sch);
389 int i;
390
391 sch->limit = 10*1024;
392 q->flows_cnt = 1024;
393 q->quantum = psched_mtu(qdisc_dev(sch));
394 q->perturbation = net_random();
395 INIT_LIST_HEAD(&q->new_flows);
396 INIT_LIST_HEAD(&q->old_flows);
397 codel_params_init(&q->cparams);
398 codel_stats_init(&q->cstats);
399 q->cparams.ecn = true;
400
401 if (opt) {
402 int err = fq_codel_change(sch, opt);
403 if (err)
404 return err;
405 }
406
407 if (!q->flows) {
408 q->flows = fq_codel_zalloc(q->flows_cnt *
409 sizeof(struct fq_codel_flow));
410 if (!q->flows)
411 return -ENOMEM;
412 q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
413 if (!q->backlogs) {
414 fq_codel_free(q->flows);
415 return -ENOMEM;
416 }
417 for (i = 0; i < q->flows_cnt; i++) {
418 struct fq_codel_flow *flow = q->flows + i;
419
420 INIT_LIST_HEAD(&flow->flowchain);
421 }
422 }
423 if (sch->limit >= 1)
424 sch->flags |= TCQ_F_CAN_BYPASS;
425 else
426 sch->flags &= ~TCQ_F_CAN_BYPASS;
427 return 0;
428}
429
430static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
431{
432 struct fq_codel_sched_data *q = qdisc_priv(sch);
433 struct nlattr *opts;
434
435 opts = nla_nest_start(skb, TCA_OPTIONS);
436 if (opts == NULL)
437 goto nla_put_failure;
438
439 if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
440 codel_time_to_us(q->cparams.target)) ||
441 nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
442 sch->limit) ||
443 nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
444 codel_time_to_us(q->cparams.interval)) ||
445 nla_put_u32(skb, TCA_FQ_CODEL_ECN,
446 q->cparams.ecn) ||
447 nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
448 q->quantum) ||
449 nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
450 q->flows_cnt))
451 goto nla_put_failure;
452
453 nla_nest_end(skb, opts);
454 return skb->len;
455
456nla_put_failure:
457 return -1;
458}
459
460static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
461{
462 struct fq_codel_sched_data *q = qdisc_priv(sch);
463 struct tc_fq_codel_xstats st = {
464 .type = TCA_FQ_CODEL_XSTATS_QDISC,
465 };
466 struct list_head *pos;
467
468 st.qdisc_stats.maxpacket = q->cstats.maxpacket;
469 st.qdisc_stats.drop_overlimit = q->drop_overlimit;
470 st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
471 st.qdisc_stats.new_flow_count = q->new_flow_count;
472
473 list_for_each(pos, &q->new_flows)
474 st.qdisc_stats.new_flows_len++;
475
476 list_for_each(pos, &q->old_flows)
477 st.qdisc_stats.old_flows_len++;
478
479 return gnet_stats_copy_app(d, &st, sizeof(st));
480}
481
482static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
483{
484 return NULL;
485}
486
487static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
488{
489 return 0;
490}
491
492static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
493 u32 classid)
494{
495 /* we cannot bypass queue discipline anymore */
496 sch->flags &= ~TCQ_F_CAN_BYPASS;
497 return 0;
498}
499
500static void fq_codel_put(struct Qdisc *q, unsigned long cl)
501{
502}
503
504static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
505{
506 struct fq_codel_sched_data *q = qdisc_priv(sch);
507
508 if (cl)
509 return NULL;
510 return &q->filter_list;
511}
512
513static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
514 struct sk_buff *skb, struct tcmsg *tcm)
515{
516 tcm->tcm_handle |= TC_H_MIN(cl);
517 return 0;
518}
519
520static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
521 struct gnet_dump *d)
522{
523 struct fq_codel_sched_data *q = qdisc_priv(sch);
524 u32 idx = cl - 1;
525 struct gnet_stats_queue qs = { 0 };
526 struct tc_fq_codel_xstats xstats;
527
528 if (idx < q->flows_cnt) {
529 const struct fq_codel_flow *flow = &q->flows[idx];
530 const struct sk_buff *skb = flow->head;
531
532 memset(&xstats, 0, sizeof(xstats));
533 xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
534 xstats.class_stats.deficit = flow->deficit;
535 xstats.class_stats.ldelay =
536 codel_time_to_us(flow->cvars.ldelay);
537 xstats.class_stats.count = flow->cvars.count;
538 xstats.class_stats.lastcount = flow->cvars.lastcount;
539 xstats.class_stats.dropping = flow->cvars.dropping;
540 if (flow->cvars.dropping) {
541 codel_tdiff_t delta = flow->cvars.drop_next -
542 codel_get_time();
543
544 xstats.class_stats.drop_next = (delta >= 0) ?
545 codel_time_to_us(delta) :
546 -codel_time_to_us(-delta);
547 }
548 while (skb) {
549 qs.qlen++;
550 skb = skb->next;
551 }
552 qs.backlog = q->backlogs[idx];
553 qs.drops = flow->dropped;
554 }
555 if (gnet_stats_copy_queue(d, &qs) < 0)
556 return -1;
557 if (idx < q->flows_cnt)
558 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
559 return 0;
560}
561
562static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
563{
564 struct fq_codel_sched_data *q = qdisc_priv(sch);
565 unsigned int i;
566
567 if (arg->stop)
568 return;
569
570 for (i = 0; i < q->flows_cnt; i++) {
571 if (list_empty(&q->flows[i].flowchain) ||
572 arg->count < arg->skip) {
573 arg->count++;
574 continue;
575 }
576 if (arg->fn(sch, i + 1, arg) < 0) {
577 arg->stop = 1;
578 break;
579 }
580 arg->count++;
581 }
582}
583
584static const struct Qdisc_class_ops fq_codel_class_ops = {
585 .leaf = fq_codel_leaf,
586 .get = fq_codel_get,
587 .put = fq_codel_put,
588 .tcf_chain = fq_codel_find_tcf,
589 .bind_tcf = fq_codel_bind,
590 .unbind_tcf = fq_codel_put,
591 .dump = fq_codel_dump_class,
592 .dump_stats = fq_codel_dump_class_stats,
593 .walk = fq_codel_walk,
594};
595
596static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
597 .cl_ops = &fq_codel_class_ops,
598 .id = "fq_codel",
599 .priv_size = sizeof(struct fq_codel_sched_data),
600 .enqueue = fq_codel_enqueue,
601 .dequeue = fq_codel_dequeue,
602 .peek = qdisc_peek_dequeued,
603 .drop = fq_codel_drop,
604 .init = fq_codel_init,
605 .reset = fq_codel_reset,
606 .destroy = fq_codel_destroy,
607 .change = fq_codel_change,
608 .dump = fq_codel_dump,
609 .dump_stats = fq_codel_dump_stats,
610 .owner = THIS_MODULE,
611};
612
613static int __init fq_codel_module_init(void)
614{
615 return register_qdisc(&fq_codel_qdisc_ops);
616}
617
618static void __exit fq_codel_module_exit(void)
619{
620 unregister_qdisc(&fq_codel_qdisc_ops);
621}
622
623module_init(fq_codel_module_init)
624module_exit(fq_codel_module_exit)
625MODULE_AUTHOR("Eric Dumazet");
626MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573e013a..511323e89cec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -86,9 +86,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
86 * deadloop is detected. Return OK to try the next skb. 86 * deadloop is detected. Return OK to try the next skb.
87 */ 87 */
88 kfree_skb(skb); 88 kfree_skb(skb);
89 if (net_ratelimit()) 89 net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
90 pr_warning("Dead loop on netdevice %s, fix it urgently!\n", 90 dev_queue->dev->name);
91 dev_queue->dev->name);
92 ret = qdisc_qlen(q); 91 ret = qdisc_qlen(q);
93 } else { 92 } else {
94 /* 93 /*
@@ -136,9 +135,9 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
136 ret = handle_dev_cpu_collision(skb, txq, q); 135 ret = handle_dev_cpu_collision(skb, txq, q);
137 } else { 136 } else {
138 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 137 /* Driver returned NETDEV_TX_BUSY - requeue skb */
139 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) 138 if (unlikely(ret != NETDEV_TX_BUSY))
140 pr_warning("BUG %s code %d qlen %d\n", 139 net_warn_ratelimited("BUG %s code %d qlen %d\n",
141 dev->name, ret, q->q.qlen); 140 dev->name, ret, q->q.qlen);
142 141
143 ret = dev_requeue_skb(skb, q); 142 ret = dev_requeue_skb(skb, q);
144 } 143 }
@@ -512,7 +511,8 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
512 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 511 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
513 512
514 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); 513 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
515 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 514 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
515 goto nla_put_failure;
516 return skb->len; 516 return skb->len;
517 517
518nla_put_failure: 518nla_put_failure:
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0b15236be7b6..e901583e4ea5 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -255,10 +255,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
255 u16 dp = tc_index_to_dp(skb); 255 u16 dp = tc_index_to_dp(skb);
256 256
257 if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { 257 if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
258 if (net_ratelimit()) 258 net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
259 pr_warning("GRED: Unable to relocate VQ 0x%x " 259 tc_index_to_dp(skb));
260 "after dequeue, screwing up "
261 "backlog.\n", tc_index_to_dp(skb));
262 } else { 260 } else {
263 q->backlog -= qdisc_pkt_len(skb); 261 q->backlog -= qdisc_pkt_len(skb);
264 262
@@ -287,10 +285,8 @@ static unsigned int gred_drop(struct Qdisc *sch)
287 u16 dp = tc_index_to_dp(skb); 285 u16 dp = tc_index_to_dp(skb);
288 286
289 if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { 287 if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
290 if (net_ratelimit()) 288 net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
291 pr_warning("GRED: Unable to relocate VQ 0x%x " 289 tc_index_to_dp(skb));
292 "while dropping, screwing up "
293 "backlog.\n", tc_index_to_dp(skb));
294 } else { 290 } else {
295 q->backlog -= len; 291 q->backlog -= len;
296 q->stats.other++; 292 q->stats.other++;
@@ -521,14 +517,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
521 opts = nla_nest_start(skb, TCA_OPTIONS); 517 opts = nla_nest_start(skb, TCA_OPTIONS);
522 if (opts == NULL) 518 if (opts == NULL)
523 goto nla_put_failure; 519 goto nla_put_failure;
524 NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); 520 if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
521 goto nla_put_failure;
525 522
526 for (i = 0; i < MAX_DPs; i++) { 523 for (i = 0; i < MAX_DPs; i++) {
527 struct gred_sched_data *q = table->tab[i]; 524 struct gred_sched_data *q = table->tab[i];
528 525
529 max_p[i] = q ? q->parms.max_P : 0; 526 max_p[i] = q ? q->parms.max_P : 0;
530 } 527 }
531 NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); 528 if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
529 goto nla_put_failure;
532 530
533 parms = nla_nest_start(skb, TCA_GRED_PARMS); 531 parms = nla_nest_start(skb, TCA_GRED_PARMS);
534 if (parms == NULL) 532 if (parms == NULL)
@@ -565,11 +563,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
565 opt.packets = q->packetsin; 563 opt.packets = q->packetsin;
566 opt.bytesin = q->bytesin; 564 opt.bytesin = q->bytesin;
567 565
568 if (gred_wred_mode(table)) { 566 if (gred_wred_mode(table))
569 q->vars.qidlestart = 567 gred_load_wred_set(table, q);
570 table->tab[table->def]->vars.qidlestart;
571 q->vars.qavg = table->tab[table->def]->vars.qavg;
572 }
573 568
574 opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); 569 opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
575 570
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9bdca2e011e9..6c2ec4510540 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
1305 tsc.m1 = sm2m(sc->sm1); 1305 tsc.m1 = sm2m(sc->sm1);
1306 tsc.d = dx2d(sc->dx); 1306 tsc.d = dx2d(sc->dx);
1307 tsc.m2 = sm2m(sc->sm2); 1307 tsc.m2 = sm2m(sc->sm2);
1308 NLA_PUT(skb, attr, sizeof(tsc), &tsc); 1308 if (nla_put(skb, attr, sizeof(tsc), &tsc))
1309 goto nla_put_failure;
1309 1310
1310 return skb->len; 1311 return skb->len;
1311 1312
@@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1573 } 1574 }
1574 1575
1575 qopt.defcls = q->defcls; 1576 qopt.defcls = q->defcls;
1576 NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 1577 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1578 goto nla_put_failure;
1577 return skb->len; 1579 return skb->len;
1578 1580
1579 nla_put_failure: 1581 nla_put_failure:
@@ -1607,7 +1609,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1607 if (cl->qdisc->q.qlen == 1) 1609 if (cl->qdisc->q.qlen == 1)
1608 set_active(cl, qdisc_pkt_len(skb)); 1610 set_active(cl, qdisc_pkt_len(skb));
1609 1611
1610 bstats_update(&cl->bstats, skb);
1611 sch->q.qlen++; 1612 sch->q.qlen++;
1612 1613
1613 return NET_XMIT_SUCCESS; 1614 return NET_XMIT_SUCCESS;
@@ -1655,6 +1656,7 @@ hfsc_dequeue(struct Qdisc *sch)
1655 return NULL; 1656 return NULL;
1656 } 1657 }
1657 1658
1659 bstats_update(&cl->bstats, skb);
1658 update_vf(cl, qdisc_pkt_len(skb), cur_time); 1660 update_vf(cl, qdisc_pkt_len(skb), cur_time);
1659 if (realtime) 1661 if (realtime)
1660 cl->cl_cumul += qdisc_pkt_len(skb); 1662 cl->cl_cumul += qdisc_pkt_len(skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942ce9e82..9d75b7761313 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
558 __skb_queue_tail(&q->direct_queue, skb); 558 __skb_queue_tail(&q->direct_queue, skb);
559 q->direct_pkts++; 559 q->direct_pkts++;
560 } else { 560 } else {
561 kfree_skb(skb); 561 return qdisc_drop(skb, sch);
562 sch->qstats.drops++;
563 return NET_XMIT_DROP;
564 } 562 }
565#ifdef CONFIG_NET_CLS_ACT 563#ifdef CONFIG_NET_CLS_ACT
566 } else if (!cl) { 564 } else if (!cl) {
@@ -576,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
576 } 574 }
577 return ret; 575 return ret;
578 } else { 576 } else {
579 bstats_update(&cl->bstats, skb);
580 htb_activate(q, cl); 577 htb_activate(q, cl);
581 } 578 }
582 579
@@ -837,6 +834,7 @@ next:
837 } while (cl != start); 834 } while (cl != start);
838 835
839 if (likely(skb != NULL)) { 836 if (likely(skb != NULL)) {
837 bstats_update(&cl->bstats, skb);
840 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); 838 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
841 if (cl->un.leaf.deficit[level] < 0) { 839 if (cl->un.leaf.deficit[level] < 0) {
842 cl->un.leaf.deficit[level] += cl->quantum; 840 cl->un.leaf.deficit[level] += cl->quantum;
@@ -1051,7 +1049,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1051 nest = nla_nest_start(skb, TCA_OPTIONS); 1049 nest = nla_nest_start(skb, TCA_OPTIONS);
1052 if (nest == NULL) 1050 if (nest == NULL)
1053 goto nla_put_failure; 1051 goto nla_put_failure;
1054 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1052 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
1053 goto nla_put_failure;
1055 nla_nest_end(skb, nest); 1054 nla_nest_end(skb, nest);
1056 1055
1057 spin_unlock_bh(root_lock); 1056 spin_unlock_bh(root_lock);
@@ -1090,7 +1089,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1090 opt.quantum = cl->quantum; 1089 opt.quantum = cl->quantum;
1091 opt.prio = cl->prio; 1090 opt.prio = cl->prio;
1092 opt.level = cl->level; 1091 opt.level = cl->level;
1093 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1092 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1093 goto nla_put_failure;
1094 1094
1095 nla_nest_end(skb, nest); 1095 nla_nest_end(skb, nest);
1096 spin_unlock_bh(root_lock); 1096 spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 28de43092330..d1831ca966d4 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
247 opt.offset[i] = dev->tc_to_txq[i].offset; 247 opt.offset[i] = dev->tc_to_txq[i].offset;
248 } 248 }
249 249
250 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 250 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
251 goto nla_put_failure;
251 252
252 return skb->len; 253 return skb->len;
253nla_put_failure: 254nla_put_failure:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 49131d7a7446..2a2b096d9a66 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
284 opt.bands = q->bands; 284 opt.bands = q->bands;
285 opt.max_bands = q->max_bands; 285 opt.max_bands = q->max_bands;
286 286
287 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 287 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
288 goto nla_put_failure;
288 289
289 return skb->len; 290 return skb->len;
290 291
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..a2a95aabf9c2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -26,6 +26,7 @@
26 26
27#include <net/netlink.h> 27#include <net/netlink.h>
28#include <net/pkt_sched.h> 28#include <net/pkt_sched.h>
29#include <net/inet_ecn.h>
29 30
30#define VERSION "1.3" 31#define VERSION "1.3"
31 32
@@ -78,6 +79,7 @@ struct netem_sched_data {
78 psched_tdiff_t jitter; 79 psched_tdiff_t jitter;
79 80
80 u32 loss; 81 u32 loss;
82 u32 ecn;
81 u32 limit; 83 u32 limit;
82 u32 counter; 84 u32 counter;
83 u32 gap; 85 u32 gap;
@@ -374,9 +376,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
374 ++count; 376 ++count;
375 377
376 /* Drop packet? */ 378 /* Drop packet? */
377 if (loss_event(q)) 379 if (loss_event(q)) {
378 --count; 380 if (q->ecn && INET_ECN_set_ce(skb))
379 381 sch->qstats.drops++; /* mark packet */
382 else
383 --count;
384 }
380 if (count == 0) { 385 if (count == 0) {
381 sch->qstats.drops++; 386 sch->qstats.drops++;
382 kfree_skb(skb); 387 kfree_skb(skb);
@@ -408,10 +413,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
408 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 413 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
409 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 414 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
410 (skb->ip_summed == CHECKSUM_PARTIAL && 415 (skb->ip_summed == CHECKSUM_PARTIAL &&
411 skb_checksum_help(skb))) { 416 skb_checksum_help(skb)))
412 sch->qstats.drops++; 417 return qdisc_drop(skb, sch);
413 return NET_XMIT_DROP;
414 }
415 418
416 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 419 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
417 } 420 }
@@ -706,6 +709,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
706 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 709 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
707 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, 710 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
708 [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, 711 [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
712 [TCA_NETEM_ECN] = { .type = NLA_U32 },
709}; 713};
710 714
711static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, 715static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -776,6 +780,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
776 if (tb[TCA_NETEM_RATE]) 780 if (tb[TCA_NETEM_RATE])
777 get_rate(sch, tb[TCA_NETEM_RATE]); 781 get_rate(sch, tb[TCA_NETEM_RATE]);
778 782
783 if (tb[TCA_NETEM_ECN])
784 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
785
779 q->loss_model = CLG_RANDOM; 786 q->loss_model = CLG_RANDOM;
780 if (tb[TCA_NETEM_LOSS]) 787 if (tb[TCA_NETEM_LOSS])
781 ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); 788 ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -834,7 +841,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
834 .p23 = q->clg.a5, 841 .p23 = q->clg.a5,
835 }; 842 };
836 843
837 NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); 844 if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
845 goto nla_put_failure;
838 break; 846 break;
839 } 847 }
840 case CLG_GILB_ELL: { 848 case CLG_GILB_ELL: {
@@ -845,7 +853,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
845 .k1 = q->clg.a4, 853 .k1 = q->clg.a4,
846 }; 854 };
847 855
848 NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); 856 if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
857 goto nla_put_failure;
849 break; 858 break;
850 } 859 }
851 } 860 }
@@ -874,26 +883,34 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
874 qopt.loss = q->loss; 883 qopt.loss = q->loss;
875 qopt.gap = q->gap; 884 qopt.gap = q->gap;
876 qopt.duplicate = q->duplicate; 885 qopt.duplicate = q->duplicate;
877 NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 886 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
887 goto nla_put_failure;
878 888
879 cor.delay_corr = q->delay_cor.rho; 889 cor.delay_corr = q->delay_cor.rho;
880 cor.loss_corr = q->loss_cor.rho; 890 cor.loss_corr = q->loss_cor.rho;
881 cor.dup_corr = q->dup_cor.rho; 891 cor.dup_corr = q->dup_cor.rho;
882 NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 892 if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
893 goto nla_put_failure;
883 894
884 reorder.probability = q->reorder; 895 reorder.probability = q->reorder;
885 reorder.correlation = q->reorder_cor.rho; 896 reorder.correlation = q->reorder_cor.rho;
886 NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 897 if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
898 goto nla_put_failure;
887 899
888 corrupt.probability = q->corrupt; 900 corrupt.probability = q->corrupt;
889 corrupt.correlation = q->corrupt_cor.rho; 901 corrupt.correlation = q->corrupt_cor.rho;
890 NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 902 if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
903 goto nla_put_failure;
891 904
892 rate.rate = q->rate; 905 rate.rate = q->rate;
893 rate.packet_overhead = q->packet_overhead; 906 rate.packet_overhead = q->packet_overhead;
894 rate.cell_size = q->cell_size; 907 rate.cell_size = q->cell_size;
895 rate.cell_overhead = q->cell_overhead; 908 rate.cell_overhead = q->cell_overhead;
896 NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); 909 if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
910 goto nla_put_failure;
911
912 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
913 goto nla_put_failure;
897 914
898 if (dump_loss_model(q, skb) != 0) 915 if (dump_loss_model(q, skb) != 0)
899 goto nla_put_failure; 916 goto nla_put_failure;
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
new file mode 100644
index 000000000000..89f8fcf73f18
--- /dev/null
+++ b/net/sched/sch_plug.c
@@ -0,0 +1,233 @@
1/*
2 * sch_plug.c Queue traffic until an explicit release command
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * There are two ways to use this qdisc:
10 * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
11 * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
12 *
13 * 2. For network output buffering (a.k.a output commit) functionality.
14 * Output commit property is commonly used by applications using checkpoint
15 * based fault-tolerance to ensure that the checkpoint from which a system
16 * is being restored is consistent w.r.t outside world.
17 *
18 * Consider for e.g. Remus - a Virtual Machine checkpointing system,
19 * wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated
20 * asynchronously to the backup host, while the VM continues executing the
21 * next epoch speculatively.
22 *
23 * The following is a typical sequence of output buffer operations:
24 * 1.At epoch i, start_buffer(i)
25 * 2. At end of epoch i (i.e. after 50ms):
26 * 2.1 Stop VM and take checkpoint(i).
27 * 2.2 start_buffer(i+1) and Resume VM
28 * 3. While speculatively executing epoch(i+1), asynchronously replicate
29 * checkpoint(i) to backup host.
30 * 4. When checkpoint_ack(i) is received from backup, release_buffer(i)
31 * Thus, this Qdisc would receive the following sequence of commands:
32 * TCQ_PLUG_BUFFER (epoch i)
33 * .. TCQ_PLUG_BUFFER (epoch i+1)
34 * ....TCQ_PLUG_RELEASE_ONE (epoch i)
35 * ......TCQ_PLUG_BUFFER (epoch i+2)
36 * ........
37 */
38
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/kernel.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <net/pkt_sched.h>
46
47/*
48 * State of the queue, when used for network output buffering:
49 *
50 * plug(i+1) plug(i) head
51 * ------------------+--------------------+---------------->
52 * | |
53 * | |
54 * pkts_current_epoch| pkts_last_epoch |pkts_to_release
55 * ----------------->|<--------+--------->|+--------------->
56 * v v
57 *
58 */
59
60struct plug_sched_data {
61 /* If true, the dequeue function releases all packets
62 * from head to end of the queue. The queue turns into
63 * a pass-through queue for newly arriving packets.
64 */
65 bool unplug_indefinite;
66
67 /* Queue Limit in bytes */
68 u32 limit;
69
70 /* Number of packets (output) from the current speculatively
71 * executing epoch.
72 */
73 u32 pkts_current_epoch;
74
75 /* Number of packets corresponding to the recently finished
76 * epoch. These will be released when we receive a
77 * TCQ_PLUG_RELEASE_ONE command. This command is typically
78 * issued after committing a checkpoint at the target.
79 */
80 u32 pkts_last_epoch;
81
82 /*
83 * Number of packets from the head of the queue, that can
84 * be released (committed checkpoint).
85 */
86 u32 pkts_to_release;
87};
88
89static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
90{
91 struct plug_sched_data *q = qdisc_priv(sch);
92
93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
94 if (!q->unplug_indefinite)
95 q->pkts_current_epoch++;
96 return qdisc_enqueue_tail(skb, sch);
97 }
98
99 return qdisc_reshape_fail(skb, sch);
100}
101
102static struct sk_buff *plug_dequeue(struct Qdisc *sch)
103{
104 struct plug_sched_data *q = qdisc_priv(sch);
105
106 if (qdisc_is_throttled(sch))
107 return NULL;
108
109 if (!q->unplug_indefinite) {
110 if (!q->pkts_to_release) {
111 /* No more packets to dequeue. Block the queue
112 * and wait for the next release command.
113 */
114 qdisc_throttled(sch);
115 return NULL;
116 }
117 q->pkts_to_release--;
118 }
119
120 return qdisc_dequeue_head(sch);
121}
122
123static int plug_init(struct Qdisc *sch, struct nlattr *opt)
124{
125 struct plug_sched_data *q = qdisc_priv(sch);
126
127 q->pkts_current_epoch = 0;
128 q->pkts_last_epoch = 0;
129 q->pkts_to_release = 0;
130 q->unplug_indefinite = false;
131
132 if (opt == NULL) {
133 /* We will set a default limit of 100 pkts (~150kB)
134 * in case tx_queue_len is not available. The
135 * default value is completely arbitrary.
136 */
137 u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
138 q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
139 } else {
140 struct tc_plug_qopt *ctl = nla_data(opt);
141
142 if (nla_len(opt) < sizeof(*ctl))
143 return -EINVAL;
144
145 q->limit = ctl->limit;
146 }
147
148 qdisc_throttled(sch);
149 return 0;
150}
151
152/* Receives 4 types of messages:
153 * TCQ_PLUG_BUFFER: Inset a plug into the queue and
154 * buffer any incoming packets
155 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
156 * to beginning of the next plug.
157 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
158 * Stop buffering packets until the next TCQ_PLUG_BUFFER
159 * command is received (just act as a pass-thru queue).
160 * TCQ_PLUG_LIMIT: Increase/decrease queue size
161 */
162static int plug_change(struct Qdisc *sch, struct nlattr *opt)
163{
164 struct plug_sched_data *q = qdisc_priv(sch);
165 struct tc_plug_qopt *msg;
166
167 if (opt == NULL)
168 return -EINVAL;
169
170 msg = nla_data(opt);
171 if (nla_len(opt) < sizeof(*msg))
172 return -EINVAL;
173
174 switch (msg->action) {
175 case TCQ_PLUG_BUFFER:
176 /* Save size of the current buffer */
177 q->pkts_last_epoch = q->pkts_current_epoch;
178 q->pkts_current_epoch = 0;
179 if (q->unplug_indefinite)
180 qdisc_throttled(sch);
181 q->unplug_indefinite = false;
182 break;
183 case TCQ_PLUG_RELEASE_ONE:
184 /* Add packets from the last complete buffer to the
185 * packets to be released set.
186 */
187 q->pkts_to_release += q->pkts_last_epoch;
188 q->pkts_last_epoch = 0;
189 qdisc_unthrottled(sch);
190 netif_schedule_queue(sch->dev_queue);
191 break;
192 case TCQ_PLUG_RELEASE_INDEFINITE:
193 q->unplug_indefinite = true;
194 q->pkts_to_release = 0;
195 q->pkts_last_epoch = 0;
196 q->pkts_current_epoch = 0;
197 qdisc_unthrottled(sch);
198 netif_schedule_queue(sch->dev_queue);
199 break;
200 case TCQ_PLUG_LIMIT:
201 /* Limit is supplied in bytes */
202 q->limit = msg->limit;
203 break;
204 default:
205 return -EINVAL;
206 }
207
208 return 0;
209}
210
211static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
212 .id = "plug",
213 .priv_size = sizeof(struct plug_sched_data),
214 .enqueue = plug_enqueue,
215 .dequeue = plug_dequeue,
216 .peek = qdisc_peek_head,
217 .init = plug_init,
218 .change = plug_change,
219 .owner = THIS_MODULE,
220};
221
222static int __init plug_module_init(void)
223{
224 return register_qdisc(&plug_qdisc_ops);
225}
226
227static void __exit plug_module_exit(void)
228{
229 unregister_qdisc(&plug_qdisc_ops);
230}
231module_init(plug_module_init)
232module_exit(plug_module_exit)
233MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b5d56a22b1d2..79359b69ad8d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
247 opt.bands = q->bands; 247 opt.bands = q->bands;
248 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); 248 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
249 249
250 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 250 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
251 goto nla_put_failure;
251 252
252 return skb->len; 253 return skb->len;
253 254
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e68cb440756a..9af01f3df18c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
429 nest = nla_nest_start(skb, TCA_OPTIONS); 429 nest = nla_nest_start(skb, TCA_OPTIONS);
430 if (nest == NULL) 430 if (nest == NULL)
431 goto nla_put_failure; 431 goto nla_put_failure;
432 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w); 432 if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
433 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax); 433 nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
434 goto nla_put_failure;
434 return nla_nest_end(skb, nest); 435 return nla_nest_end(skb, nest);
435 436
436nla_put_failure: 437nla_put_failure:
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a5cc3012cf42..633e32defdcc 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
272 opts = nla_nest_start(skb, TCA_OPTIONS); 272 opts = nla_nest_start(skb, TCA_OPTIONS);
273 if (opts == NULL) 273 if (opts == NULL)
274 goto nla_put_failure; 274 goto nla_put_failure;
275 NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); 275 if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
276 NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P); 276 nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
277 goto nla_put_failure;
277 return nla_nest_end(skb, opts); 278 return nla_nest_end(skb, opts);
278 279
279nla_put_failure: 280nla_put_failure:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d7eea99333e9..74305c883bd3 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
570 570
571 sch->qstats.backlog = q->qdisc->qstats.backlog; 571 sch->qstats.backlog = q->qdisc->qstats.backlog;
572 opts = nla_nest_start(skb, TCA_OPTIONS); 572 opts = nla_nest_start(skb, TCA_OPTIONS);
573 NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt); 573 if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
574 goto nla_put_failure;
574 return nla_nest_end(skb, opts); 575 return nla_nest_end(skb, opts);
575 576
576nla_put_failure: 577nla_put_failure:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 02a21abea65e..d3a1bc26dbfc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
812 memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); 812 memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
813 opt.flags = q->flags; 813 opt.flags = q->flags;
814 814
815 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 815 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
816 goto nla_put_failure;
816 817
817 return skb->len; 818 return skb->len;
818 819
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8e156319d7b..4b056c15e90c 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
359 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 359 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
360 opt.mtu = q->mtu; 360 opt.mtu = q->mtu;
361 opt.buffer = q->buffer; 361 opt.buffer = q->buffer;
362 NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); 362 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
363 goto nla_put_failure;
363 364
364 nla_nest_end(skb, nest); 365 nla_nest_end(skb, nest);
365 return skb->len; 366 return skb->len;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45326599fda3..ca0c29695d51 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
88 return NET_XMIT_SUCCESS; 88 return NET_XMIT_SUCCESS;
89 } 89 }
90 90
91 kfree_skb(skb); 91 return qdisc_drop(skb, sch);
92 sch->qstats.drops++;
93 return NET_XMIT_DROP;
94} 92}
95 93
96static struct sk_buff * 94static struct sk_buff *
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index acd2edbc073e..5bc9ab161b37 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1408} 1408}
1409 1409
1410/* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1410/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1411void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) 1411void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1412{ 1412{
1413 struct sctp_chunk *sack; 1413 struct sctp_chunk *sack;
1414 struct timer_list *timer; 1414 struct timer_list *timer;
@@ -1465,7 +1465,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1465} 1465}
1466 1466
1467/* Decrease asoc's rwnd by len. */ 1467/* Decrease asoc's rwnd by len. */
1468void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) 1468void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1469{ 1469{
1470 int rx_count; 1470 int rx_count;
1471 int over = 0; 1471 int over = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af71384..80564fe03024 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
342 sctp_bh_lock_sock(sk); 342 sctp_bh_lock_sock(sk);
343 343
344 if (sock_owned_by_user(sk)) { 344 if (sock_owned_by_user(sk)) {
345 if (sk_add_backlog(sk, skb)) 345 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
346 sctp_chunk_free(chunk); 346 sctp_chunk_free(chunk);
347 else 347 else
348 backloged = 1; 348 backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
376 struct sctp_ep_common *rcvr = chunk->rcvr; 376 struct sctp_ep_common *rcvr = chunk->rcvr;
377 int ret; 377 int ret;
378 378
379 ret = sk_add_backlog(sk, skb); 379 ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
380 if (!ret) { 380 if (!ret) {
381 /* Hold the assoc/ep while hanging on the backlog queue. 381 /* Hold the assoc/ep while hanging on the backlog queue.
382 * This way, we know structures we need will not disappear 382 * This way, we know structures we need will not disappear
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f41..f1b7d4bb591e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
377 */ 377 */
378 skb_set_owner_w(nskb, sk); 378 skb_set_owner_w(nskb, sk);
379 379
380 /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ 380 if (!sctp_transport_dst_check(tp)) {
381 if (!dst || (dst->obsolete > 1)) {
382 dst_release(dst);
383 sctp_transport_route(tp, NULL, sctp_sk(sk)); 381 sctp_transport_route(tp, NULL, sctp_sk(sk));
384 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { 382 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
385 sctp_assoc_sync_pmtu(asoc); 383 sctp_assoc_sync_pmtu(asoc);
@@ -663,8 +661,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
663 */ 661 */
664 if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) && 662 if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
665 inflight && sctp_state(asoc, ESTABLISHED)) { 663 inflight && sctp_state(asoc, ESTABLISHED)) {
666 unsigned max = transport->pathmtu - packet->overhead; 664 unsigned int max = transport->pathmtu - packet->overhead;
667 unsigned len = chunk->skb->len + q->out_qlen; 665 unsigned int len = chunk->skb->len + q->out_qlen;
668 666
669 /* Check whether this chunk and all the rest of pending 667 /* Check whether this chunk and all the rest of pending
670 * data will fit or delay in hopes of bundling a full 668 * data will fit or delay in hopes of bundling a full
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index cfeb1d4a1ee6..a0fa19f5650c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1147,7 +1147,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1147 __u32 sack_ctsn, ctsn, tsn; 1147 __u32 sack_ctsn, ctsn, tsn;
1148 __u32 highest_tsn, highest_new_tsn; 1148 __u32 highest_tsn, highest_new_tsn;
1149 __u32 sack_a_rwnd; 1149 __u32 sack_a_rwnd;
1150 unsigned outstanding; 1150 unsigned int outstanding;
1151 struct sctp_transport *primary = asoc->peer.primary_path; 1151 struct sctp_transport *primary = asoc->peer.primary_path;
1152 int count_of_newacks = 0; 1152 int count_of_newacks = 0;
1153 int gap_ack_blocks; 1153 int gap_ack_blocks;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1ff51c9d18d5..c96d1a81cf42 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -524,7 +524,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
524/* Worker routine to handle INIT command failure. */ 524/* Worker routine to handle INIT command failure. */
525static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, 525static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
526 struct sctp_association *asoc, 526 struct sctp_association *asoc,
527 unsigned error) 527 unsigned int error)
528{ 528{
529 struct sctp_ulpevent *event; 529 struct sctp_ulpevent *event;
530 530
@@ -550,7 +550,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
550 sctp_event_t event_type, 550 sctp_event_t event_type,
551 sctp_subtype_t subtype, 551 sctp_subtype_t subtype,
552 struct sctp_chunk *chunk, 552 struct sctp_chunk *chunk,
553 unsigned error) 553 unsigned int error)
554{ 554{
555 struct sctp_ulpevent *event; 555 struct sctp_ulpevent *event;
556 556
@@ -1161,9 +1161,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
1161 break; 1161 break;
1162 1162
1163 case SCTP_DISPOSITION_VIOLATION: 1163 case SCTP_DISPOSITION_VIOLATION:
1164 if (net_ratelimit()) 1164 net_err_ratelimited("protocol violation state %d chunkid %d\n",
1165 pr_err("protocol violation state %d chunkid %d\n", 1165 state, subtype.chunk);
1166 state, subtype.chunk);
1167 break; 1166 break;
1168 1167
1169 case SCTP_DISPOSITION_NOT_IMPL: 1168 case SCTP_DISPOSITION_NOT_IMPL:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 891f5db8cc31..9fca10357350 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1129,17 +1129,15 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1129 /* This should never happen, but lets log it if so. */ 1129 /* This should never happen, but lets log it if so. */
1130 if (unlikely(!link)) { 1130 if (unlikely(!link)) {
1131 if (from_addr.sa.sa_family == AF_INET6) { 1131 if (from_addr.sa.sa_family == AF_INET6) {
1132 if (net_ratelimit()) 1132 net_warn_ratelimited("%s association %p could not find address %pI6\n",
1133 pr_warn("%s association %p could not find address %pI6\n", 1133 __func__,
1134 __func__, 1134 asoc,
1135 asoc, 1135 &from_addr.v6.sin6_addr);
1136 &from_addr.v6.sin6_addr);
1137 } else { 1136 } else {
1138 if (net_ratelimit()) 1137 net_warn_ratelimited("%s association %p could not find address %pI4\n",
1139 pr_warn("%s association %p could not find address %pI4\n", 1138 __func__,
1140 __func__, 1139 asoc,
1141 asoc, 1140 &from_addr.v4.sin_addr.s_addr);
1142 &from_addr.v4.sin_addr.s_addr);
1143 } 1141 }
1144 return SCTP_DISPOSITION_DISCARD; 1142 return SCTP_DISPOSITION_DISCARD;
1145 } 1143 }
@@ -2410,7 +2408,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2410 sctp_cmd_seq_t *commands) 2408 sctp_cmd_seq_t *commands)
2411{ 2409{
2412 struct sctp_chunk *chunk = arg; 2410 struct sctp_chunk *chunk = arg;
2413 unsigned len; 2411 unsigned int len;
2414 __be16 error = SCTP_ERROR_NO_ERROR; 2412 __be16 error = SCTP_ERROR_NO_ERROR;
2415 2413
2416 /* See if we have an error cause code in the chunk. */ 2414 /* See if we have an error cause code in the chunk. */
@@ -2446,7 +2444,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2446 sctp_cmd_seq_t *commands) 2444 sctp_cmd_seq_t *commands)
2447{ 2445{
2448 struct sctp_chunk *chunk = arg; 2446 struct sctp_chunk *chunk = arg;
2449 unsigned len; 2447 unsigned int len;
2450 __be16 error = SCTP_ERROR_NO_ERROR; 2448 __be16 error = SCTP_ERROR_NO_ERROR;
2451 2449
2452 if (!sctp_vtag_verify_either(chunk, asoc)) 2450 if (!sctp_vtag_verify_either(chunk, asoc))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 408ebd0e7330..b3b8a8d813eb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4133static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4133static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4134 int __user *optlen) 4134 int __user *optlen)
4135{ 4135{
4136 if (len < sizeof(struct sctp_event_subscribe)) 4136 if (len <= 0)
4137 return -EINVAL; 4137 return -EINVAL;
4138 len = sizeof(struct sctp_event_subscribe); 4138 if (len > sizeof(struct sctp_event_subscribe))
4139 len = sizeof(struct sctp_event_subscribe);
4139 if (put_user(len, optlen)) 4140 if (put_user(len, optlen))
4140 return -EFAULT; 4141 return -EFAULT;
4141 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4142 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
@@ -4170,14 +4171,16 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
4170} 4171}
4171 4172
4172/* Helper routine to branch off an association to a new socket. */ 4173/* Helper routine to branch off an association to a new socket. */
4173SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc, 4174int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4174 struct socket **sockp)
4175{ 4175{
4176 struct sock *sk = asoc->base.sk; 4176 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4177 struct socket *sock; 4177 struct socket *sock;
4178 struct sctp_af *af; 4178 struct sctp_af *af;
4179 int err = 0; 4179 int err = 0;
4180 4180
4181 if (!asoc)
4182 return -EINVAL;
4183
4181 /* An association cannot be branched off from an already peeled-off 4184 /* An association cannot be branched off from an already peeled-off
4182 * socket, nor is this supported for tcp style sockets. 4185 * socket, nor is this supported for tcp style sockets.
4183 */ 4186 */
@@ -4206,13 +4209,13 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
4206 4209
4207 return err; 4210 return err;
4208} 4211}
4212EXPORT_SYMBOL(sctp_do_peeloff);
4209 4213
4210static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4214static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4211{ 4215{
4212 sctp_peeloff_arg_t peeloff; 4216 sctp_peeloff_arg_t peeloff;
4213 struct socket *newsock; 4217 struct socket *newsock;
4214 int retval = 0; 4218 int retval = 0;
4215 struct sctp_association *asoc;
4216 4219
4217 if (len < sizeof(sctp_peeloff_arg_t)) 4220 if (len < sizeof(sctp_peeloff_arg_t))
4218 return -EINVAL; 4221 return -EINVAL;
@@ -4220,15 +4223,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
4220 if (copy_from_user(&peeloff, optval, len)) 4223 if (copy_from_user(&peeloff, optval, len))
4221 return -EFAULT; 4224 return -EFAULT;
4222 4225
4223 asoc = sctp_id2assoc(sk, peeloff.associd); 4226 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
4224 if (!asoc) {
4225 retval = -EINVAL;
4226 goto out;
4227 }
4228
4229 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc);
4230
4231 retval = sctp_do_peeloff(asoc, &newsock);
4232 if (retval < 0) 4227 if (retval < 0)
4233 goto out; 4228 goto out;
4234 4229
@@ -4239,8 +4234,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
4239 goto out; 4234 goto out;
4240 } 4235 }
4241 4236
4242 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", 4237 SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
4243 __func__, sk, asoc, newsock->sk, retval); 4238 __func__, sk, newsock->sk, retval);
4244 4239
4245 /* Return the fd mapped to the new socket. */ 4240 /* Return the fd mapped to the new socket. */
4246 peeloff.sd = retval; 4241 peeloff.sd = retval;
@@ -5845,10 +5840,8 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
5845 if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { 5840 if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
5846 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); 5841 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
5847 if (IS_ERR(tfm)) { 5842 if (IS_ERR(tfm)) {
5848 if (net_ratelimit()) { 5843 net_info_ratelimited("failed to load transform for %s: %ld\n",
5849 pr_info("failed to load transform for %s: %ld\n", 5844 sctp_hmac_alg, PTR_ERR(tfm));
5850 sctp_hmac_alg, PTR_ERR(tfm));
5851 }
5852 return -ENOSYS; 5845 return -ENOSYS;
5853 } 5846 }
5854 sctp_sk(sk)->hmac = tfm; 5847 sctp_sk(sk)->hmac = tfm;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 60ffbd067ff7..e5fe639c89e7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,22 +275,16 @@ static ctl_table sctp_table[] = {
275 { /* sentinel */ } 275 { /* sentinel */ }
276}; 276};
277 277
278static struct ctl_path sctp_path[] = {
279 { .procname = "net", },
280 { .procname = "sctp", },
281 { }
282};
283
284static struct ctl_table_header * sctp_sysctl_header; 278static struct ctl_table_header * sctp_sysctl_header;
285 279
286/* Sysctl registration. */ 280/* Sysctl registration. */
287void sctp_sysctl_register(void) 281void sctp_sysctl_register(void)
288{ 282{
289 sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table); 283 sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
290} 284}
291 285
292/* Sysctl deregistration. */ 286/* Sysctl deregistration. */
293void sctp_sysctl_unregister(void) 287void sctp_sysctl_unregister(void)
294{ 288{
295 unregister_sysctl_table(sctp_sysctl_header); 289 unregister_net_sysctl_table(sctp_sysctl_header);
296} 290}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b04..b026ba0c6992 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
227} 227}
228 228
229/* this is a complete rip-off from __sk_dst_check
230 * the cookie is always 0 since this is how it's used in the
231 * pmtu code
232 */
233static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
234{
235 struct dst_entry *dst = t->dst;
236
237 if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
238 dst_release(t->dst);
239 t->dst = NULL;
240 return NULL;
241 }
242
243 return dst;
244}
245
246void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 229void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
247{ 230{
248 struct dst_entry *dst; 231 struct dst_entry *dst;
diff --git a/net/socket.c b/net/socket.c
index 28a96af484b4..6e0ccc09b313 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -181,7 +181,7 @@ static DEFINE_PER_CPU(int, sockets_in_use);
181 * invalid addresses -EFAULT is returned. On a success 0 is returned. 181 * invalid addresses -EFAULT is returned. On a success 0 is returned.
182 */ 182 */
183 183
184int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) 184int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
185{ 185{
186 if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) 186 if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
187 return -EINVAL; 187 return -EINVAL;
@@ -209,7 +209,7 @@ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
209 * specified. Zero is returned for a success. 209 * specified. Zero is returned for a success.
210 */ 210 */
211 211
212static int move_addr_to_user(struct sockaddr *kaddr, int klen, 212static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
213 void __user *uaddr, int __user *ulen) 213 void __user *uaddr, int __user *ulen)
214{ 214{
215 int err; 215 int err;
@@ -479,7 +479,7 @@ static struct socket *sock_alloc(void)
479 inode->i_uid = current_fsuid(); 479 inode->i_uid = current_fsuid();
480 inode->i_gid = current_fsgid(); 480 inode->i_gid = current_fsgid();
481 481
482 percpu_add(sockets_in_use, 1); 482 this_cpu_add(sockets_in_use, 1);
483 return sock; 483 return sock;
484} 484}
485 485
@@ -522,7 +522,7 @@ void sock_release(struct socket *sock)
522 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) 522 if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
523 printk(KERN_ERR "sock_release: fasync list not empty!\n"); 523 printk(KERN_ERR "sock_release: fasync list not empty!\n");
524 524
525 percpu_sub(sockets_in_use, 1); 525 this_cpu_sub(sockets_in_use, 1);
526 if (!sock->file) { 526 if (!sock->file) {
527 iput(SOCK_INODE(sock)); 527 iput(SOCK_INODE(sock));
528 return; 528 return;
@@ -811,9 +811,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
811 811
812 sock = file->private_data; 812 sock = file->private_data;
813 813
814 flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; 814 flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
815 if (more) 815 /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
816 flags |= MSG_MORE; 816 flags |= more;
817 817
818 return kernel_sendpage(sock, page, offset, size, flags); 818 return kernel_sendpage(sock, page, offset, size, flags);
819} 819}
@@ -1234,8 +1234,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
1234 */ 1234 */
1235 sock = sock_alloc(); 1235 sock = sock_alloc();
1236 if (!sock) { 1236 if (!sock) {
1237 if (net_ratelimit()) 1237 net_warn_ratelimited("socket: no more sockets\n");
1238 printk(KERN_WARNING "socket: no more sockets\n");
1239 return -ENFILE; /* Not exactly a match, but its the 1238 return -ENFILE; /* Not exactly a match, but its the
1240 closest posix thing */ 1239 closest posix thing */
1241 } 1240 }
@@ -1449,7 +1448,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
1449 1448
1450 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1449 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1451 if (sock) { 1450 if (sock) {
1452 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); 1451 err = move_addr_to_kernel(umyaddr, addrlen, &address);
1453 if (err >= 0) { 1452 if (err >= 0) {
1454 err = security_socket_bind(sock, 1453 err = security_socket_bind(sock,
1455 (struct sockaddr *)&address, 1454 (struct sockaddr *)&address,
@@ -1479,7 +1478,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
1479 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1478 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1480 if (sock) { 1479 if (sock) {
1481 somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; 1480 somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
1482 if ((unsigned)backlog > somaxconn) 1481 if ((unsigned int)backlog > somaxconn)
1483 backlog = somaxconn; 1482 backlog = somaxconn;
1484 1483
1485 err = security_socket_listen(sock, backlog); 1484 err = security_socket_listen(sock, backlog);
@@ -1556,7 +1555,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1556 err = -ECONNABORTED; 1555 err = -ECONNABORTED;
1557 goto out_fd; 1556 goto out_fd;
1558 } 1557 }
1559 err = move_addr_to_user((struct sockaddr *)&address, 1558 err = move_addr_to_user(&address,
1560 len, upeer_sockaddr, upeer_addrlen); 1559 len, upeer_sockaddr, upeer_addrlen);
1561 if (err < 0) 1560 if (err < 0)
1562 goto out_fd; 1561 goto out_fd;
@@ -1605,7 +1604,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
1605 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1604 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1606 if (!sock) 1605 if (!sock)
1607 goto out; 1606 goto out;
1608 err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address); 1607 err = move_addr_to_kernel(uservaddr, addrlen, &address);
1609 if (err < 0) 1608 if (err < 0)
1610 goto out_put; 1609 goto out_put;
1611 1610
@@ -1645,7 +1644,7 @@ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
1645 err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); 1644 err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
1646 if (err) 1645 if (err)
1647 goto out_put; 1646 goto out_put;
1648 err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len); 1647 err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);
1649 1648
1650out_put: 1649out_put:
1651 fput_light(sock->file, fput_needed); 1650 fput_light(sock->file, fput_needed);
@@ -1677,7 +1676,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
1677 sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1676 sock->ops->getname(sock, (struct sockaddr *)&address, &len,
1678 1); 1677 1);
1679 if (!err) 1678 if (!err)
1680 err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, 1679 err = move_addr_to_user(&address, len, usockaddr,
1681 usockaddr_len); 1680 usockaddr_len);
1682 fput_light(sock->file, fput_needed); 1681 fput_light(sock->file, fput_needed);
1683 } 1682 }
@@ -1691,7 +1690,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
1691 */ 1690 */
1692 1691
1693SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, 1692SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1694 unsigned, flags, struct sockaddr __user *, addr, 1693 unsigned int, flags, struct sockaddr __user *, addr,
1695 int, addr_len) 1694 int, addr_len)
1696{ 1695{
1697 struct socket *sock; 1696 struct socket *sock;
@@ -1716,7 +1715,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1716 msg.msg_controllen = 0; 1715 msg.msg_controllen = 0;
1717 msg.msg_namelen = 0; 1716 msg.msg_namelen = 0;
1718 if (addr) { 1717 if (addr) {
1719 err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); 1718 err = move_addr_to_kernel(addr, addr_len, &address);
1720 if (err < 0) 1719 if (err < 0)
1721 goto out_put; 1720 goto out_put;
1722 msg.msg_name = (struct sockaddr *)&address; 1721 msg.msg_name = (struct sockaddr *)&address;
@@ -1738,7 +1737,7 @@ out:
1738 */ 1737 */
1739 1738
1740SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, 1739SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
1741 unsigned, flags) 1740 unsigned int, flags)
1742{ 1741{
1743 return sys_sendto(fd, buff, len, flags, NULL, 0); 1742 return sys_sendto(fd, buff, len, flags, NULL, 0);
1744} 1743}
@@ -1750,7 +1749,7 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
1750 */ 1749 */
1751 1750
1752SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, 1751SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1753 unsigned, flags, struct sockaddr __user *, addr, 1752 unsigned int, flags, struct sockaddr __user *, addr,
1754 int __user *, addr_len) 1753 int __user *, addr_len)
1755{ 1754{
1756 struct socket *sock; 1755 struct socket *sock;
@@ -1779,7 +1778,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1779 err = sock_recvmsg(sock, &msg, size, flags); 1778 err = sock_recvmsg(sock, &msg, size, flags);
1780 1779
1781 if (err >= 0 && addr != NULL) { 1780 if (err >= 0 && addr != NULL) {
1782 err2 = move_addr_to_user((struct sockaddr *)&address, 1781 err2 = move_addr_to_user(&address,
1783 msg.msg_namelen, addr, addr_len); 1782 msg.msg_namelen, addr, addr_len);
1784 if (err2 < 0) 1783 if (err2 < 0)
1785 err = err2; 1784 err = err2;
@@ -1795,7 +1794,7 @@ out:
1795 */ 1794 */
1796 1795
1797asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, 1796asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
1798 unsigned flags) 1797 unsigned int flags)
1799{ 1798{
1800 return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); 1799 return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
1801} 1800}
@@ -1897,7 +1896,7 @@ struct used_address {
1897}; 1896};
1898 1897
1899static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1898static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1900 struct msghdr *msg_sys, unsigned flags, 1899 struct msghdr *msg_sys, unsigned int flags,
1901 struct used_address *used_address) 1900 struct used_address *used_address)
1902{ 1901{
1903 struct compat_msghdr __user *msg_compat = 1902 struct compat_msghdr __user *msg_compat =
@@ -1908,7 +1907,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1908 __attribute__ ((aligned(sizeof(__kernel_size_t)))); 1907 __attribute__ ((aligned(sizeof(__kernel_size_t))));
1909 /* 20 is size of ipv6_pktinfo */ 1908 /* 20 is size of ipv6_pktinfo */
1910 unsigned char *ctl_buf = ctl; 1909 unsigned char *ctl_buf = ctl;
1911 int err, ctl_len, iov_size, total_len; 1910 int err, ctl_len, total_len;
1912 1911
1913 err = -EFAULT; 1912 err = -EFAULT;
1914 if (MSG_CMSG_COMPAT & flags) { 1913 if (MSG_CMSG_COMPAT & flags) {
@@ -1917,29 +1916,22 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1917 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 1916 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1918 return -EFAULT; 1917 return -EFAULT;
1919 1918
1920 /* do not move before msg_sys is valid */
1921 err = -EMSGSIZE;
1922 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1923 goto out;
1924
1925 /* Check whether to allocate the iovec area */
1926 err = -ENOMEM;
1927 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
1928 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 1919 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1929 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 1920 err = -EMSGSIZE;
1921 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1922 goto out;
1923 err = -ENOMEM;
1924 iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
1925 GFP_KERNEL);
1930 if (!iov) 1926 if (!iov)
1931 goto out; 1927 goto out;
1932 } 1928 }
1933 1929
1934 /* This will also move the address data into kernel space */ 1930 /* This will also move the address data into kernel space */
1935 if (MSG_CMSG_COMPAT & flags) { 1931 if (MSG_CMSG_COMPAT & flags) {
1936 err = verify_compat_iovec(msg_sys, iov, 1932 err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
1937 (struct sockaddr *)&address,
1938 VERIFY_READ);
1939 } else 1933 } else
1940 err = verify_iovec(msg_sys, iov, 1934 err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
1941 (struct sockaddr *)&address,
1942 VERIFY_READ);
1943 if (err < 0) 1935 if (err < 0)
1944 goto out_freeiov; 1936 goto out_freeiov;
1945 total_len = err; 1937 total_len = err;
@@ -2009,7 +2001,7 @@ out_freectl:
2009 sock_kfree_s(sock->sk, ctl_buf, ctl_len); 2001 sock_kfree_s(sock->sk, ctl_buf, ctl_len);
2010out_freeiov: 2002out_freeiov:
2011 if (iov != iovstack) 2003 if (iov != iovstack)
2012 sock_kfree_s(sock->sk, iov, iov_size); 2004 kfree(iov);
2013out: 2005out:
2014 return err; 2006 return err;
2015} 2007}
@@ -2018,7 +2010,7 @@ out:
2018 * BSD sendmsg interface 2010 * BSD sendmsg interface
2019 */ 2011 */
2020 2012
2021SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) 2013SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
2022{ 2014{
2023 int fput_needed, err; 2015 int fput_needed, err;
2024 struct msghdr msg_sys; 2016 struct msghdr msg_sys;
@@ -2100,14 +2092,14 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
2100} 2092}
2101 2093
2102static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2094static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2103 struct msghdr *msg_sys, unsigned flags, int nosec) 2095 struct msghdr *msg_sys, unsigned int flags, int nosec)
2104{ 2096{
2105 struct compat_msghdr __user *msg_compat = 2097 struct compat_msghdr __user *msg_compat =
2106 (struct compat_msghdr __user *)msg; 2098 (struct compat_msghdr __user *)msg;
2107 struct iovec iovstack[UIO_FASTIOV]; 2099 struct iovec iovstack[UIO_FASTIOV];
2108 struct iovec *iov = iovstack; 2100 struct iovec *iov = iovstack;
2109 unsigned long cmsg_ptr; 2101 unsigned long cmsg_ptr;
2110 int err, iov_size, total_len, len; 2102 int err, total_len, len;
2111 2103
2112 /* kernel mode address */ 2104 /* kernel mode address */
2113 struct sockaddr_storage addr; 2105 struct sockaddr_storage addr;
@@ -2122,15 +2114,13 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2122 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 2114 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
2123 return -EFAULT; 2115 return -EFAULT;
2124 2116
2125 err = -EMSGSIZE;
2126 if (msg_sys->msg_iovlen > UIO_MAXIOV)
2127 goto out;
2128
2129 /* Check whether to allocate the iovec area */
2130 err = -ENOMEM;
2131 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
2132 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2117 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2133 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 2118 err = -EMSGSIZE;
2119 if (msg_sys->msg_iovlen > UIO_MAXIOV)
2120 goto out;
2121 err = -ENOMEM;
2122 iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
2123 GFP_KERNEL);
2134 if (!iov) 2124 if (!iov)
2135 goto out; 2125 goto out;
2136 } 2126 }
@@ -2143,13 +2133,9 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2143 uaddr = (__force void __user *)msg_sys->msg_name; 2133 uaddr = (__force void __user *)msg_sys->msg_name;
2144 uaddr_len = COMPAT_NAMELEN(msg); 2134 uaddr_len = COMPAT_NAMELEN(msg);
2145 if (MSG_CMSG_COMPAT & flags) { 2135 if (MSG_CMSG_COMPAT & flags) {
2146 err = verify_compat_iovec(msg_sys, iov, 2136 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2147 (struct sockaddr *)&addr,
2148 VERIFY_WRITE);
2149 } else 2137 } else
2150 err = verify_iovec(msg_sys, iov, 2138 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2151 (struct sockaddr *)&addr,
2152 VERIFY_WRITE);
2153 if (err < 0) 2139 if (err < 0)
2154 goto out_freeiov; 2140 goto out_freeiov;
2155 total_len = err; 2141 total_len = err;
@@ -2166,7 +2152,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2166 len = err; 2152 len = err;
2167 2153
2168 if (uaddr != NULL) { 2154 if (uaddr != NULL) {
2169 err = move_addr_to_user((struct sockaddr *)&addr, 2155 err = move_addr_to_user(&addr,
2170 msg_sys->msg_namelen, uaddr, 2156 msg_sys->msg_namelen, uaddr,
2171 uaddr_len); 2157 uaddr_len);
2172 if (err < 0) 2158 if (err < 0)
@@ -2188,7 +2174,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2188 2174
2189out_freeiov: 2175out_freeiov:
2190 if (iov != iovstack) 2176 if (iov != iovstack)
2191 sock_kfree_s(sock->sk, iov, iov_size); 2177 kfree(iov);
2192out: 2178out:
2193 return err; 2179 return err;
2194} 2180}
@@ -2532,6 +2518,12 @@ EXPORT_SYMBOL(sock_unregister);
2532static int __init sock_init(void) 2518static int __init sock_init(void)
2533{ 2519{
2534 int err; 2520 int err;
2521 /*
2522 * Initialize the network sysctl infrastructure.
2523 */
2524 err = net_sysctl_init();
2525 if (err)
2526 goto out;
2535 2527
2536 /* 2528 /*
2537 * Initialize sock SLAB cache. 2529 * Initialize sock SLAB cache.
@@ -2600,7 +2592,7 @@ void socket_seq_show(struct seq_file *seq)
2600 2592
2601#ifdef CONFIG_COMPAT 2593#ifdef CONFIG_COMPAT
2602static int do_siocgstamp(struct net *net, struct socket *sock, 2594static int do_siocgstamp(struct net *net, struct socket *sock,
2603 unsigned int cmd, struct compat_timeval __user *up) 2595 unsigned int cmd, void __user *up)
2604{ 2596{
2605 mm_segment_t old_fs = get_fs(); 2597 mm_segment_t old_fs = get_fs();
2606 struct timeval ktv; 2598 struct timeval ktv;
@@ -2609,15 +2601,14 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2609 set_fs(KERNEL_DS); 2601 set_fs(KERNEL_DS);
2610 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2602 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2611 set_fs(old_fs); 2603 set_fs(old_fs);
2612 if (!err) { 2604 if (!err)
2613 err = put_user(ktv.tv_sec, &up->tv_sec); 2605 err = compat_put_timeval(up, &ktv);
2614 err |= __put_user(ktv.tv_usec, &up->tv_usec); 2606
2615 }
2616 return err; 2607 return err;
2617} 2608}
2618 2609
2619static int do_siocgstampns(struct net *net, struct socket *sock, 2610static int do_siocgstampns(struct net *net, struct socket *sock,
2620 unsigned int cmd, struct compat_timespec __user *up) 2611 unsigned int cmd, void __user *up)
2621{ 2612{
2622 mm_segment_t old_fs = get_fs(); 2613 mm_segment_t old_fs = get_fs();
2623 struct timespec kts; 2614 struct timespec kts;
@@ -2626,10 +2617,9 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2626 set_fs(KERNEL_DS); 2617 set_fs(KERNEL_DS);
2627 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2618 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2628 set_fs(old_fs); 2619 set_fs(old_fs);
2629 if (!err) { 2620 if (!err)
2630 err = put_user(kts.tv_sec, &up->tv_sec); 2621 err = compat_put_timespec(up, &kts);
2631 err |= __put_user(kts.tv_nsec, &up->tv_nsec); 2622
2632 }
2633 return err; 2623 return err;
2634} 2624}
2635 2625
@@ -3233,7 +3223,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3233 return -ENOIOCTLCMD; 3223 return -ENOIOCTLCMD;
3234} 3224}
3235 3225
3236static long compat_sock_ioctl(struct file *file, unsigned cmd, 3226static long compat_sock_ioctl(struct file *file, unsigned int cmd,
3237 unsigned long arg) 3227 unsigned long arg)
3238{ 3228{
3239 struct socket *sock = file->private_data; 3229 struct socket *sock = file->private_data;
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index ffd243d09188..9fe8857d8d59 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -39,3 +39,16 @@ config RPCSEC_GSS_KRB5
39 Kerberos support should be installed. 39 Kerberos support should be installed.
40 40
41 If unsure, say Y. 41 If unsure, say Y.
42
43config SUNRPC_DEBUG
44 bool "RPC: Enable dprintk debugging"
45 depends on SUNRPC && SYSCTL
46 help
47 This option enables a sysctl-based debugging interface
48 that is be used by the 'rpcdebug' utility to turn on or off
49 logging of different aspects of the kernel RPC activity.
50
51 Disabling this option will make your kernel slightly smaller,
52 but makes troubleshooting NFS issues significantly harder.
53
54 If unsure, say Y.
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index ee77742e0ed6..d11418f97f1f 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -156,8 +156,9 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
156} 156}
157 157
158#if IS_ENABLED(CONFIG_IPV6) 158#if IS_ENABLED(CONFIG_IPV6)
159static int rpc_parse_scope_id(const char *buf, const size_t buflen, 159static int rpc_parse_scope_id(struct net *net, const char *buf,
160 const char *delim, struct sockaddr_in6 *sin6) 160 const size_t buflen, const char *delim,
161 struct sockaddr_in6 *sin6)
161{ 162{
162 char *p; 163 char *p;
163 size_t len; 164 size_t len;
@@ -177,7 +178,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen,
177 unsigned long scope_id = 0; 178 unsigned long scope_id = 0;
178 struct net_device *dev; 179 struct net_device *dev;
179 180
180 dev = dev_get_by_name(&init_net, p); 181 dev = dev_get_by_name(net, p);
181 if (dev != NULL) { 182 if (dev != NULL) {
182 scope_id = dev->ifindex; 183 scope_id = dev->ifindex;
183 dev_put(dev); 184 dev_put(dev);
@@ -197,7 +198,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen,
197 return 0; 198 return 0;
198} 199}
199 200
200static size_t rpc_pton6(const char *buf, const size_t buflen, 201static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
201 struct sockaddr *sap, const size_t salen) 202 struct sockaddr *sap, const size_t salen)
202{ 203{
203 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; 204 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -213,14 +214,14 @@ static size_t rpc_pton6(const char *buf, const size_t buflen,
213 if (in6_pton(buf, buflen, addr, IPV6_SCOPE_DELIMITER, &delim) == 0) 214 if (in6_pton(buf, buflen, addr, IPV6_SCOPE_DELIMITER, &delim) == 0)
214 return 0; 215 return 0;
215 216
216 if (!rpc_parse_scope_id(buf, buflen, delim, sin6)) 217 if (!rpc_parse_scope_id(net, buf, buflen, delim, sin6))
217 return 0; 218 return 0;
218 219
219 sin6->sin6_family = AF_INET6; 220 sin6->sin6_family = AF_INET6;
220 return sizeof(struct sockaddr_in6); 221 return sizeof(struct sockaddr_in6);
221} 222}
222#else 223#else
223static size_t rpc_pton6(const char *buf, const size_t buflen, 224static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
224 struct sockaddr *sap, const size_t salen) 225 struct sockaddr *sap, const size_t salen)
225{ 226{
226 return 0; 227 return 0;
@@ -229,6 +230,7 @@ static size_t rpc_pton6(const char *buf, const size_t buflen,
229 230
230/** 231/**
231 * rpc_pton - Construct a sockaddr in @sap 232 * rpc_pton - Construct a sockaddr in @sap
233 * @net: applicable network namespace
232 * @buf: C string containing presentation format IP address 234 * @buf: C string containing presentation format IP address
233 * @buflen: length of presentation address in bytes 235 * @buflen: length of presentation address in bytes
234 * @sap: buffer into which to plant socket address 236 * @sap: buffer into which to plant socket address
@@ -241,14 +243,14 @@ static size_t rpc_pton6(const char *buf, const size_t buflen,
241 * socket address, if successful. Returns zero if an error 243 * socket address, if successful. Returns zero if an error
242 * occurred. 244 * occurred.
243 */ 245 */
244size_t rpc_pton(const char *buf, const size_t buflen, 246size_t rpc_pton(struct net *net, const char *buf, const size_t buflen,
245 struct sockaddr *sap, const size_t salen) 247 struct sockaddr *sap, const size_t salen)
246{ 248{
247 unsigned int i; 249 unsigned int i;
248 250
249 for (i = 0; i < buflen; i++) 251 for (i = 0; i < buflen; i++)
250 if (buf[i] == ':') 252 if (buf[i] == ':')
251 return rpc_pton6(buf, buflen, sap, salen); 253 return rpc_pton6(net, buf, buflen, sap, salen);
252 return rpc_pton4(buf, buflen, sap, salen); 254 return rpc_pton4(buf, buflen, sap, salen);
253} 255}
254EXPORT_SYMBOL_GPL(rpc_pton); 256EXPORT_SYMBOL_GPL(rpc_pton);
@@ -295,6 +297,7 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
295 297
296/** 298/**
297 * rpc_uaddr2sockaddr - convert a universal address to a socket address. 299 * rpc_uaddr2sockaddr - convert a universal address to a socket address.
300 * @net: applicable network namespace
298 * @uaddr: C string containing universal address to convert 301 * @uaddr: C string containing universal address to convert
299 * @uaddr_len: length of universal address string 302 * @uaddr_len: length of universal address string
300 * @sap: buffer into which to plant socket address 303 * @sap: buffer into which to plant socket address
@@ -306,8 +309,9 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
306 * Returns the size of the socket address if successful; otherwise 309 * Returns the size of the socket address if successful; otherwise
307 * zero is returned. 310 * zero is returned.
308 */ 311 */
309size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len, 312size_t rpc_uaddr2sockaddr(struct net *net, const char *uaddr,
310 struct sockaddr *sap, const size_t salen) 313 const size_t uaddr_len, struct sockaddr *sap,
314 const size_t salen)
311{ 315{
312 char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')]; 316 char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
313 unsigned long portlo, porthi; 317 unsigned long portlo, porthi;
@@ -339,7 +343,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
339 port = (unsigned short)((porthi << 8) | portlo); 343 port = (unsigned short)((porthi << 8) | portlo);
340 344
341 *c = '\0'; 345 *c = '\0';
342 if (rpc_pton(buf, strlen(buf), sap, salen) == 0) 346 if (rpc_pton(net, buf, strlen(buf), sap, salen) == 0)
343 return 0; 347 return 0;
344 348
345 switch (sap->sa_family) { 349 switch (sap->sa_family) {
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 75762f346975..6ed6f201b022 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -160,8 +160,8 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
160 if (gcred->acred.group_info->ngroups != acred->group_info->ngroups) 160 if (gcred->acred.group_info->ngroups != acred->group_info->ngroups)
161 goto out_nomatch; 161 goto out_nomatch;
162 for (i = 0; i < gcred->acred.group_info->ngroups; i++) { 162 for (i = 0; i < gcred->acred.group_info->ngroups; i++) {
163 if (GROUP_AT(gcred->acred.group_info, i) != 163 if (!gid_eq(GROUP_AT(gcred->acred.group_info, i),
164 GROUP_AT(acred->group_info, i)) 164 GROUP_AT(acred->group_info, i)))
165 goto out_nomatch; 165 goto out_nomatch;
166 } 166 }
167out_match: 167out_match:
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index affa631ac1ab..d3ad81f8da5b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -81,7 +81,7 @@ struct gss_auth {
81 * mechanism (for example, "krb5") and exists for 81 * mechanism (for example, "krb5") and exists for
82 * backwards-compatibility with older gssd's. 82 * backwards-compatibility with older gssd's.
83 */ 83 */
84 struct dentry *dentry[2]; 84 struct rpc_pipe *pipe[2];
85}; 85};
86 86
87/* pipe_version >= 0 if and only if someone has a pipe open. */ 87/* pipe_version >= 0 if and only if someone has a pipe open. */
@@ -112,7 +112,7 @@ gss_put_ctx(struct gss_cl_ctx *ctx)
112/* gss_cred_set_ctx: 112/* gss_cred_set_ctx:
113 * called by gss_upcall_callback and gss_create_upcall in order 113 * called by gss_upcall_callback and gss_create_upcall in order
114 * to set the gss context. The actual exchange of an old context 114 * to set the gss context. The actual exchange of an old context
115 * and a new one is protected by the inode->i_lock. 115 * and a new one is protected by the pipe->lock.
116 */ 116 */
117static void 117static void
118gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) 118gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
@@ -251,7 +251,7 @@ struct gss_upcall_msg {
251 struct rpc_pipe_msg msg; 251 struct rpc_pipe_msg msg;
252 struct list_head list; 252 struct list_head list;
253 struct gss_auth *auth; 253 struct gss_auth *auth;
254 struct rpc_inode *inode; 254 struct rpc_pipe *pipe;
255 struct rpc_wait_queue rpc_waitqueue; 255 struct rpc_wait_queue rpc_waitqueue;
256 wait_queue_head_t waitqueue; 256 wait_queue_head_t waitqueue;
257 struct gss_cl_ctx *ctx; 257 struct gss_cl_ctx *ctx;
@@ -294,10 +294,10 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
294} 294}
295 295
296static struct gss_upcall_msg * 296static struct gss_upcall_msg *
297__gss_find_upcall(struct rpc_inode *rpci, uid_t uid) 297__gss_find_upcall(struct rpc_pipe *pipe, uid_t uid)
298{ 298{
299 struct gss_upcall_msg *pos; 299 struct gss_upcall_msg *pos;
300 list_for_each_entry(pos, &rpci->in_downcall, list) { 300 list_for_each_entry(pos, &pipe->in_downcall, list) {
301 if (pos->uid != uid) 301 if (pos->uid != uid)
302 continue; 302 continue;
303 atomic_inc(&pos->count); 303 atomic_inc(&pos->count);
@@ -315,18 +315,17 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
315static inline struct gss_upcall_msg * 315static inline struct gss_upcall_msg *
316gss_add_msg(struct gss_upcall_msg *gss_msg) 316gss_add_msg(struct gss_upcall_msg *gss_msg)
317{ 317{
318 struct rpc_inode *rpci = gss_msg->inode; 318 struct rpc_pipe *pipe = gss_msg->pipe;
319 struct inode *inode = &rpci->vfs_inode;
320 struct gss_upcall_msg *old; 319 struct gss_upcall_msg *old;
321 320
322 spin_lock(&inode->i_lock); 321 spin_lock(&pipe->lock);
323 old = __gss_find_upcall(rpci, gss_msg->uid); 322 old = __gss_find_upcall(pipe, gss_msg->uid);
324 if (old == NULL) { 323 if (old == NULL) {
325 atomic_inc(&gss_msg->count); 324 atomic_inc(&gss_msg->count);
326 list_add(&gss_msg->list, &rpci->in_downcall); 325 list_add(&gss_msg->list, &pipe->in_downcall);
327 } else 326 } else
328 gss_msg = old; 327 gss_msg = old;
329 spin_unlock(&inode->i_lock); 328 spin_unlock(&pipe->lock);
330 return gss_msg; 329 return gss_msg;
331} 330}
332 331
@@ -342,14 +341,14 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
342static void 341static void
343gss_unhash_msg(struct gss_upcall_msg *gss_msg) 342gss_unhash_msg(struct gss_upcall_msg *gss_msg)
344{ 343{
345 struct inode *inode = &gss_msg->inode->vfs_inode; 344 struct rpc_pipe *pipe = gss_msg->pipe;
346 345
347 if (list_empty(&gss_msg->list)) 346 if (list_empty(&gss_msg->list))
348 return; 347 return;
349 spin_lock(&inode->i_lock); 348 spin_lock(&pipe->lock);
350 if (!list_empty(&gss_msg->list)) 349 if (!list_empty(&gss_msg->list))
351 __gss_unhash_msg(gss_msg); 350 __gss_unhash_msg(gss_msg);
352 spin_unlock(&inode->i_lock); 351 spin_unlock(&pipe->lock);
353} 352}
354 353
355static void 354static void
@@ -376,11 +375,11 @@ gss_upcall_callback(struct rpc_task *task)
376 struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred, 375 struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
377 struct gss_cred, gc_base); 376 struct gss_cred, gc_base);
378 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; 377 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
379 struct inode *inode = &gss_msg->inode->vfs_inode; 378 struct rpc_pipe *pipe = gss_msg->pipe;
380 379
381 spin_lock(&inode->i_lock); 380 spin_lock(&pipe->lock);
382 gss_handle_downcall_result(gss_cred, gss_msg); 381 gss_handle_downcall_result(gss_cred, gss_msg);
383 spin_unlock(&inode->i_lock); 382 spin_unlock(&pipe->lock);
384 task->tk_status = gss_msg->msg.errno; 383 task->tk_status = gss_msg->msg.errno;
385 gss_release_msg(gss_msg); 384 gss_release_msg(gss_msg);
386} 385}
@@ -450,7 +449,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
450 kfree(gss_msg); 449 kfree(gss_msg);
451 return ERR_PTR(vers); 450 return ERR_PTR(vers);
452 } 451 }
453 gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode); 452 gss_msg->pipe = gss_auth->pipe[vers];
454 INIT_LIST_HEAD(&gss_msg->list); 453 INIT_LIST_HEAD(&gss_msg->list);
455 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 454 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
456 init_waitqueue_head(&gss_msg->waitqueue); 455 init_waitqueue_head(&gss_msg->waitqueue);
@@ -474,8 +473,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
474 return gss_new; 473 return gss_new;
475 gss_msg = gss_add_msg(gss_new); 474 gss_msg = gss_add_msg(gss_new);
476 if (gss_msg == gss_new) { 475 if (gss_msg == gss_new) {
477 struct inode *inode = &gss_new->inode->vfs_inode; 476 int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
478 int res = rpc_queue_upcall(inode, &gss_new->msg);
479 if (res) { 477 if (res) {
480 gss_unhash_msg(gss_new); 478 gss_unhash_msg(gss_new);
481 gss_msg = ERR_PTR(res); 479 gss_msg = ERR_PTR(res);
@@ -506,7 +504,7 @@ gss_refresh_upcall(struct rpc_task *task)
506 struct gss_cred *gss_cred = container_of(cred, 504 struct gss_cred *gss_cred = container_of(cred,
507 struct gss_cred, gc_base); 505 struct gss_cred, gc_base);
508 struct gss_upcall_msg *gss_msg; 506 struct gss_upcall_msg *gss_msg;
509 struct inode *inode; 507 struct rpc_pipe *pipe;
510 int err = 0; 508 int err = 0;
511 509
512 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, 510 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
@@ -524,8 +522,8 @@ gss_refresh_upcall(struct rpc_task *task)
524 err = PTR_ERR(gss_msg); 522 err = PTR_ERR(gss_msg);
525 goto out; 523 goto out;
526 } 524 }
527 inode = &gss_msg->inode->vfs_inode; 525 pipe = gss_msg->pipe;
528 spin_lock(&inode->i_lock); 526 spin_lock(&pipe->lock);
529 if (gss_cred->gc_upcall != NULL) 527 if (gss_cred->gc_upcall != NULL)
530 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); 528 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
531 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { 529 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
@@ -538,7 +536,7 @@ gss_refresh_upcall(struct rpc_task *task)
538 gss_handle_downcall_result(gss_cred, gss_msg); 536 gss_handle_downcall_result(gss_cred, gss_msg);
539 err = gss_msg->msg.errno; 537 err = gss_msg->msg.errno;
540 } 538 }
541 spin_unlock(&inode->i_lock); 539 spin_unlock(&pipe->lock);
542 gss_release_msg(gss_msg); 540 gss_release_msg(gss_msg);
543out: 541out:
544 dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", 542 dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
@@ -549,7 +547,7 @@ out:
549static inline int 547static inline int
550gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 548gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
551{ 549{
552 struct inode *inode; 550 struct rpc_pipe *pipe;
553 struct rpc_cred *cred = &gss_cred->gc_base; 551 struct rpc_cred *cred = &gss_cred->gc_base;
554 struct gss_upcall_msg *gss_msg; 552 struct gss_upcall_msg *gss_msg;
555 DEFINE_WAIT(wait); 553 DEFINE_WAIT(wait);
@@ -573,14 +571,14 @@ retry:
573 err = PTR_ERR(gss_msg); 571 err = PTR_ERR(gss_msg);
574 goto out; 572 goto out;
575 } 573 }
576 inode = &gss_msg->inode->vfs_inode; 574 pipe = gss_msg->pipe;
577 for (;;) { 575 for (;;) {
578 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); 576 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
579 spin_lock(&inode->i_lock); 577 spin_lock(&pipe->lock);
580 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 578 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
581 break; 579 break;
582 } 580 }
583 spin_unlock(&inode->i_lock); 581 spin_unlock(&pipe->lock);
584 if (fatal_signal_pending(current)) { 582 if (fatal_signal_pending(current)) {
585 err = -ERESTARTSYS; 583 err = -ERESTARTSYS;
586 goto out_intr; 584 goto out_intr;
@@ -591,7 +589,7 @@ retry:
591 gss_cred_set_ctx(cred, gss_msg->ctx); 589 gss_cred_set_ctx(cred, gss_msg->ctx);
592 else 590 else
593 err = gss_msg->msg.errno; 591 err = gss_msg->msg.errno;
594 spin_unlock(&inode->i_lock); 592 spin_unlock(&pipe->lock);
595out_intr: 593out_intr:
596 finish_wait(&gss_msg->waitqueue, &wait); 594 finish_wait(&gss_msg->waitqueue, &wait);
597 gss_release_msg(gss_msg); 595 gss_release_msg(gss_msg);
@@ -609,7 +607,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
609 const void *p, *end; 607 const void *p, *end;
610 void *buf; 608 void *buf;
611 struct gss_upcall_msg *gss_msg; 609 struct gss_upcall_msg *gss_msg;
612 struct inode *inode = filp->f_path.dentry->d_inode; 610 struct rpc_pipe *pipe = RPC_I(filp->f_dentry->d_inode)->pipe;
613 struct gss_cl_ctx *ctx; 611 struct gss_cl_ctx *ctx;
614 uid_t uid; 612 uid_t uid;
615 ssize_t err = -EFBIG; 613 ssize_t err = -EFBIG;
@@ -639,14 +637,14 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
639 637
640 err = -ENOENT; 638 err = -ENOENT;
641 /* Find a matching upcall */ 639 /* Find a matching upcall */
642 spin_lock(&inode->i_lock); 640 spin_lock(&pipe->lock);
643 gss_msg = __gss_find_upcall(RPC_I(inode), uid); 641 gss_msg = __gss_find_upcall(pipe, uid);
644 if (gss_msg == NULL) { 642 if (gss_msg == NULL) {
645 spin_unlock(&inode->i_lock); 643 spin_unlock(&pipe->lock);
646 goto err_put_ctx; 644 goto err_put_ctx;
647 } 645 }
648 list_del_init(&gss_msg->list); 646 list_del_init(&gss_msg->list);
649 spin_unlock(&inode->i_lock); 647 spin_unlock(&pipe->lock);
650 648
651 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 649 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
652 if (IS_ERR(p)) { 650 if (IS_ERR(p)) {
@@ -674,9 +672,9 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
674 err = mlen; 672 err = mlen;
675 673
676err_release_msg: 674err_release_msg:
677 spin_lock(&inode->i_lock); 675 spin_lock(&pipe->lock);
678 __gss_unhash_msg(gss_msg); 676 __gss_unhash_msg(gss_msg);
679 spin_unlock(&inode->i_lock); 677 spin_unlock(&pipe->lock);
680 gss_release_msg(gss_msg); 678 gss_release_msg(gss_msg);
681err_put_ctx: 679err_put_ctx:
682 gss_put_ctx(ctx); 680 gss_put_ctx(ctx);
@@ -722,23 +720,23 @@ static int gss_pipe_open_v1(struct inode *inode)
722static void 720static void
723gss_pipe_release(struct inode *inode) 721gss_pipe_release(struct inode *inode)
724{ 722{
725 struct rpc_inode *rpci = RPC_I(inode); 723 struct rpc_pipe *pipe = RPC_I(inode)->pipe;
726 struct gss_upcall_msg *gss_msg; 724 struct gss_upcall_msg *gss_msg;
727 725
728restart: 726restart:
729 spin_lock(&inode->i_lock); 727 spin_lock(&pipe->lock);
730 list_for_each_entry(gss_msg, &rpci->in_downcall, list) { 728 list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
731 729
732 if (!list_empty(&gss_msg->msg.list)) 730 if (!list_empty(&gss_msg->msg.list))
733 continue; 731 continue;
734 gss_msg->msg.errno = -EPIPE; 732 gss_msg->msg.errno = -EPIPE;
735 atomic_inc(&gss_msg->count); 733 atomic_inc(&gss_msg->count);
736 __gss_unhash_msg(gss_msg); 734 __gss_unhash_msg(gss_msg);
737 spin_unlock(&inode->i_lock); 735 spin_unlock(&pipe->lock);
738 gss_release_msg(gss_msg); 736 gss_release_msg(gss_msg);
739 goto restart; 737 goto restart;
740 } 738 }
741 spin_unlock(&inode->i_lock); 739 spin_unlock(&pipe->lock);
742 740
743 put_pipe_version(); 741 put_pipe_version();
744} 742}
@@ -759,6 +757,75 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
759 } 757 }
760} 758}
761 759
760static void gss_pipes_dentries_destroy(struct rpc_auth *auth)
761{
762 struct gss_auth *gss_auth;
763
764 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
765 if (gss_auth->pipe[0]->dentry)
766 rpc_unlink(gss_auth->pipe[0]->dentry);
767 if (gss_auth->pipe[1]->dentry)
768 rpc_unlink(gss_auth->pipe[1]->dentry);
769}
770
771static int gss_pipes_dentries_create(struct rpc_auth *auth)
772{
773 int err;
774 struct gss_auth *gss_auth;
775 struct rpc_clnt *clnt;
776
777 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
778 clnt = gss_auth->client;
779
780 gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
781 "gssd",
782 clnt, gss_auth->pipe[1]);
783 if (IS_ERR(gss_auth->pipe[1]->dentry))
784 return PTR_ERR(gss_auth->pipe[1]->dentry);
785 gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
786 gss_auth->mech->gm_name,
787 clnt, gss_auth->pipe[0]);
788 if (IS_ERR(gss_auth->pipe[0]->dentry)) {
789 err = PTR_ERR(gss_auth->pipe[0]->dentry);
790 goto err_unlink_pipe_1;
791 }
792 return 0;
793
794err_unlink_pipe_1:
795 rpc_unlink(gss_auth->pipe[1]->dentry);
796 return err;
797}
798
799static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt,
800 struct rpc_auth *auth)
801{
802 struct net *net = rpc_net_ns(clnt);
803 struct super_block *sb;
804
805 sb = rpc_get_sb_net(net);
806 if (sb) {
807 if (clnt->cl_dentry)
808 gss_pipes_dentries_destroy(auth);
809 rpc_put_sb_net(net);
810 }
811}
812
813static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt,
814 struct rpc_auth *auth)
815{
816 struct net *net = rpc_net_ns(clnt);
817 struct super_block *sb;
818 int err = 0;
819
820 sb = rpc_get_sb_net(net);
821 if (sb) {
822 if (clnt->cl_dentry)
823 err = gss_pipes_dentries_create(auth);
824 rpc_put_sb_net(net);
825 }
826 return err;
827}
828
762/* 829/*
763 * NOTE: we have the opportunity to use different 830 * NOTE: we have the opportunity to use different
764 * parameters based on the input flavor (which must be a pseudoflavor) 831 * parameters based on the input flavor (which must be a pseudoflavor)
@@ -801,32 +868,33 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
801 * that we supported only the old pipe. So we instead create 868 * that we supported only the old pipe. So we instead create
802 * the new pipe first. 869 * the new pipe first.
803 */ 870 */
804 gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_path.dentry, 871 gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1,
805 "gssd", 872 RPC_PIPE_WAIT_FOR_OPEN);
806 clnt, &gss_upcall_ops_v1, 873 if (IS_ERR(gss_auth->pipe[1])) {
807 RPC_PIPE_WAIT_FOR_OPEN); 874 err = PTR_ERR(gss_auth->pipe[1]);
808 if (IS_ERR(gss_auth->dentry[1])) {
809 err = PTR_ERR(gss_auth->dentry[1]);
810 goto err_put_mech; 875 goto err_put_mech;
811 } 876 }
812 877
813 gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_path.dentry, 878 gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0,
814 gss_auth->mech->gm_name, 879 RPC_PIPE_WAIT_FOR_OPEN);
815 clnt, &gss_upcall_ops_v0, 880 if (IS_ERR(gss_auth->pipe[0])) {
816 RPC_PIPE_WAIT_FOR_OPEN); 881 err = PTR_ERR(gss_auth->pipe[0]);
817 if (IS_ERR(gss_auth->dentry[0])) { 882 goto err_destroy_pipe_1;
818 err = PTR_ERR(gss_auth->dentry[0]);
819 goto err_unlink_pipe_1;
820 } 883 }
884 err = gss_pipes_dentries_create_net(clnt, auth);
885 if (err)
886 goto err_destroy_pipe_0;
821 err = rpcauth_init_credcache(auth); 887 err = rpcauth_init_credcache(auth);
822 if (err) 888 if (err)
823 goto err_unlink_pipe_0; 889 goto err_unlink_pipes;
824 890
825 return auth; 891 return auth;
826err_unlink_pipe_0: 892err_unlink_pipes:
827 rpc_unlink(gss_auth->dentry[0]); 893 gss_pipes_dentries_destroy_net(clnt, auth);
828err_unlink_pipe_1: 894err_destroy_pipe_0:
829 rpc_unlink(gss_auth->dentry[1]); 895 rpc_destroy_pipe_data(gss_auth->pipe[0]);
896err_destroy_pipe_1:
897 rpc_destroy_pipe_data(gss_auth->pipe[1]);
830err_put_mech: 898err_put_mech:
831 gss_mech_put(gss_auth->mech); 899 gss_mech_put(gss_auth->mech);
832err_free: 900err_free:
@@ -839,8 +907,9 @@ out_dec:
839static void 907static void
840gss_free(struct gss_auth *gss_auth) 908gss_free(struct gss_auth *gss_auth)
841{ 909{
842 rpc_unlink(gss_auth->dentry[1]); 910 gss_pipes_dentries_destroy_net(gss_auth->client, &gss_auth->rpc_auth);
843 rpc_unlink(gss_auth->dentry[0]); 911 rpc_destroy_pipe_data(gss_auth->pipe[0]);
912 rpc_destroy_pipe_data(gss_auth->pipe[1]);
844 gss_mech_put(gss_auth->mech); 913 gss_mech_put(gss_auth->mech);
845 914
846 kfree(gss_auth); 915 kfree(gss_auth);
@@ -1547,7 +1616,9 @@ static const struct rpc_authops authgss_ops = {
1547 .create = gss_create, 1616 .create = gss_create,
1548 .destroy = gss_destroy, 1617 .destroy = gss_destroy,
1549 .lookup_cred = gss_lookup_cred, 1618 .lookup_cred = gss_lookup_cred,
1550 .crcreate = gss_create_cred 1619 .crcreate = gss_create_cred,
1620 .pipes_create = gss_pipes_dentries_create,
1621 .pipes_destroy = gss_pipes_dentries_destroy,
1551}; 1622};
1552 1623
1553static const struct rpc_credops gss_credops = { 1624static const struct rpc_credops gss_credops = {
@@ -1591,6 +1662,21 @@ static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
1591 .release_pipe = gss_pipe_release, 1662 .release_pipe = gss_pipe_release,
1592}; 1663};
1593 1664
1665static __net_init int rpcsec_gss_init_net(struct net *net)
1666{
1667 return gss_svc_init_net(net);
1668}
1669
1670static __net_exit void rpcsec_gss_exit_net(struct net *net)
1671{
1672 gss_svc_shutdown_net(net);
1673}
1674
1675static struct pernet_operations rpcsec_gss_net_ops = {
1676 .init = rpcsec_gss_init_net,
1677 .exit = rpcsec_gss_exit_net,
1678};
1679
1594/* 1680/*
1595 * Initialize RPCSEC_GSS module 1681 * Initialize RPCSEC_GSS module
1596 */ 1682 */
@@ -1604,8 +1690,13 @@ static int __init init_rpcsec_gss(void)
1604 err = gss_svc_init(); 1690 err = gss_svc_init();
1605 if (err) 1691 if (err)
1606 goto out_unregister; 1692 goto out_unregister;
1693 err = register_pernet_subsys(&rpcsec_gss_net_ops);
1694 if (err)
1695 goto out_svc_exit;
1607 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version"); 1696 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
1608 return 0; 1697 return 0;
1698out_svc_exit:
1699 gss_svc_shutdown();
1609out_unregister: 1700out_unregister:
1610 rpcauth_unregister(&authgss_ops); 1701 rpcauth_unregister(&authgss_ops);
1611out: 1702out:
@@ -1614,6 +1705,7 @@ out:
1614 1705
1615static void __exit exit_rpcsec_gss(void) 1706static void __exit exit_rpcsec_gss(void)
1616{ 1707{
1708 unregister_pernet_subsys(&rpcsec_gss_net_ops);
1617 gss_svc_shutdown(); 1709 gss_svc_shutdown();
1618 rpcauth_unregister(&authgss_ops); 1710 rpcauth_unregister(&authgss_ops);
1619 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1711 rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 9576f35ab701..0f43e894bc0a 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -600,11 +600,14 @@ gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
600 u32 ret; 600 u32 ret;
601 struct scatterlist sg[1]; 601 struct scatterlist sg[1];
602 struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; 602 struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
603 u8 data[crypto_blkcipher_blocksize(cipher) * 2]; 603 u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
604 struct page **save_pages; 604 struct page **save_pages;
605 u32 len = buf->len - offset; 605 u32 len = buf->len - offset;
606 606
607 BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2); 607 if (len > ARRAY_SIZE(data)) {
608 WARN_ON(0);
609 return -ENOMEM;
610 }
608 611
609 /* 612 /*
610 * For encryption, we want to read from the cleartext 613 * For encryption, we want to read from the cleartext
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 8c67890de427..d3611f11a8df 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -344,7 +344,7 @@ out_err:
344 return PTR_ERR(p); 344 return PTR_ERR(p);
345} 345}
346 346
347struct crypto_blkcipher * 347static struct crypto_blkcipher *
348context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) 348context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
349{ 349{
350 struct crypto_blkcipher *cp; 350 struct crypto_blkcipher *cp;
@@ -624,7 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
624 ctx->seq_send = ctx->seq_send64; 624 ctx->seq_send = ctx->seq_send64;
625 if (ctx->seq_send64 != ctx->seq_send) { 625 if (ctx->seq_send64 != ctx->seq_send) {
626 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, 626 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
627 (long unsigned)ctx->seq_send64, ctx->seq_send); 627 (unsigned long)ctx->seq_send64, ctx->seq_send);
628 p = ERR_PTR(-EINVAL); 628 p = ERR_PTR(-EINVAL);
629 goto out_err; 629 goto out_err;
630 } 630 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index d7941eab7796..62ae3273186c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -159,7 +159,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
159 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; 159 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
160} 160}
161 161
162u32 162static u32
163gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, 163gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
164 struct xdr_netobj *token) 164 struct xdr_netobj *token)
165{ 165{
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 2763e3e48db4..107c4528654f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -82,9 +82,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
82 >>PAGE_CACHE_SHIFT; 82 >>PAGE_CACHE_SHIFT;
83 unsigned int offset = (buf->page_base + len - 1) 83 unsigned int offset = (buf->page_base + len - 1)
84 & (PAGE_CACHE_SIZE - 1); 84 & (PAGE_CACHE_SIZE - 1);
85 ptr = kmap_atomic(buf->pages[last], KM_USER0); 85 ptr = kmap_atomic(buf->pages[last]);
86 pad = *(ptr + offset); 86 pad = *(ptr + offset);
87 kunmap_atomic(ptr, KM_USER0); 87 kunmap_atomic(ptr);
88 goto out; 88 goto out;
89 } else 89 } else
90 len -= buf->page_len; 90 len -= buf->page_len;
@@ -381,21 +381,53 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
381} 381}
382 382
383/* 383/*
384 * We cannot currently handle tokens with rotated data. We need a 384 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need
385 * generalized routine to rotate the data in place. It is anticipated 385 * to do more than that, we shift repeatedly. Kevin Coffman reports
386 * that we won't encounter rotated data in the general case. 386 * seeing 28 bytes as the value used by Microsoft clients and servers
387 * with AES, so this constant is chosen to allow handling 28 in one pass
388 * without using too much stack space.
389 *
390 * If that proves to a problem perhaps we could use a more clever
391 * algorithm.
387 */ 392 */
388static u32 393#define LOCAL_BUF_LEN 32u
389rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) 394
395static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
390{ 396{
391 unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); 397 char head[LOCAL_BUF_LEN];
398 char tmp[LOCAL_BUF_LEN];
399 unsigned int this_len, i;
400
401 BUG_ON(shift > LOCAL_BUF_LEN);
392 402
393 if (realrrc == 0) 403 read_bytes_from_xdr_buf(buf, 0, head, shift);
394 return 0; 404 for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
405 this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
406 read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
407 write_bytes_to_xdr_buf(buf, i, tmp, this_len);
408 }
409 write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
410}
395 411
396 dprintk("%s: cannot process token with rotated data: " 412static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
397 "rrc %u, realrrc %u\n", __func__, rrc, realrrc); 413{
398 return 1; 414 int shifted = 0;
415 int this_shift;
416
417 shift %= buf->len;
418 while (shifted < shift) {
419 this_shift = min(shift - shifted, LOCAL_BUF_LEN);
420 rotate_buf_a_little(buf, this_shift);
421 shifted += this_shift;
422 }
423}
424
425static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
426{
427 struct xdr_buf subbuf;
428
429 xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
430 _rotate_left(&subbuf, shift);
399} 431}
400 432
401static u32 433static u32
@@ -495,11 +527,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
495 527
496 seqnum = be64_to_cpup((__be64 *)(ptr + 8)); 528 seqnum = be64_to_cpup((__be64 *)(ptr + 8));
497 529
498 if (rrc != 0) { 530 if (rrc != 0)
499 err = rotate_left(kctx, offset, buf, rrc); 531 rotate_left(offset + 16, buf, rrc);
500 if (err)
501 return GSS_S_FAILURE;
502 }
503 532
504 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, 533 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
505 &headskip, &tailskip); 534 &headskip, &tailskip);
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c7..782bfe1b6465 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
242int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) 242int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
243{ 243{
244 struct gss_api_mech *pos = NULL; 244 struct gss_api_mech *pos = NULL;
245 int i = 0; 245 int j, i = 0;
246 246
247 spin_lock(&registered_mechs_lock); 247 spin_lock(&registered_mechs_lock);
248 list_for_each_entry(pos, &registered_mechs, gm_list) { 248 list_for_each_entry(pos, &registered_mechs, gm_list) {
249 array_ptr[i] = pos->gm_pfs->pseudoflavor; 249 for (j=0; j < pos->gm_pf_num; j++) {
250 i++; 250 array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
251 }
251 } 252 }
252 spin_unlock(&registered_mechs_lock); 253 spin_unlock(&registered_mechs_lock);
253 return i; 254 return i;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 8d0f7d3c71c8..73e957386600 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -41,6 +41,7 @@
41#include <linux/types.h> 41#include <linux/types.h>
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/pagemap.h> 43#include <linux/pagemap.h>
44#include <linux/user_namespace.h>
44 45
45#include <linux/sunrpc/auth_gss.h> 46#include <linux/sunrpc/auth_gss.h>
46#include <linux/sunrpc/gss_err.h> 47#include <linux/sunrpc/gss_err.h>
@@ -48,6 +49,8 @@
48#include <linux/sunrpc/svcauth_gss.h> 49#include <linux/sunrpc/svcauth_gss.h>
49#include <linux/sunrpc/cache.h> 50#include <linux/sunrpc/cache.h>
50 51
52#include "../netns.h"
53
51#ifdef RPC_DEBUG 54#ifdef RPC_DEBUG
52# define RPCDBG_FACILITY RPCDBG_AUTH 55# define RPCDBG_FACILITY RPCDBG_AUTH
53#endif 56#endif
@@ -75,10 +78,8 @@ struct rsi {
75 int major_status, minor_status; 78 int major_status, minor_status;
76}; 79};
77 80
78static struct cache_head *rsi_table[RSI_HASHMAX]; 81static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
79static struct cache_detail rsi_cache; 82static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
80static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
81static struct rsi *rsi_lookup(struct rsi *item);
82 83
83static void rsi_free(struct rsi *rsii) 84static void rsi_free(struct rsi *rsii)
84{ 85{
@@ -216,7 +217,7 @@ static int rsi_parse(struct cache_detail *cd,
216 if (dup_to_netobj(&rsii.in_token, buf, len)) 217 if (dup_to_netobj(&rsii.in_token, buf, len))
217 goto out; 218 goto out;
218 219
219 rsip = rsi_lookup(&rsii); 220 rsip = rsi_lookup(cd, &rsii);
220 if (!rsip) 221 if (!rsip)
221 goto out; 222 goto out;
222 223
@@ -258,21 +259,20 @@ static int rsi_parse(struct cache_detail *cd,
258 if (dup_to_netobj(&rsii.out_token, buf, len)) 259 if (dup_to_netobj(&rsii.out_token, buf, len))
259 goto out; 260 goto out;
260 rsii.h.expiry_time = expiry; 261 rsii.h.expiry_time = expiry;
261 rsip = rsi_update(&rsii, rsip); 262 rsip = rsi_update(cd, &rsii, rsip);
262 status = 0; 263 status = 0;
263out: 264out:
264 rsi_free(&rsii); 265 rsi_free(&rsii);
265 if (rsip) 266 if (rsip)
266 cache_put(&rsip->h, &rsi_cache); 267 cache_put(&rsip->h, cd);
267 else 268 else
268 status = -ENOMEM; 269 status = -ENOMEM;
269 return status; 270 return status;
270} 271}
271 272
272static struct cache_detail rsi_cache = { 273static struct cache_detail rsi_cache_template = {
273 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
274 .hash_size = RSI_HASHMAX, 275 .hash_size = RSI_HASHMAX,
275 .hash_table = rsi_table,
276 .name = "auth.rpcsec.init", 276 .name = "auth.rpcsec.init",
277 .cache_put = rsi_put, 277 .cache_put = rsi_put,
278 .cache_upcall = rsi_upcall, 278 .cache_upcall = rsi_upcall,
@@ -283,24 +283,24 @@ static struct cache_detail rsi_cache = {
283 .alloc = rsi_alloc, 283 .alloc = rsi_alloc,
284}; 284};
285 285
286static struct rsi *rsi_lookup(struct rsi *item) 286static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
287{ 287{
288 struct cache_head *ch; 288 struct cache_head *ch;
289 int hash = rsi_hash(item); 289 int hash = rsi_hash(item);
290 290
291 ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash); 291 ch = sunrpc_cache_lookup(cd, &item->h, hash);
292 if (ch) 292 if (ch)
293 return container_of(ch, struct rsi, h); 293 return container_of(ch, struct rsi, h);
294 else 294 else
295 return NULL; 295 return NULL;
296} 296}
297 297
298static struct rsi *rsi_update(struct rsi *new, struct rsi *old) 298static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
299{ 299{
300 struct cache_head *ch; 300 struct cache_head *ch;
301 int hash = rsi_hash(new); 301 int hash = rsi_hash(new);
302 302
303 ch = sunrpc_cache_update(&rsi_cache, &new->h, 303 ch = sunrpc_cache_update(cd, &new->h,
304 &old->h, hash); 304 &old->h, hash);
305 if (ch) 305 if (ch)
306 return container_of(ch, struct rsi, h); 306 return container_of(ch, struct rsi, h);
@@ -336,22 +336,17 @@ struct rsc {
336 struct svc_cred cred; 336 struct svc_cred cred;
337 struct gss_svc_seq_data seqdata; 337 struct gss_svc_seq_data seqdata;
338 struct gss_ctx *mechctx; 338 struct gss_ctx *mechctx;
339 char *client_name;
340}; 339};
341 340
342static struct cache_head *rsc_table[RSC_HASHMAX]; 341static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
343static struct cache_detail rsc_cache; 342static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
344static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
345static struct rsc *rsc_lookup(struct rsc *item);
346 343
347static void rsc_free(struct rsc *rsci) 344static void rsc_free(struct rsc *rsci)
348{ 345{
349 kfree(rsci->handle.data); 346 kfree(rsci->handle.data);
350 if (rsci->mechctx) 347 if (rsci->mechctx)
351 gss_delete_sec_context(&rsci->mechctx); 348 gss_delete_sec_context(&rsci->mechctx);
352 if (rsci->cred.cr_group_info) 349 free_svc_cred(&rsci->cred);
353 put_group_info(rsci->cred.cr_group_info);
354 kfree(rsci->client_name);
355} 350}
356 351
357static void rsc_put(struct kref *ref) 352static void rsc_put(struct kref *ref)
@@ -389,7 +384,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
389 tmp->handle.data = NULL; 384 tmp->handle.data = NULL;
390 new->mechctx = NULL; 385 new->mechctx = NULL;
391 new->cred.cr_group_info = NULL; 386 new->cred.cr_group_info = NULL;
392 new->client_name = NULL; 387 new->cred.cr_principal = NULL;
393} 388}
394 389
395static void 390static void
@@ -404,8 +399,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
404 spin_lock_init(&new->seqdata.sd_lock); 399 spin_lock_init(&new->seqdata.sd_lock);
405 new->cred = tmp->cred; 400 new->cred = tmp->cred;
406 tmp->cred.cr_group_info = NULL; 401 tmp->cred.cr_group_info = NULL;
407 new->client_name = tmp->client_name; 402 new->cred.cr_principal = tmp->cred.cr_principal;
408 tmp->client_name = NULL; 403 tmp->cred.cr_principal = NULL;
409} 404}
410 405
411static struct cache_head * 406static struct cache_head *
@@ -444,7 +439,7 @@ static int rsc_parse(struct cache_detail *cd,
444 if (expiry == 0) 439 if (expiry == 0)
445 goto out; 440 goto out;
446 441
447 rscp = rsc_lookup(&rsci); 442 rscp = rsc_lookup(cd, &rsci);
448 if (!rscp) 443 if (!rscp)
449 goto out; 444 goto out;
450 445
@@ -473,9 +468,13 @@ static int rsc_parse(struct cache_detail *cd,
473 status = -EINVAL; 468 status = -EINVAL;
474 for (i=0; i<N; i++) { 469 for (i=0; i<N; i++) {
475 gid_t gid; 470 gid_t gid;
471 kgid_t kgid;
476 if (get_int(&mesg, &gid)) 472 if (get_int(&mesg, &gid))
477 goto out; 473 goto out;
478 GROUP_AT(rsci.cred.cr_group_info, i) = gid; 474 kgid = make_kgid(&init_user_ns, gid);
475 if (!gid_valid(kgid))
476 goto out;
477 GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
479 } 478 }
480 479
481 /* mech name */ 480 /* mech name */
@@ -499,29 +498,28 @@ static int rsc_parse(struct cache_detail *cd,
499 /* get client name */ 498 /* get client name */
500 len = qword_get(&mesg, buf, mlen); 499 len = qword_get(&mesg, buf, mlen);
501 if (len > 0) { 500 if (len > 0) {
502 rsci.client_name = kstrdup(buf, GFP_KERNEL); 501 rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
503 if (!rsci.client_name) 502 if (!rsci.cred.cr_principal)
504 goto out; 503 goto out;
505 } 504 }
506 505
507 } 506 }
508 rsci.h.expiry_time = expiry; 507 rsci.h.expiry_time = expiry;
509 rscp = rsc_update(&rsci, rscp); 508 rscp = rsc_update(cd, &rsci, rscp);
510 status = 0; 509 status = 0;
511out: 510out:
512 gss_mech_put(gm); 511 gss_mech_put(gm);
513 rsc_free(&rsci); 512 rsc_free(&rsci);
514 if (rscp) 513 if (rscp)
515 cache_put(&rscp->h, &rsc_cache); 514 cache_put(&rscp->h, cd);
516 else 515 else
517 status = -ENOMEM; 516 status = -ENOMEM;
518 return status; 517 return status;
519} 518}
520 519
521static struct cache_detail rsc_cache = { 520static struct cache_detail rsc_cache_template = {
522 .owner = THIS_MODULE, 521 .owner = THIS_MODULE,
523 .hash_size = RSC_HASHMAX, 522 .hash_size = RSC_HASHMAX,
524 .hash_table = rsc_table,
525 .name = "auth.rpcsec.context", 523 .name = "auth.rpcsec.context",
526 .cache_put = rsc_put, 524 .cache_put = rsc_put,
527 .cache_parse = rsc_parse, 525 .cache_parse = rsc_parse,
@@ -531,24 +529,24 @@ static struct cache_detail rsc_cache = {
531 .alloc = rsc_alloc, 529 .alloc = rsc_alloc,
532}; 530};
533 531
534static struct rsc *rsc_lookup(struct rsc *item) 532static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
535{ 533{
536 struct cache_head *ch; 534 struct cache_head *ch;
537 int hash = rsc_hash(item); 535 int hash = rsc_hash(item);
538 536
539 ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash); 537 ch = sunrpc_cache_lookup(cd, &item->h, hash);
540 if (ch) 538 if (ch)
541 return container_of(ch, struct rsc, h); 539 return container_of(ch, struct rsc, h);
542 else 540 else
543 return NULL; 541 return NULL;
544} 542}
545 543
546static struct rsc *rsc_update(struct rsc *new, struct rsc *old) 544static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
547{ 545{
548 struct cache_head *ch; 546 struct cache_head *ch;
549 int hash = rsc_hash(new); 547 int hash = rsc_hash(new);
550 548
551 ch = sunrpc_cache_update(&rsc_cache, &new->h, 549 ch = sunrpc_cache_update(cd, &new->h,
552 &old->h, hash); 550 &old->h, hash);
553 if (ch) 551 if (ch)
554 return container_of(ch, struct rsc, h); 552 return container_of(ch, struct rsc, h);
@@ -558,7 +556,7 @@ static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
558 556
559 557
560static struct rsc * 558static struct rsc *
561gss_svc_searchbyctx(struct xdr_netobj *handle) 559gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
562{ 560{
563 struct rsc rsci; 561 struct rsc rsci;
564 struct rsc *found; 562 struct rsc *found;
@@ -566,11 +564,11 @@ gss_svc_searchbyctx(struct xdr_netobj *handle)
566 memset(&rsci, 0, sizeof(rsci)); 564 memset(&rsci, 0, sizeof(rsci));
567 if (dup_to_netobj(&rsci.handle, handle->data, handle->len)) 565 if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
568 return NULL; 566 return NULL;
569 found = rsc_lookup(&rsci); 567 found = rsc_lookup(cd, &rsci);
570 rsc_free(&rsci); 568 rsc_free(&rsci);
571 if (!found) 569 if (!found)
572 return NULL; 570 return NULL;
573 if (cache_check(&rsc_cache, &found->h, NULL)) 571 if (cache_check(cd, &found->h, NULL))
574 return NULL; 572 return NULL;
575 return found; 573 return found;
576} 574}
@@ -931,16 +929,6 @@ struct gss_svc_data {
931 struct rsc *rsci; 929 struct rsc *rsci;
932}; 930};
933 931
934char *svc_gss_principal(struct svc_rqst *rqstp)
935{
936 struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
937
938 if (gd && gd->rsci)
939 return gd->rsci->client_name;
940 return NULL;
941}
942EXPORT_SYMBOL_GPL(svc_gss_principal);
943
944static int 932static int
945svcauth_gss_set_client(struct svc_rqst *rqstp) 933svcauth_gss_set_client(struct svc_rqst *rqstp)
946{ 934{
@@ -968,38 +956,31 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
968} 956}
969 957
970static inline int 958static inline int
971gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip) 959gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
960 struct xdr_netobj *out_handle, int *major_status)
972{ 961{
973 struct rsc *rsci; 962 struct rsc *rsci;
974 int rc; 963 int rc;
975 964
976 if (rsip->major_status != GSS_S_COMPLETE) 965 if (*major_status != GSS_S_COMPLETE)
977 return gss_write_null_verf(rqstp); 966 return gss_write_null_verf(rqstp);
978 rsci = gss_svc_searchbyctx(&rsip->out_handle); 967 rsci = gss_svc_searchbyctx(cd, out_handle);
979 if (rsci == NULL) { 968 if (rsci == NULL) {
980 rsip->major_status = GSS_S_NO_CONTEXT; 969 *major_status = GSS_S_NO_CONTEXT;
981 return gss_write_null_verf(rqstp); 970 return gss_write_null_verf(rqstp);
982 } 971 }
983 rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN); 972 rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
984 cache_put(&rsci->h, &rsc_cache); 973 cache_put(&rsci->h, cd);
985 return rc; 974 return rc;
986} 975}
987 976
988/* 977static inline int
989 * Having read the cred already and found we're in the context 978gss_read_verf(struct rpc_gss_wire_cred *gc,
990 * initiation case, read the verifier and initiate (or check the results 979 struct kvec *argv, __be32 *authp,
991 * of) upcalls to userspace for help with context initiation. If 980 struct xdr_netobj *in_handle,
992 * the upcall results are available, write the verifier and result. 981 struct xdr_netobj *in_token)
993 * Otherwise, drop the request pending an answer to the upcall.
994 */
995static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
996 struct rpc_gss_wire_cred *gc, __be32 *authp)
997{ 982{
998 struct kvec *argv = &rqstp->rq_arg.head[0];
999 struct kvec *resv = &rqstp->rq_res.head[0];
1000 struct xdr_netobj tmpobj; 983 struct xdr_netobj tmpobj;
1001 struct rsi *rsip, rsikey;
1002 int ret;
1003 984
1004 /* Read the verifier; should be NULL: */ 985 /* Read the verifier; should be NULL: */
1005 *authp = rpc_autherr_badverf; 986 *authp = rpc_autherr_badverf;
@@ -1009,53 +990,89 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
1009 return SVC_DENIED; 990 return SVC_DENIED;
1010 if (svc_getnl(argv) != 0) 991 if (svc_getnl(argv) != 0)
1011 return SVC_DENIED; 992 return SVC_DENIED;
1012
1013 /* Martial context handle and token for upcall: */ 993 /* Martial context handle and token for upcall: */
1014 *authp = rpc_autherr_badcred; 994 *authp = rpc_autherr_badcred;
1015 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) 995 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
1016 return SVC_DENIED; 996 return SVC_DENIED;
1017 memset(&rsikey, 0, sizeof(rsikey)); 997 if (dup_netobj(in_handle, &gc->gc_ctx))
1018 if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
1019 return SVC_CLOSE; 998 return SVC_CLOSE;
1020 *authp = rpc_autherr_badverf; 999 *authp = rpc_autherr_badverf;
1021 if (svc_safe_getnetobj(argv, &tmpobj)) { 1000 if (svc_safe_getnetobj(argv, &tmpobj)) {
1022 kfree(rsikey.in_handle.data); 1001 kfree(in_handle->data);
1023 return SVC_DENIED; 1002 return SVC_DENIED;
1024 } 1003 }
1025 if (dup_netobj(&rsikey.in_token, &tmpobj)) { 1004 if (dup_netobj(in_token, &tmpobj)) {
1026 kfree(rsikey.in_handle.data); 1005 kfree(in_handle->data);
1027 return SVC_CLOSE; 1006 return SVC_CLOSE;
1028 } 1007 }
1029 1008
1009 return 0;
1010}
1011
1012static inline int
1013gss_write_resv(struct kvec *resv, size_t size_limit,
1014 struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
1015 int major_status, int minor_status)
1016{
1017 if (resv->iov_len + 4 > size_limit)
1018 return -1;
1019 svc_putnl(resv, RPC_SUCCESS);
1020 if (svc_safe_putnetobj(resv, out_handle))
1021 return -1;
1022 if (resv->iov_len + 3 * 4 > size_limit)
1023 return -1;
1024 svc_putnl(resv, major_status);
1025 svc_putnl(resv, minor_status);
1026 svc_putnl(resv, GSS_SEQ_WIN);
1027 if (svc_safe_putnetobj(resv, out_token))
1028 return -1;
1029 return 0;
1030}
1031
1032/*
1033 * Having read the cred already and found we're in the context
1034 * initiation case, read the verifier and initiate (or check the results
1035 * of) upcalls to userspace for help with context initiation. If
1036 * the upcall results are available, write the verifier and result.
1037 * Otherwise, drop the request pending an answer to the upcall.
1038 */
1039static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
1040 struct rpc_gss_wire_cred *gc, __be32 *authp)
1041{
1042 struct kvec *argv = &rqstp->rq_arg.head[0];
1043 struct kvec *resv = &rqstp->rq_res.head[0];
1044 struct rsi *rsip, rsikey;
1045 int ret;
1046 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
1047
1048 memset(&rsikey, 0, sizeof(rsikey));
1049 ret = gss_read_verf(gc, argv, authp,
1050 &rsikey.in_handle, &rsikey.in_token);
1051 if (ret)
1052 return ret;
1053
1030 /* Perform upcall, or find upcall result: */ 1054 /* Perform upcall, or find upcall result: */
1031 rsip = rsi_lookup(&rsikey); 1055 rsip = rsi_lookup(sn->rsi_cache, &rsikey);
1032 rsi_free(&rsikey); 1056 rsi_free(&rsikey);
1033 if (!rsip) 1057 if (!rsip)
1034 return SVC_CLOSE; 1058 return SVC_CLOSE;
1035 if (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0) 1059 if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
1036 /* No upcall result: */ 1060 /* No upcall result: */
1037 return SVC_CLOSE; 1061 return SVC_CLOSE;
1038 1062
1039 ret = SVC_CLOSE; 1063 ret = SVC_CLOSE;
1040 /* Got an answer to the upcall; use it: */ 1064 /* Got an answer to the upcall; use it: */
1041 if (gss_write_init_verf(rqstp, rsip)) 1065 if (gss_write_init_verf(sn->rsc_cache, rqstp,
1042 goto out; 1066 &rsip->out_handle, &rsip->major_status))
1043 if (resv->iov_len + 4 > PAGE_SIZE)
1044 goto out;
1045 svc_putnl(resv, RPC_SUCCESS);
1046 if (svc_safe_putnetobj(resv, &rsip->out_handle))
1047 goto out; 1067 goto out;
1048 if (resv->iov_len + 3 * 4 > PAGE_SIZE) 1068 if (gss_write_resv(resv, PAGE_SIZE,
1049 goto out; 1069 &rsip->out_handle, &rsip->out_token,
1050 svc_putnl(resv, rsip->major_status); 1070 rsip->major_status, rsip->minor_status))
1051 svc_putnl(resv, rsip->minor_status);
1052 svc_putnl(resv, GSS_SEQ_WIN);
1053 if (svc_safe_putnetobj(resv, &rsip->out_token))
1054 goto out; 1071 goto out;
1055 1072
1056 ret = SVC_COMPLETE; 1073 ret = SVC_COMPLETE;
1057out: 1074out:
1058 cache_put(&rsip->h, &rsi_cache); 1075 cache_put(&rsip->h, sn->rsi_cache);
1059 return ret; 1076 return ret;
1060} 1077}
1061 1078
@@ -1079,6 +1096,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1079 __be32 *rpcstart; 1096 __be32 *rpcstart;
1080 __be32 *reject_stat = resv->iov_base + resv->iov_len; 1097 __be32 *reject_stat = resv->iov_base + resv->iov_len;
1081 int ret; 1098 int ret;
1099 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
1082 1100
1083 dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n", 1101 dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
1084 argv->iov_len); 1102 argv->iov_len);
@@ -1129,7 +1147,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1129 case RPC_GSS_PROC_DESTROY: 1147 case RPC_GSS_PROC_DESTROY:
1130 /* Look up the context, and check the verifier: */ 1148 /* Look up the context, and check the verifier: */
1131 *authp = rpcsec_gsserr_credproblem; 1149 *authp = rpcsec_gsserr_credproblem;
1132 rsci = gss_svc_searchbyctx(&gc->gc_ctx); 1150 rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
1133 if (!rsci) 1151 if (!rsci)
1134 goto auth_err; 1152 goto auth_err;
1135 switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) { 1153 switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) {
@@ -1189,7 +1207,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1189 } 1207 }
1190 svcdata->rsci = rsci; 1208 svcdata->rsci = rsci;
1191 cache_get(&rsci->h); 1209 cache_get(&rsci->h);
1192 rqstp->rq_flavor = gss_svc_to_pseudoflavor( 1210 rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
1193 rsci->mechctx->mech_type, gc->gc_svc); 1211 rsci->mechctx->mech_type, gc->gc_svc);
1194 ret = SVC_OK; 1212 ret = SVC_OK;
1195 goto out; 1213 goto out;
@@ -1209,7 +1227,7 @@ drop:
1209 ret = SVC_DROP; 1227 ret = SVC_DROP;
1210out: 1228out:
1211 if (rsci) 1229 if (rsci)
1212 cache_put(&rsci->h, &rsc_cache); 1230 cache_put(&rsci->h, sn->rsc_cache);
1213 return ret; 1231 return ret;
1214} 1232}
1215 1233
@@ -1362,6 +1380,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
1362 struct rpc_gss_wire_cred *gc = &gsd->clcred; 1380 struct rpc_gss_wire_cred *gc = &gsd->clcred;
1363 struct xdr_buf *resbuf = &rqstp->rq_res; 1381 struct xdr_buf *resbuf = &rqstp->rq_res;
1364 int stat = -EINVAL; 1382 int stat = -EINVAL;
1383 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
1365 1384
1366 if (gc->gc_proc != RPC_GSS_PROC_DATA) 1385 if (gc->gc_proc != RPC_GSS_PROC_DATA)
1367 goto out; 1386 goto out;
@@ -1404,7 +1423,7 @@ out_err:
1404 put_group_info(rqstp->rq_cred.cr_group_info); 1423 put_group_info(rqstp->rq_cred.cr_group_info);
1405 rqstp->rq_cred.cr_group_info = NULL; 1424 rqstp->rq_cred.cr_group_info = NULL;
1406 if (gsd->rsci) 1425 if (gsd->rsci)
1407 cache_put(&gsd->rsci->h, &rsc_cache); 1426 cache_put(&gsd->rsci->h, sn->rsc_cache);
1408 gsd->rsci = NULL; 1427 gsd->rsci = NULL;
1409 1428
1410 return stat; 1429 return stat;
@@ -1429,30 +1448,96 @@ static struct auth_ops svcauthops_gss = {
1429 .set_client = svcauth_gss_set_client, 1448 .set_client = svcauth_gss_set_client,
1430}; 1449};
1431 1450
1451static int rsi_cache_create_net(struct net *net)
1452{
1453 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1454 struct cache_detail *cd;
1455 int err;
1456
1457 cd = cache_create_net(&rsi_cache_template, net);
1458 if (IS_ERR(cd))
1459 return PTR_ERR(cd);
1460 err = cache_register_net(cd, net);
1461 if (err) {
1462 cache_destroy_net(cd, net);
1463 return err;
1464 }
1465 sn->rsi_cache = cd;
1466 return 0;
1467}
1468
1469static void rsi_cache_destroy_net(struct net *net)
1470{
1471 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1472 struct cache_detail *cd = sn->rsi_cache;
1473
1474 sn->rsi_cache = NULL;
1475 cache_purge(cd);
1476 cache_unregister_net(cd, net);
1477 cache_destroy_net(cd, net);
1478}
1479
1480static int rsc_cache_create_net(struct net *net)
1481{
1482 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1483 struct cache_detail *cd;
1484 int err;
1485
1486 cd = cache_create_net(&rsc_cache_template, net);
1487 if (IS_ERR(cd))
1488 return PTR_ERR(cd);
1489 err = cache_register_net(cd, net);
1490 if (err) {
1491 cache_destroy_net(cd, net);
1492 return err;
1493 }
1494 sn->rsc_cache = cd;
1495 return 0;
1496}
1497
1498static void rsc_cache_destroy_net(struct net *net)
1499{
1500 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1501 struct cache_detail *cd = sn->rsc_cache;
1502
1503 sn->rsc_cache = NULL;
1504 cache_purge(cd);
1505 cache_unregister_net(cd, net);
1506 cache_destroy_net(cd, net);
1507}
1508
1432int 1509int
1433gss_svc_init(void) 1510gss_svc_init_net(struct net *net)
1434{ 1511{
1435 int rv = svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss); 1512 int rv;
1513
1514 rv = rsc_cache_create_net(net);
1436 if (rv) 1515 if (rv)
1437 return rv; 1516 return rv;
1438 rv = cache_register(&rsc_cache); 1517 rv = rsi_cache_create_net(net);
1439 if (rv) 1518 if (rv)
1440 goto out1; 1519 goto out1;
1441 rv = cache_register(&rsi_cache);
1442 if (rv)
1443 goto out2;
1444 return 0; 1520 return 0;
1445out2:
1446 cache_unregister(&rsc_cache);
1447out1: 1521out1:
1448 svc_auth_unregister(RPC_AUTH_GSS); 1522 rsc_cache_destroy_net(net);
1449 return rv; 1523 return rv;
1450} 1524}
1451 1525
1452void 1526void
1527gss_svc_shutdown_net(struct net *net)
1528{
1529 rsi_cache_destroy_net(net);
1530 rsc_cache_destroy_net(net);
1531}
1532
1533int
1534gss_svc_init(void)
1535{
1536 return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
1537}
1538
1539void
1453gss_svc_shutdown(void) 1540gss_svc_shutdown(void)
1454{ 1541{
1455 cache_unregister(&rsc_cache);
1456 cache_unregister(&rsi_cache);
1457 svc_auth_unregister(RPC_AUTH_GSS); 1542 svc_auth_unregister(RPC_AUTH_GSS);
1458} 1543}
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index e50502d8ceb7..52c5abdee211 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sunrpc/clnt.h> 13#include <linux/sunrpc/clnt.h>
14#include <linux/sunrpc/auth.h> 14#include <linux/sunrpc/auth.h>
15#include <linux/user_namespace.h>
15 16
16#define NFS_NGROUPS 16 17#define NFS_NGROUPS 16
17 18
@@ -78,8 +79,11 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
78 groups = NFS_NGROUPS; 79 groups = NFS_NGROUPS;
79 80
80 cred->uc_gid = acred->gid; 81 cred->uc_gid = acred->gid;
81 for (i = 0; i < groups; i++) 82 for (i = 0; i < groups; i++) {
82 cred->uc_gids[i] = GROUP_AT(acred->group_info, i); 83 gid_t gid;
84 gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
85 cred->uc_gids[i] = gid;
86 }
83 if (i < NFS_NGROUPS) 87 if (i < NFS_NGROUPS)
84 cred->uc_gids[i] = NOGROUP; 88 cred->uc_gids[i] = NOGROUP;
85 89
@@ -126,9 +130,12 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
126 groups = acred->group_info->ngroups; 130 groups = acred->group_info->ngroups;
127 if (groups > NFS_NGROUPS) 131 if (groups > NFS_NGROUPS)
128 groups = NFS_NGROUPS; 132 groups = NFS_NGROUPS;
129 for (i = 0; i < groups ; i++) 133 for (i = 0; i < groups ; i++) {
130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i)) 134 gid_t gid;
135 gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
136 if (cred->uc_gids[i] != gid)
131 return 0; 137 return 0;
138 }
132 if (groups < NFS_NGROUPS && 139 if (groups < NFS_NGROUPS &&
133 cred->uc_gids[groups] != NOGROUP) 140 cred->uc_gids[groups] != NOGROUP)
134 return 0; 141 return 0;
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 3ad435a14ada..31def68a0f6e 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -25,6 +25,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/sunrpc/xprt.h> 26#include <linux/sunrpc/xprt.h>
27#include <linux/export.h> 27#include <linux/export.h>
28#include <linux/sunrpc/bc_xprt.h>
28 29
29#ifdef RPC_DEBUG 30#ifdef RPC_DEBUG
30#define RPCDBG_FACILITY RPCDBG_TRANS 31#define RPCDBG_FACILITY RPCDBG_TRANS
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 465df9ae1046..47ad2666fdf6 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -344,7 +344,7 @@ static int current_index;
344static void do_cache_clean(struct work_struct *work); 344static void do_cache_clean(struct work_struct *work);
345static struct delayed_work cache_cleaner; 345static struct delayed_work cache_cleaner;
346 346
347static void sunrpc_init_cache_detail(struct cache_detail *cd) 347void sunrpc_init_cache_detail(struct cache_detail *cd)
348{ 348{
349 rwlock_init(&cd->hash_lock); 349 rwlock_init(&cd->hash_lock);
350 INIT_LIST_HEAD(&cd->queue); 350 INIT_LIST_HEAD(&cd->queue);
@@ -360,8 +360,9 @@ static void sunrpc_init_cache_detail(struct cache_detail *cd)
360 /* start the cleaning process */ 360 /* start the cleaning process */
361 schedule_delayed_work(&cache_cleaner, 0); 361 schedule_delayed_work(&cache_cleaner, 0);
362} 362}
363EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
363 364
364static void sunrpc_destroy_cache_detail(struct cache_detail *cd) 365void sunrpc_destroy_cache_detail(struct cache_detail *cd)
365{ 366{
366 cache_purge(cd); 367 cache_purge(cd);
367 spin_lock(&cache_list_lock); 368 spin_lock(&cache_list_lock);
@@ -384,6 +385,7 @@ static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
384out: 385out:
385 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); 386 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
386} 387}
388EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
387 389
388/* clean cache tries to find something to clean 390/* clean cache tries to find something to clean
389 * and cleans it. 391 * and cleans it.
@@ -828,6 +830,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
828{ 830{
829 ssize_t ret; 831 ssize_t ret;
830 832
833 if (count == 0)
834 return -EINVAL;
831 if (copy_from_user(kaddr, buf, count)) 835 if (copy_from_user(kaddr, buf, count))
832 return -EFAULT; 836 return -EFAULT;
833 kaddr[count] = '\0'; 837 kaddr[count] = '\0';
@@ -1269,7 +1273,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1269 __acquires(cd->hash_lock) 1273 __acquires(cd->hash_lock)
1270{ 1274{
1271 loff_t n = *pos; 1275 loff_t n = *pos;
1272 unsigned hash, entry; 1276 unsigned int hash, entry;
1273 struct cache_head *ch; 1277 struct cache_head *ch;
1274 struct cache_detail *cd = ((struct handle*)m->private)->cd; 1278 struct cache_detail *cd = ((struct handle*)m->private)->cd;
1275 1279
@@ -1643,12 +1647,6 @@ int cache_register_net(struct cache_detail *cd, struct net *net)
1643} 1647}
1644EXPORT_SYMBOL_GPL(cache_register_net); 1648EXPORT_SYMBOL_GPL(cache_register_net);
1645 1649
1646int cache_register(struct cache_detail *cd)
1647{
1648 return cache_register_net(cd, &init_net);
1649}
1650EXPORT_SYMBOL_GPL(cache_register);
1651
1652void cache_unregister_net(struct cache_detail *cd, struct net *net) 1650void cache_unregister_net(struct cache_detail *cd, struct net *net)
1653{ 1651{
1654 remove_cache_proc_entries(cd, net); 1652 remove_cache_proc_entries(cd, net);
@@ -1656,11 +1654,31 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
1656} 1654}
1657EXPORT_SYMBOL_GPL(cache_unregister_net); 1655EXPORT_SYMBOL_GPL(cache_unregister_net);
1658 1656
1659void cache_unregister(struct cache_detail *cd) 1657struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
1658{
1659 struct cache_detail *cd;
1660
1661 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1662 if (cd == NULL)
1663 return ERR_PTR(-ENOMEM);
1664
1665 cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
1666 GFP_KERNEL);
1667 if (cd->hash_table == NULL) {
1668 kfree(cd);
1669 return ERR_PTR(-ENOMEM);
1670 }
1671 cd->net = net;
1672 return cd;
1673}
1674EXPORT_SYMBOL_GPL(cache_create_net);
1675
1676void cache_destroy_net(struct cache_detail *cd, struct net *net)
1660{ 1677{
1661 cache_unregister_net(cd, &init_net); 1678 kfree(cd->hash_table);
1679 kfree(cd);
1662} 1680}
1663EXPORT_SYMBOL_GPL(cache_unregister); 1681EXPORT_SYMBOL_GPL(cache_destroy_net);
1664 1682
1665static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, 1683static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1666 size_t count, loff_t *ppos) 1684 size_t count, loff_t *ppos)
@@ -1787,17 +1805,14 @@ int sunrpc_cache_register_pipefs(struct dentry *parent,
1787 struct dentry *dir; 1805 struct dentry *dir;
1788 int ret = 0; 1806 int ret = 0;
1789 1807
1790 sunrpc_init_cache_detail(cd);
1791 q.name = name; 1808 q.name = name;
1792 q.len = strlen(name); 1809 q.len = strlen(name);
1793 q.hash = full_name_hash(q.name, q.len); 1810 q.hash = full_name_hash(q.name, q.len);
1794 dir = rpc_create_cache_dir(parent, &q, umode, cd); 1811 dir = rpc_create_cache_dir(parent, &q, umode, cd);
1795 if (!IS_ERR(dir)) 1812 if (!IS_ERR(dir))
1796 cd->u.pipefs.dir = dir; 1813 cd->u.pipefs.dir = dir;
1797 else { 1814 else
1798 sunrpc_destroy_cache_detail(cd);
1799 ret = PTR_ERR(dir); 1815 ret = PTR_ERR(dir);
1800 }
1801 return ret; 1816 return ret;
1802} 1817}
1803EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); 1818EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
@@ -1806,7 +1821,6 @@ void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1806{ 1821{
1807 rpc_remove_cache_dir(cd->u.pipefs.dir); 1822 rpc_remove_cache_dir(cd->u.pipefs.dir);
1808 cd->u.pipefs.dir = NULL; 1823 cd->u.pipefs.dir = NULL;
1809 sunrpc_destroy_cache_detail(cd);
1810} 1824}
1811EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); 1825EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1812 1826
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f0268ea7e711..f56f045778ae 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -17,7 +17,6 @@
17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18 */ 18 */
19 19
20#include <asm/system.h>
21 20
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/types.h> 22#include <linux/types.h>
@@ -31,13 +30,16 @@
31#include <linux/in.h> 30#include <linux/in.h>
32#include <linux/in6.h> 31#include <linux/in6.h>
33#include <linux/un.h> 32#include <linux/un.h>
33#include <linux/rcupdate.h>
34 34
35#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/clnt.h>
36#include <linux/sunrpc/rpc_pipe_fs.h> 36#include <linux/sunrpc/rpc_pipe_fs.h>
37#include <linux/sunrpc/metrics.h> 37#include <linux/sunrpc/metrics.h>
38#include <linux/sunrpc/bc_xprt.h> 38#include <linux/sunrpc/bc_xprt.h>
39#include <trace/events/sunrpc.h>
39 40
40#include "sunrpc.h" 41#include "sunrpc.h"
42#include "netns.h"
41 43
42#ifdef RPC_DEBUG 44#ifdef RPC_DEBUG
43# define RPCDBG_FACILITY RPCDBG_CALL 45# define RPCDBG_FACILITY RPCDBG_CALL
@@ -50,8 +52,6 @@
50/* 52/*
51 * All RPC clients are linked into this list 53 * All RPC clients are linked into this list
52 */ 54 */
53static LIST_HEAD(all_clients);
54static DEFINE_SPINLOCK(rpc_client_lock);
55 55
56static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 56static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
57 57
@@ -81,82 +81,219 @@ static int rpc_ping(struct rpc_clnt *clnt);
81 81
82static void rpc_register_client(struct rpc_clnt *clnt) 82static void rpc_register_client(struct rpc_clnt *clnt)
83{ 83{
84 spin_lock(&rpc_client_lock); 84 struct net *net = rpc_net_ns(clnt);
85 list_add(&clnt->cl_clients, &all_clients); 85 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
86 spin_unlock(&rpc_client_lock); 86
87 spin_lock(&sn->rpc_client_lock);
88 list_add(&clnt->cl_clients, &sn->all_clients);
89 spin_unlock(&sn->rpc_client_lock);
87} 90}
88 91
89static void rpc_unregister_client(struct rpc_clnt *clnt) 92static void rpc_unregister_client(struct rpc_clnt *clnt)
90{ 93{
91 spin_lock(&rpc_client_lock); 94 struct net *net = rpc_net_ns(clnt);
95 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
96
97 spin_lock(&sn->rpc_client_lock);
92 list_del(&clnt->cl_clients); 98 list_del(&clnt->cl_clients);
93 spin_unlock(&rpc_client_lock); 99 spin_unlock(&sn->rpc_client_lock);
94} 100}
95 101
96static int 102static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
97rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 103{
104 if (clnt->cl_dentry) {
105 if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
106 clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
107 rpc_remove_client_dir(clnt->cl_dentry);
108 }
109 clnt->cl_dentry = NULL;
110}
111
112static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
113{
114 struct net *net = rpc_net_ns(clnt);
115 struct super_block *pipefs_sb;
116
117 pipefs_sb = rpc_get_sb_net(net);
118 if (pipefs_sb) {
119 __rpc_clnt_remove_pipedir(clnt);
120 rpc_put_sb_net(net);
121 }
122}
123
124static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
125 struct rpc_clnt *clnt,
126 const char *dir_name)
98{ 127{
99 static uint32_t clntid; 128 static uint32_t clntid;
100 struct path path, dir;
101 char name[15]; 129 char name[15];
102 struct qstr q = { 130 struct qstr q = { .name = name };
103 .name = name, 131 struct dentry *dir, *dentry;
104 };
105 int error; 132 int error;
106 133
107 clnt->cl_path.mnt = ERR_PTR(-ENOENT); 134 dir = rpc_d_lookup_sb(sb, dir_name);
108 clnt->cl_path.dentry = ERR_PTR(-ENOENT); 135 if (dir == NULL)
109 if (dir_name == NULL) 136 return dir;
110 return 0;
111
112 path.mnt = rpc_get_mount();
113 if (IS_ERR(path.mnt))
114 return PTR_ERR(path.mnt);
115 error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &dir);
116 if (error)
117 goto err;
118
119 for (;;) { 137 for (;;) {
120 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 138 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
121 name[sizeof(name) - 1] = '\0'; 139 name[sizeof(name) - 1] = '\0';
122 q.hash = full_name_hash(q.name, q.len); 140 q.hash = full_name_hash(q.name, q.len);
123 path.dentry = rpc_create_client_dir(dir.dentry, &q, clnt); 141 dentry = rpc_create_client_dir(dir, &q, clnt);
124 if (!IS_ERR(path.dentry)) 142 if (!IS_ERR(dentry))
125 break; 143 break;
126 error = PTR_ERR(path.dentry); 144 error = PTR_ERR(dentry);
127 if (error != -EEXIST) { 145 if (error != -EEXIST) {
128 printk(KERN_INFO "RPC: Couldn't create pipefs entry" 146 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
129 " %s/%s, error %d\n", 147 " %s/%s, error %d\n",
130 dir_name, name, error); 148 dir_name, name, error);
131 goto err_path_put; 149 break;
132 } 150 }
133 } 151 }
134 path_put(&dir); 152 dput(dir);
135 clnt->cl_path = path; 153 return dentry;
154}
155
156static int
157rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
158{
159 struct net *net = rpc_net_ns(clnt);
160 struct super_block *pipefs_sb;
161 struct dentry *dentry;
162
163 clnt->cl_dentry = NULL;
164 if (dir_name == NULL)
165 return 0;
166 pipefs_sb = rpc_get_sb_net(net);
167 if (!pipefs_sb)
168 return 0;
169 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
170 rpc_put_sb_net(net);
171 if (IS_ERR(dentry))
172 return PTR_ERR(dentry);
173 clnt->cl_dentry = dentry;
136 return 0; 174 return 0;
137err_path_put: 175}
138 path_put(&dir); 176
139err: 177static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
140 rpc_put_mount(); 178{
179 if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
180 ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
181 return 1;
182 return 0;
183}
184
185static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
186 struct super_block *sb)
187{
188 struct dentry *dentry;
189 int err = 0;
190
191 switch (event) {
192 case RPC_PIPEFS_MOUNT:
193 dentry = rpc_setup_pipedir_sb(sb, clnt,
194 clnt->cl_program->pipe_dir_name);
195 BUG_ON(dentry == NULL);
196 if (IS_ERR(dentry))
197 return PTR_ERR(dentry);
198 clnt->cl_dentry = dentry;
199 if (clnt->cl_auth->au_ops->pipes_create) {
200 err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
201 if (err)
202 __rpc_clnt_remove_pipedir(clnt);
203 }
204 break;
205 case RPC_PIPEFS_UMOUNT:
206 __rpc_clnt_remove_pipedir(clnt);
207 break;
208 default:
209 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
210 return -ENOTSUPP;
211 }
212 return err;
213}
214
215static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
216 struct super_block *sb)
217{
218 int error = 0;
219
220 for (;; clnt = clnt->cl_parent) {
221 if (!rpc_clnt_skip_event(clnt, event))
222 error = __rpc_clnt_handle_event(clnt, event, sb);
223 if (error || clnt == clnt->cl_parent)
224 break;
225 }
141 return error; 226 return error;
142} 227}
143 228
229static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
230{
231 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
232 struct rpc_clnt *clnt;
233
234 spin_lock(&sn->rpc_client_lock);
235 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
236 if (clnt->cl_program->pipe_dir_name == NULL)
237 break;
238 if (rpc_clnt_skip_event(clnt, event))
239 continue;
240 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
241 continue;
242 spin_unlock(&sn->rpc_client_lock);
243 return clnt;
244 }
245 spin_unlock(&sn->rpc_client_lock);
246 return NULL;
247}
248
249static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
250 void *ptr)
251{
252 struct super_block *sb = ptr;
253 struct rpc_clnt *clnt;
254 int error = 0;
255
256 while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
257 error = __rpc_pipefs_event(clnt, event, sb);
258 rpc_release_client(clnt);
259 if (error)
260 break;
261 }
262 return error;
263}
264
265static struct notifier_block rpc_clients_block = {
266 .notifier_call = rpc_pipefs_event,
267 .priority = SUNRPC_PIPEFS_RPC_PRIO,
268};
269
270int rpc_clients_notifier_register(void)
271{
272 return rpc_pipefs_notifier_register(&rpc_clients_block);
273}
274
275void rpc_clients_notifier_unregister(void)
276{
277 return rpc_pipefs_notifier_unregister(&rpc_clients_block);
278}
279
280static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
281{
282 clnt->cl_nodelen = strlen(nodename);
283 if (clnt->cl_nodelen > UNX_MAXNODENAME)
284 clnt->cl_nodelen = UNX_MAXNODENAME;
285 memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
286}
287
144static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) 288static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
145{ 289{
146 struct rpc_program *program = args->program; 290 const struct rpc_program *program = args->program;
147 struct rpc_version *version; 291 const struct rpc_version *version;
148 struct rpc_clnt *clnt = NULL; 292 struct rpc_clnt *clnt = NULL;
149 struct rpc_auth *auth; 293 struct rpc_auth *auth;
150 int err; 294 int err;
151 size_t len;
152 295
153 /* sanity check the name before trying to print it */ 296 /* sanity check the name before trying to print it */
154 err = -EINVAL;
155 len = strlen(args->servername);
156 if (len > RPC_MAXNETNAMELEN)
157 goto out_no_rpciod;
158 len++;
159
160 dprintk("RPC: creating %s client for %s (xprt %p)\n", 297 dprintk("RPC: creating %s client for %s (xprt %p)\n",
161 program->name, args->servername, xprt); 298 program->name, args->servername, xprt);
162 299
@@ -179,17 +316,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
179 goto out_err; 316 goto out_err;
180 clnt->cl_parent = clnt; 317 clnt->cl_parent = clnt;
181 318
182 clnt->cl_server = clnt->cl_inline_name; 319 rcu_assign_pointer(clnt->cl_xprt, xprt);
183 if (len > sizeof(clnt->cl_inline_name)) {
184 char *buf = kmalloc(len, GFP_KERNEL);
185 if (buf != NULL)
186 clnt->cl_server = buf;
187 else
188 len = sizeof(clnt->cl_inline_name);
189 }
190 strlcpy(clnt->cl_server, args->servername, len);
191
192 clnt->cl_xprt = xprt;
193 clnt->cl_procinfo = version->procs; 320 clnt->cl_procinfo = version->procs;
194 clnt->cl_maxproc = version->nrprocs; 321 clnt->cl_maxproc = version->nrprocs;
195 clnt->cl_protname = program->name; 322 clnt->cl_protname = program->name;
@@ -204,7 +331,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
204 INIT_LIST_HEAD(&clnt->cl_tasks); 331 INIT_LIST_HEAD(&clnt->cl_tasks);
205 spin_lock_init(&clnt->cl_lock); 332 spin_lock_init(&clnt->cl_lock);
206 333
207 if (!xprt_bound(clnt->cl_xprt)) 334 if (!xprt_bound(xprt))
208 clnt->cl_autobind = 1; 335 clnt->cl_autobind = 1;
209 336
210 clnt->cl_timeout = xprt->timeout; 337 clnt->cl_timeout = xprt->timeout;
@@ -238,25 +365,17 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
238 } 365 }
239 366
240 /* save the nodename */ 367 /* save the nodename */
241 clnt->cl_nodelen = strlen(init_utsname()->nodename); 368 rpc_clnt_set_nodename(clnt, utsname()->nodename);
242 if (clnt->cl_nodelen > UNX_MAXNODENAME)
243 clnt->cl_nodelen = UNX_MAXNODENAME;
244 memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
245 rpc_register_client(clnt); 369 rpc_register_client(clnt);
246 return clnt; 370 return clnt;
247 371
248out_no_auth: 372out_no_auth:
249 if (!IS_ERR(clnt->cl_path.dentry)) { 373 rpc_clnt_remove_pipedir(clnt);
250 rpc_remove_client_dir(clnt->cl_path.dentry);
251 rpc_put_mount();
252 }
253out_no_path: 374out_no_path:
254 kfree(clnt->cl_principal); 375 kfree(clnt->cl_principal);
255out_no_principal: 376out_no_principal:
256 rpc_free_iostats(clnt->cl_metrics); 377 rpc_free_iostats(clnt->cl_metrics);
257out_no_stats: 378out_no_stats:
258 if (clnt->cl_server != clnt->cl_inline_name)
259 kfree(clnt->cl_server);
260 kfree(clnt); 379 kfree(clnt);
261out_err: 380out_err:
262 xprt_put(xprt); 381 xprt_put(xprt);
@@ -286,6 +405,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
286 .srcaddr = args->saddress, 405 .srcaddr = args->saddress,
287 .dstaddr = args->address, 406 .dstaddr = args->address,
288 .addrlen = args->addrsize, 407 .addrlen = args->addrsize,
408 .servername = args->servername,
289 .bc_xprt = args->bc_xprt, 409 .bc_xprt = args->bc_xprt,
290 }; 410 };
291 char servername[48]; 411 char servername[48];
@@ -294,7 +414,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
294 * If the caller chooses not to specify a hostname, whip 414 * If the caller chooses not to specify a hostname, whip
295 * up a string representation of the passed-in address. 415 * up a string representation of the passed-in address.
296 */ 416 */
297 if (args->servername == NULL) { 417 if (xprtargs.servername == NULL) {
298 struct sockaddr_un *sun = 418 struct sockaddr_un *sun =
299 (struct sockaddr_un *)args->address; 419 (struct sockaddr_un *)args->address;
300 struct sockaddr_in *sin = 420 struct sockaddr_in *sin =
@@ -321,7 +441,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
321 * address family isn't recognized. */ 441 * address family isn't recognized. */
322 return ERR_PTR(-EINVAL); 442 return ERR_PTR(-EINVAL);
323 } 443 }
324 args->servername = servername; 444 xprtargs.servername = servername;
325 } 445 }
326 446
327 xprt = xprt_create_transport(&xprtargs); 447 xprt = xprt_create_transport(&xprtargs);
@@ -374,6 +494,7 @@ struct rpc_clnt *
374rpc_clone_client(struct rpc_clnt *clnt) 494rpc_clone_client(struct rpc_clnt *clnt)
375{ 495{
376 struct rpc_clnt *new; 496 struct rpc_clnt *new;
497 struct rpc_xprt *xprt;
377 int err = -ENOMEM; 498 int err = -ENOMEM;
378 499
379 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); 500 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
@@ -393,18 +514,26 @@ rpc_clone_client(struct rpc_clnt *clnt)
393 if (new->cl_principal == NULL) 514 if (new->cl_principal == NULL)
394 goto out_no_principal; 515 goto out_no_principal;
395 } 516 }
517 rcu_read_lock();
518 xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
519 rcu_read_unlock();
520 if (xprt == NULL)
521 goto out_no_transport;
522 rcu_assign_pointer(new->cl_xprt, xprt);
396 atomic_set(&new->cl_count, 1); 523 atomic_set(&new->cl_count, 1);
397 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 524 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
398 if (err != 0) 525 if (err != 0)
399 goto out_no_path; 526 goto out_no_path;
527 rpc_clnt_set_nodename(new, utsname()->nodename);
400 if (new->cl_auth) 528 if (new->cl_auth)
401 atomic_inc(&new->cl_auth->au_count); 529 atomic_inc(&new->cl_auth->au_count);
402 xprt_get(clnt->cl_xprt);
403 atomic_inc(&clnt->cl_count); 530 atomic_inc(&clnt->cl_count);
404 rpc_register_client(new); 531 rpc_register_client(new);
405 rpciod_up(); 532 rpciod_up();
406 return new; 533 return new;
407out_no_path: 534out_no_path:
535 xprt_put(xprt);
536out_no_transport:
408 kfree(new->cl_principal); 537 kfree(new->cl_principal);
409out_no_principal: 538out_no_principal:
410 rpc_free_iostats(new->cl_metrics); 539 rpc_free_iostats(new->cl_metrics);
@@ -453,8 +582,9 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks);
453 */ 582 */
454void rpc_shutdown_client(struct rpc_clnt *clnt) 583void rpc_shutdown_client(struct rpc_clnt *clnt)
455{ 584{
456 dprintk("RPC: shutting down %s client for %s\n", 585 dprintk_rcu("RPC: shutting down %s client for %s\n",
457 clnt->cl_protname, clnt->cl_server); 586 clnt->cl_protname,
587 rcu_dereference(clnt->cl_xprt)->servername);
458 588
459 while (!list_empty(&clnt->cl_tasks)) { 589 while (!list_empty(&clnt->cl_tasks)) {
460 rpc_killall_tasks(clnt); 590 rpc_killall_tasks(clnt);
@@ -472,24 +602,17 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
472static void 602static void
473rpc_free_client(struct rpc_clnt *clnt) 603rpc_free_client(struct rpc_clnt *clnt)
474{ 604{
475 dprintk("RPC: destroying %s client for %s\n", 605 dprintk_rcu("RPC: destroying %s client for %s\n",
476 clnt->cl_protname, clnt->cl_server); 606 clnt->cl_protname,
477 if (!IS_ERR(clnt->cl_path.dentry)) { 607 rcu_dereference(clnt->cl_xprt)->servername);
478 rpc_remove_client_dir(clnt->cl_path.dentry); 608 if (clnt->cl_parent != clnt)
479 rpc_put_mount();
480 }
481 if (clnt->cl_parent != clnt) {
482 rpc_release_client(clnt->cl_parent); 609 rpc_release_client(clnt->cl_parent);
483 goto out_free;
484 }
485 if (clnt->cl_server != clnt->cl_inline_name)
486 kfree(clnt->cl_server);
487out_free:
488 rpc_unregister_client(clnt); 610 rpc_unregister_client(clnt);
611 rpc_clnt_remove_pipedir(clnt);
489 rpc_free_iostats(clnt->cl_metrics); 612 rpc_free_iostats(clnt->cl_metrics);
490 kfree(clnt->cl_principal); 613 kfree(clnt->cl_principal);
491 clnt->cl_metrics = NULL; 614 clnt->cl_metrics = NULL;
492 xprt_put(clnt->cl_xprt); 615 xprt_put(rcu_dereference_raw(clnt->cl_xprt));
493 rpciod_down(); 616 rpciod_down();
494 kfree(clnt); 617 kfree(clnt);
495} 618}
@@ -542,11 +665,11 @@ rpc_release_client(struct rpc_clnt *clnt)
542 * The Sun NFSv2/v3 ACL protocol can do this. 665 * The Sun NFSv2/v3 ACL protocol can do this.
543 */ 666 */
544struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 667struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
545 struct rpc_program *program, 668 const struct rpc_program *program,
546 u32 vers) 669 u32 vers)
547{ 670{
548 struct rpc_clnt *clnt; 671 struct rpc_clnt *clnt;
549 struct rpc_version *version; 672 const struct rpc_version *version;
550 int err; 673 int err;
551 674
552 BUG_ON(vers >= program->nrvers || !program->version[vers]); 675 BUG_ON(vers >= program->nrvers || !program->version[vers]);
@@ -778,13 +901,18 @@ EXPORT_SYMBOL_GPL(rpc_call_start);
778size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) 901size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
779{ 902{
780 size_t bytes; 903 size_t bytes;
781 struct rpc_xprt *xprt = clnt->cl_xprt; 904 struct rpc_xprt *xprt;
905
906 rcu_read_lock();
907 xprt = rcu_dereference(clnt->cl_xprt);
782 908
783 bytes = sizeof(xprt->addr); 909 bytes = xprt->addrlen;
784 if (bytes > bufsize) 910 if (bytes > bufsize)
785 bytes = bufsize; 911 bytes = bufsize;
786 memcpy(buf, &clnt->cl_xprt->addr, bytes); 912 memcpy(buf, &xprt->addr, bytes);
787 return xprt->addrlen; 913 rcu_read_unlock();
914
915 return bytes;
788} 916}
789EXPORT_SYMBOL_GPL(rpc_peeraddr); 917EXPORT_SYMBOL_GPL(rpc_peeraddr);
790 918
@@ -793,11 +921,16 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr);
793 * @clnt: RPC client structure 921 * @clnt: RPC client structure
794 * @format: address format 922 * @format: address format
795 * 923 *
924 * NB: the lifetime of the memory referenced by the returned pointer is
925 * the same as the rpc_xprt itself. As long as the caller uses this
926 * pointer, it must hold the RCU read lock.
796 */ 927 */
797const char *rpc_peeraddr2str(struct rpc_clnt *clnt, 928const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
798 enum rpc_display_format_t format) 929 enum rpc_display_format_t format)
799{ 930{
800 struct rpc_xprt *xprt = clnt->cl_xprt; 931 struct rpc_xprt *xprt;
932
933 xprt = rcu_dereference(clnt->cl_xprt);
801 934
802 if (xprt->address_strings[format] != NULL) 935 if (xprt->address_strings[format] != NULL)
803 return xprt->address_strings[format]; 936 return xprt->address_strings[format];
@@ -806,17 +939,203 @@ const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
806} 939}
807EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 940EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
808 941
942static const struct sockaddr_in rpc_inaddr_loopback = {
943 .sin_family = AF_INET,
944 .sin_addr.s_addr = htonl(INADDR_ANY),
945};
946
947static const struct sockaddr_in6 rpc_in6addr_loopback = {
948 .sin6_family = AF_INET6,
949 .sin6_addr = IN6ADDR_ANY_INIT,
950};
951
952/*
953 * Try a getsockname() on a connected datagram socket. Using a
954 * connected datagram socket prevents leaving a socket in TIME_WAIT.
955 * This conserves the ephemeral port number space.
956 *
957 * Returns zero and fills in "buf" if successful; otherwise, a
958 * negative errno is returned.
959 */
960static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
961 struct sockaddr *buf, int buflen)
962{
963 struct socket *sock;
964 int err;
965
966 err = __sock_create(net, sap->sa_family,
967 SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
968 if (err < 0) {
969 dprintk("RPC: can't create UDP socket (%d)\n", err);
970 goto out;
971 }
972
973 switch (sap->sa_family) {
974 case AF_INET:
975 err = kernel_bind(sock,
976 (struct sockaddr *)&rpc_inaddr_loopback,
977 sizeof(rpc_inaddr_loopback));
978 break;
979 case AF_INET6:
980 err = kernel_bind(sock,
981 (struct sockaddr *)&rpc_in6addr_loopback,
982 sizeof(rpc_in6addr_loopback));
983 break;
984 default:
985 err = -EAFNOSUPPORT;
986 goto out;
987 }
988 if (err < 0) {
989 dprintk("RPC: can't bind UDP socket (%d)\n", err);
990 goto out_release;
991 }
992
993 err = kernel_connect(sock, sap, salen, 0);
994 if (err < 0) {
995 dprintk("RPC: can't connect UDP socket (%d)\n", err);
996 goto out_release;
997 }
998
999 err = kernel_getsockname(sock, buf, &buflen);
1000 if (err < 0) {
1001 dprintk("RPC: getsockname failed (%d)\n", err);
1002 goto out_release;
1003 }
1004
1005 err = 0;
1006 if (buf->sa_family == AF_INET6) {
1007 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1008 sin6->sin6_scope_id = 0;
1009 }
1010 dprintk("RPC: %s succeeded\n", __func__);
1011
1012out_release:
1013 sock_release(sock);
1014out:
1015 return err;
1016}
1017
1018/*
1019 * Scraping a connected socket failed, so we don't have a useable
1020 * local address. Fallback: generate an address that will prevent
1021 * the server from calling us back.
1022 *
1023 * Returns zero and fills in "buf" if successful; otherwise, a
1024 * negative errno is returned.
1025 */
1026static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1027{
1028 switch (family) {
1029 case AF_INET:
1030 if (buflen < sizeof(rpc_inaddr_loopback))
1031 return -EINVAL;
1032 memcpy(buf, &rpc_inaddr_loopback,
1033 sizeof(rpc_inaddr_loopback));
1034 break;
1035 case AF_INET6:
1036 if (buflen < sizeof(rpc_in6addr_loopback))
1037 return -EINVAL;
1038 memcpy(buf, &rpc_in6addr_loopback,
1039 sizeof(rpc_in6addr_loopback));
1040 default:
1041 dprintk("RPC: %s: address family not supported\n",
1042 __func__);
1043 return -EAFNOSUPPORT;
1044 }
1045 dprintk("RPC: %s: succeeded\n", __func__);
1046 return 0;
1047}
1048
1049/**
1050 * rpc_localaddr - discover local endpoint address for an RPC client
1051 * @clnt: RPC client structure
1052 * @buf: target buffer
1053 * @buflen: size of target buffer, in bytes
1054 *
1055 * Returns zero and fills in "buf" and "buflen" if successful;
1056 * otherwise, a negative errno is returned.
1057 *
1058 * This works even if the underlying transport is not currently connected,
1059 * or if the upper layer never previously provided a source address.
1060 *
1061 * The result of this function call is transient: multiple calls in
1062 * succession may give different results, depending on how local
1063 * networking configuration changes over time.
1064 */
1065int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1066{
1067 struct sockaddr_storage address;
1068 struct sockaddr *sap = (struct sockaddr *)&address;
1069 struct rpc_xprt *xprt;
1070 struct net *net;
1071 size_t salen;
1072 int err;
1073
1074 rcu_read_lock();
1075 xprt = rcu_dereference(clnt->cl_xprt);
1076 salen = xprt->addrlen;
1077 memcpy(sap, &xprt->addr, salen);
1078 net = get_net(xprt->xprt_net);
1079 rcu_read_unlock();
1080
1081 rpc_set_port(sap, 0);
1082 err = rpc_sockname(net, sap, salen, buf, buflen);
1083 put_net(net);
1084 if (err != 0)
1085 /* Couldn't discover local address, return ANYADDR */
1086 return rpc_anyaddr(sap->sa_family, buf, buflen);
1087 return 0;
1088}
1089EXPORT_SYMBOL_GPL(rpc_localaddr);
1090
809void 1091void
810rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 1092rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
811{ 1093{
812 struct rpc_xprt *xprt = clnt->cl_xprt; 1094 struct rpc_xprt *xprt;
1095
1096 rcu_read_lock();
1097 xprt = rcu_dereference(clnt->cl_xprt);
813 if (xprt->ops->set_buffer_size) 1098 if (xprt->ops->set_buffer_size)
814 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 1099 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1100 rcu_read_unlock();
815} 1101}
816EXPORT_SYMBOL_GPL(rpc_setbufsize); 1102EXPORT_SYMBOL_GPL(rpc_setbufsize);
817 1103
818/* 1104/**
819 * Return size of largest payload RPC client can support, in bytes 1105 * rpc_protocol - Get transport protocol number for an RPC client
1106 * @clnt: RPC client to query
1107 *
1108 */
1109int rpc_protocol(struct rpc_clnt *clnt)
1110{
1111 int protocol;
1112
1113 rcu_read_lock();
1114 protocol = rcu_dereference(clnt->cl_xprt)->prot;
1115 rcu_read_unlock();
1116 return protocol;
1117}
1118EXPORT_SYMBOL_GPL(rpc_protocol);
1119
1120/**
1121 * rpc_net_ns - Get the network namespace for this RPC client
1122 * @clnt: RPC client to query
1123 *
1124 */
1125struct net *rpc_net_ns(struct rpc_clnt *clnt)
1126{
1127 struct net *ret;
1128
1129 rcu_read_lock();
1130 ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1131 rcu_read_unlock();
1132 return ret;
1133}
1134EXPORT_SYMBOL_GPL(rpc_net_ns);
1135
1136/**
1137 * rpc_max_payload - Get maximum payload size for a transport, in bytes
1138 * @clnt: RPC client to query
820 * 1139 *
821 * For stream transports, this is one RPC record fragment (see RFC 1140 * For stream transports, this is one RPC record fragment (see RFC
822 * 1831), as we don't support multi-record requests yet. For datagram 1141 * 1831), as we don't support multi-record requests yet. For datagram
@@ -825,7 +1144,12 @@ EXPORT_SYMBOL_GPL(rpc_setbufsize);
825 */ 1144 */
826size_t rpc_max_payload(struct rpc_clnt *clnt) 1145size_t rpc_max_payload(struct rpc_clnt *clnt)
827{ 1146{
828 return clnt->cl_xprt->max_payload; 1147 size_t ret;
1148
1149 rcu_read_lock();
1150 ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1151 rcu_read_unlock();
1152 return ret;
829} 1153}
830EXPORT_SYMBOL_GPL(rpc_max_payload); 1154EXPORT_SYMBOL_GPL(rpc_max_payload);
831 1155
@@ -836,8 +1160,11 @@ EXPORT_SYMBOL_GPL(rpc_max_payload);
836 */ 1160 */
837void rpc_force_rebind(struct rpc_clnt *clnt) 1161void rpc_force_rebind(struct rpc_clnt *clnt)
838{ 1162{
839 if (clnt->cl_autobind) 1163 if (clnt->cl_autobind) {
840 xprt_clear_bound(clnt->cl_xprt); 1164 rcu_read_lock();
1165 xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1166 rcu_read_unlock();
1167 }
841} 1168}
842EXPORT_SYMBOL_GPL(rpc_force_rebind); 1169EXPORT_SYMBOL_GPL(rpc_force_rebind);
843 1170
@@ -959,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
959 } 1286 }
960 1287
961 switch (status) { 1288 switch (status) {
1289 case -ENOMEM:
1290 rpc_delay(task, HZ >> 2);
962 case -EAGAIN: /* woken up; retry */ 1291 case -EAGAIN: /* woken up; retry */
963 task->tk_action = call_reserve; 1292 task->tk_action = call_reserve;
964 return; 1293 return;
@@ -1163,6 +1492,7 @@ call_bind_status(struct rpc_task *task)
1163 return; 1492 return;
1164 } 1493 }
1165 1494
1495 trace_rpc_bind_status(task);
1166 switch (task->tk_status) { 1496 switch (task->tk_status) {
1167 case -ENOMEM: 1497 case -ENOMEM:
1168 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); 1498 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -1262,6 +1592,7 @@ call_connect_status(struct rpc_task *task)
1262 return; 1592 return;
1263 } 1593 }
1264 1594
1595 trace_rpc_connect_status(task, status);
1265 switch (status) { 1596 switch (status) {
1266 /* if soft mounted, test if we've timed out */ 1597 /* if soft mounted, test if we've timed out */
1267 case -ETIMEDOUT: 1598 case -ETIMEDOUT:
@@ -1450,6 +1781,7 @@ call_status(struct rpc_task *task)
1450 return; 1781 return;
1451 } 1782 }
1452 1783
1784 trace_rpc_call_status(task);
1453 task->tk_status = 0; 1785 task->tk_status = 0;
1454 switch(status) { 1786 switch(status) {
1455 case -EHOSTDOWN: 1787 case -EHOSTDOWN:
@@ -1513,8 +1845,11 @@ call_timeout(struct rpc_task *task)
1513 } 1845 }
1514 if (RPC_IS_SOFT(task)) { 1846 if (RPC_IS_SOFT(task)) {
1515 if (clnt->cl_chatty) 1847 if (clnt->cl_chatty)
1848 rcu_read_lock();
1516 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1849 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1517 clnt->cl_protname, clnt->cl_server); 1850 clnt->cl_protname,
1851 rcu_dereference(clnt->cl_xprt)->servername);
1852 rcu_read_unlock();
1518 if (task->tk_flags & RPC_TASK_TIMEOUT) 1853 if (task->tk_flags & RPC_TASK_TIMEOUT)
1519 rpc_exit(task, -ETIMEDOUT); 1854 rpc_exit(task, -ETIMEDOUT);
1520 else 1855 else
@@ -1524,9 +1859,13 @@ call_timeout(struct rpc_task *task)
1524 1859
1525 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 1860 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1526 task->tk_flags |= RPC_CALL_MAJORSEEN; 1861 task->tk_flags |= RPC_CALL_MAJORSEEN;
1527 if (clnt->cl_chatty) 1862 if (clnt->cl_chatty) {
1863 rcu_read_lock();
1528 printk(KERN_NOTICE "%s: server %s not responding, still trying\n", 1864 printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1529 clnt->cl_protname, clnt->cl_server); 1865 clnt->cl_protname,
1866 rcu_dereference(clnt->cl_xprt)->servername);
1867 rcu_read_unlock();
1868 }
1530 } 1869 }
1531 rpc_force_rebind(clnt); 1870 rpc_force_rebind(clnt);
1532 /* 1871 /*
@@ -1555,9 +1894,13 @@ call_decode(struct rpc_task *task)
1555 dprint_status(task); 1894 dprint_status(task);
1556 1895
1557 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 1896 if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1558 if (clnt->cl_chatty) 1897 if (clnt->cl_chatty) {
1898 rcu_read_lock();
1559 printk(KERN_NOTICE "%s: server %s OK\n", 1899 printk(KERN_NOTICE "%s: server %s OK\n",
1560 clnt->cl_protname, clnt->cl_server); 1900 clnt->cl_protname,
1901 rcu_dereference(clnt->cl_xprt)->servername);
1902 rcu_read_unlock();
1903 }
1561 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 1904 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1562 } 1905 }
1563 1906
@@ -1635,6 +1978,7 @@ rpc_encode_header(struct rpc_task *task)
1635static __be32 * 1978static __be32 *
1636rpc_verify_header(struct rpc_task *task) 1979rpc_verify_header(struct rpc_task *task)
1637{ 1980{
1981 struct rpc_clnt *clnt = task->tk_client;
1638 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; 1982 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1639 int len = task->tk_rqstp->rq_rcv_buf.len >> 2; 1983 int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
1640 __be32 *p = iov->iov_base; 1984 __be32 *p = iov->iov_base;
@@ -1707,8 +2051,11 @@ rpc_verify_header(struct rpc_task *task)
1707 task->tk_action = call_bind; 2051 task->tk_action = call_bind;
1708 goto out_retry; 2052 goto out_retry;
1709 case RPC_AUTH_TOOWEAK: 2053 case RPC_AUTH_TOOWEAK:
2054 rcu_read_lock();
1710 printk(KERN_NOTICE "RPC: server %s requires stronger " 2055 printk(KERN_NOTICE "RPC: server %s requires stronger "
1711 "authentication.\n", task->tk_client->cl_server); 2056 "authentication.\n",
2057 rcu_dereference(clnt->cl_xprt)->servername);
2058 rcu_read_unlock();
1712 break; 2059 break;
1713 default: 2060 default:
1714 dprintk("RPC: %5u %s: unknown auth error: %x\n", 2061 dprintk("RPC: %5u %s: unknown auth error: %x\n",
@@ -1731,28 +2078,27 @@ rpc_verify_header(struct rpc_task *task)
1731 case RPC_SUCCESS: 2078 case RPC_SUCCESS:
1732 return p; 2079 return p;
1733 case RPC_PROG_UNAVAIL: 2080 case RPC_PROG_UNAVAIL:
1734 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", 2081 dprintk_rcu("RPC: %5u %s: program %u is unsupported "
1735 task->tk_pid, __func__, 2082 "by server %s\n", task->tk_pid, __func__,
1736 (unsigned int)task->tk_client->cl_prog, 2083 (unsigned int)clnt->cl_prog,
1737 task->tk_client->cl_server); 2084 rcu_dereference(clnt->cl_xprt)->servername);
1738 error = -EPFNOSUPPORT; 2085 error = -EPFNOSUPPORT;
1739 goto out_err; 2086 goto out_err;
1740 case RPC_PROG_MISMATCH: 2087 case RPC_PROG_MISMATCH:
1741 dprintk("RPC: %5u %s: program %u, version %u unsupported by " 2088 dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
1742 "server %s\n", task->tk_pid, __func__, 2089 "by server %s\n", task->tk_pid, __func__,
1743 (unsigned int)task->tk_client->cl_prog, 2090 (unsigned int)clnt->cl_prog,
1744 (unsigned int)task->tk_client->cl_vers, 2091 (unsigned int)clnt->cl_vers,
1745 task->tk_client->cl_server); 2092 rcu_dereference(clnt->cl_xprt)->servername);
1746 error = -EPROTONOSUPPORT; 2093 error = -EPROTONOSUPPORT;
1747 goto out_err; 2094 goto out_err;
1748 case RPC_PROC_UNAVAIL: 2095 case RPC_PROC_UNAVAIL:
1749 dprintk("RPC: %5u %s: proc %s unsupported by program %u, " 2096 dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
1750 "version %u on server %s\n", 2097 "version %u on server %s\n",
1751 task->tk_pid, __func__, 2098 task->tk_pid, __func__,
1752 rpc_proc_name(task), 2099 rpc_proc_name(task),
1753 task->tk_client->cl_prog, 2100 clnt->cl_prog, clnt->cl_vers,
1754 task->tk_client->cl_vers, 2101 rcu_dereference(clnt->cl_xprt)->servername);
1755 task->tk_client->cl_server);
1756 error = -EOPNOTSUPP; 2102 error = -EOPNOTSUPP;
1757 goto out_err; 2103 goto out_err;
1758 case RPC_GARBAGE_ARGS: 2104 case RPC_GARBAGE_ARGS:
@@ -1766,7 +2112,7 @@ rpc_verify_header(struct rpc_task *task)
1766 } 2112 }
1767 2113
1768out_garbage: 2114out_garbage:
1769 task->tk_client->cl_stats->rpcgarbage++; 2115 clnt->cl_stats->rpcgarbage++;
1770 if (task->tk_garb_retry) { 2116 if (task->tk_garb_retry) {
1771 task->tk_garb_retry--; 2117 task->tk_garb_retry--;
1772 dprintk("RPC: %5u %s: retrying\n", 2118 dprintk("RPC: %5u %s: retrying\n",
@@ -1852,14 +2198,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
1852 task->tk_action, rpc_waitq); 2198 task->tk_action, rpc_waitq);
1853} 2199}
1854 2200
1855void rpc_show_tasks(void) 2201void rpc_show_tasks(struct net *net)
1856{ 2202{
1857 struct rpc_clnt *clnt; 2203 struct rpc_clnt *clnt;
1858 struct rpc_task *task; 2204 struct rpc_task *task;
1859 int header = 0; 2205 int header = 0;
2206 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1860 2207
1861 spin_lock(&rpc_client_lock); 2208 spin_lock(&sn->rpc_client_lock);
1862 list_for_each_entry(clnt, &all_clients, cl_clients) { 2209 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
1863 spin_lock(&clnt->cl_lock); 2210 spin_lock(&clnt->cl_lock);
1864 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 2211 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
1865 if (!header) { 2212 if (!header) {
@@ -1870,6 +2217,6 @@ void rpc_show_tasks(void)
1870 } 2217 }
1871 spin_unlock(&clnt->cl_lock); 2218 spin_unlock(&clnt->cl_lock);
1872 } 2219 }
1873 spin_unlock(&rpc_client_lock); 2220 spin_unlock(&sn->rpc_client_lock);
1874} 2221}
1875#endif 2222#endif
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index d013bf211cae..ce7bd449173d 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -9,6 +9,20 @@ struct cache_detail;
9struct sunrpc_net { 9struct sunrpc_net {
10 struct proc_dir_entry *proc_net_rpc; 10 struct proc_dir_entry *proc_net_rpc;
11 struct cache_detail *ip_map_cache; 11 struct cache_detail *ip_map_cache;
12 struct cache_detail *unix_gid_cache;
13 struct cache_detail *rsc_cache;
14 struct cache_detail *rsi_cache;
15
16 struct super_block *pipefs_sb;
17 struct mutex pipefs_sb_lock;
18
19 struct list_head all_clients;
20 spinlock_t rpc_client_lock;
21
22 struct rpc_clnt *rpcb_local_clnt;
23 struct rpc_clnt *rpcb_local_clnt4;
24 spinlock_t rpcb_clnt_lock;
25 unsigned int rpcb_users;
12}; 26};
13 27
14extern int sunrpc_net_id; 28extern int sunrpc_net_id;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 63a7a7add21e..04040476082e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -16,9 +16,9 @@
16#include <linux/namei.h> 16#include <linux/namei.h>
17#include <linux/fsnotify.h> 17#include <linux/fsnotify.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/rcupdate.h>
19 20
20#include <asm/ioctls.h> 21#include <asm/ioctls.h>
21#include <linux/fs.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/wait.h> 23#include <linux/wait.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
@@ -27,9 +27,15 @@
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/sunrpc/rpc_pipe_fs.h> 28#include <linux/sunrpc/rpc_pipe_fs.h>
29#include <linux/sunrpc/cache.h> 29#include <linux/sunrpc/cache.h>
30#include <linux/nsproxy.h>
31#include <linux/notifier.h>
30 32
31static struct vfsmount *rpc_mnt __read_mostly; 33#include "netns.h"
32static int rpc_mount_count; 34#include "sunrpc.h"
35
36#define RPCDBG_FACILITY RPCDBG_DEBUG
37
38#define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "")
33 39
34static struct file_system_type rpc_pipe_fs_type; 40static struct file_system_type rpc_pipe_fs_type;
35 41
@@ -38,7 +44,21 @@ static struct kmem_cache *rpc_inode_cachep __read_mostly;
38 44
39#define RPC_UPCALL_TIMEOUT (30*HZ) 45#define RPC_UPCALL_TIMEOUT (30*HZ)
40 46
41static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, 47static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list);
48
49int rpc_pipefs_notifier_register(struct notifier_block *nb)
50{
51 return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb);
52}
53EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register);
54
55void rpc_pipefs_notifier_unregister(struct notifier_block *nb)
56{
57 blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb);
58}
59EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister);
60
61static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
42 void (*destroy_msg)(struct rpc_pipe_msg *), int err) 62 void (*destroy_msg)(struct rpc_pipe_msg *), int err)
43{ 63{
44 struct rpc_pipe_msg *msg; 64 struct rpc_pipe_msg *msg;
@@ -51,30 +71,31 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
51 msg->errno = err; 71 msg->errno = err;
52 destroy_msg(msg); 72 destroy_msg(msg);
53 } while (!list_empty(head)); 73 } while (!list_empty(head));
54 wake_up(&rpci->waitq); 74 wake_up(waitq);
55} 75}
56 76
57static void 77static void
58rpc_timeout_upcall_queue(struct work_struct *work) 78rpc_timeout_upcall_queue(struct work_struct *work)
59{ 79{
60 LIST_HEAD(free_list); 80 LIST_HEAD(free_list);
61 struct rpc_inode *rpci = 81 struct rpc_pipe *pipe =
62 container_of(work, struct rpc_inode, queue_timeout.work); 82 container_of(work, struct rpc_pipe, queue_timeout.work);
63 struct inode *inode = &rpci->vfs_inode;
64 void (*destroy_msg)(struct rpc_pipe_msg *); 83 void (*destroy_msg)(struct rpc_pipe_msg *);
84 struct dentry *dentry;
65 85
66 spin_lock(&inode->i_lock); 86 spin_lock(&pipe->lock);
67 if (rpci->ops == NULL) { 87 destroy_msg = pipe->ops->destroy_msg;
68 spin_unlock(&inode->i_lock); 88 if (pipe->nreaders == 0) {
69 return; 89 list_splice_init(&pipe->pipe, &free_list);
90 pipe->pipelen = 0;
70 } 91 }
71 destroy_msg = rpci->ops->destroy_msg; 92 dentry = dget(pipe->dentry);
72 if (rpci->nreaders == 0) { 93 spin_unlock(&pipe->lock);
73 list_splice_init(&rpci->pipe, &free_list); 94 if (dentry) {
74 rpci->pipelen = 0; 95 rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
96 &free_list, destroy_msg, -ETIMEDOUT);
97 dput(dentry);
75 } 98 }
76 spin_unlock(&inode->i_lock);
77 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
78} 99}
79 100
80ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, 101ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
@@ -99,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
99 120
100/** 121/**
101 * rpc_queue_upcall - queue an upcall message to userspace 122 * rpc_queue_upcall - queue an upcall message to userspace
102 * @inode: inode of upcall pipe on which to queue given message 123 * @pipe: upcall pipe on which to queue given message
103 * @msg: message to queue 124 * @msg: message to queue
104 * 125 *
105 * Call with an @inode created by rpc_mkpipe() to queue an upcall. 126 * Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -108,30 +129,31 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
108 * initialize the fields of @msg (other than @msg->list) appropriately. 129 * initialize the fields of @msg (other than @msg->list) appropriately.
109 */ 130 */
110int 131int
111rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) 132rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg)
112{ 133{
113 struct rpc_inode *rpci = RPC_I(inode);
114 int res = -EPIPE; 134 int res = -EPIPE;
135 struct dentry *dentry;
115 136
116 spin_lock(&inode->i_lock); 137 spin_lock(&pipe->lock);
117 if (rpci->ops == NULL) 138 if (pipe->nreaders) {
118 goto out; 139 list_add_tail(&msg->list, &pipe->pipe);
119 if (rpci->nreaders) { 140 pipe->pipelen += msg->len;
120 list_add_tail(&msg->list, &rpci->pipe);
121 rpci->pipelen += msg->len;
122 res = 0; 141 res = 0;
123 } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { 142 } else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) {
124 if (list_empty(&rpci->pipe)) 143 if (list_empty(&pipe->pipe))
125 queue_delayed_work(rpciod_workqueue, 144 queue_delayed_work(rpciod_workqueue,
126 &rpci->queue_timeout, 145 &pipe->queue_timeout,
127 RPC_UPCALL_TIMEOUT); 146 RPC_UPCALL_TIMEOUT);
128 list_add_tail(&msg->list, &rpci->pipe); 147 list_add_tail(&msg->list, &pipe->pipe);
129 rpci->pipelen += msg->len; 148 pipe->pipelen += msg->len;
130 res = 0; 149 res = 0;
131 } 150 }
132out: 151 dentry = dget(pipe->dentry);
133 spin_unlock(&inode->i_lock); 152 spin_unlock(&pipe->lock);
134 wake_up(&rpci->waitq); 153 if (dentry) {
154 wake_up(&RPC_I(dentry->d_inode)->waitq);
155 dput(dentry);
156 }
135 return res; 157 return res;
136} 158}
137EXPORT_SYMBOL_GPL(rpc_queue_upcall); 159EXPORT_SYMBOL_GPL(rpc_queue_upcall);
@@ -145,29 +167,26 @@ rpc_inode_setowner(struct inode *inode, void *private)
145static void 167static void
146rpc_close_pipes(struct inode *inode) 168rpc_close_pipes(struct inode *inode)
147{ 169{
148 struct rpc_inode *rpci = RPC_I(inode); 170 struct rpc_pipe *pipe = RPC_I(inode)->pipe;
149 const struct rpc_pipe_ops *ops;
150 int need_release; 171 int need_release;
172 LIST_HEAD(free_list);
151 173
152 mutex_lock(&inode->i_mutex); 174 mutex_lock(&inode->i_mutex);
153 ops = rpci->ops; 175 spin_lock(&pipe->lock);
154 if (ops != NULL) { 176 need_release = pipe->nreaders != 0 || pipe->nwriters != 0;
155 LIST_HEAD(free_list); 177 pipe->nreaders = 0;
156 spin_lock(&inode->i_lock); 178 list_splice_init(&pipe->in_upcall, &free_list);
157 need_release = rpci->nreaders != 0 || rpci->nwriters != 0; 179 list_splice_init(&pipe->pipe, &free_list);
158 rpci->nreaders = 0; 180 pipe->pipelen = 0;
159 list_splice_init(&rpci->in_upcall, &free_list); 181 pipe->dentry = NULL;
160 list_splice_init(&rpci->pipe, &free_list); 182 spin_unlock(&pipe->lock);
161 rpci->pipelen = 0; 183 rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE);
162 rpci->ops = NULL; 184 pipe->nwriters = 0;
163 spin_unlock(&inode->i_lock); 185 if (need_release && pipe->ops->release_pipe)
164 rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); 186 pipe->ops->release_pipe(inode);
165 rpci->nwriters = 0; 187 cancel_delayed_work_sync(&pipe->queue_timeout);
166 if (need_release && ops->release_pipe)
167 ops->release_pipe(inode);
168 cancel_delayed_work_sync(&rpci->queue_timeout);
169 }
170 rpc_inode_setowner(inode, NULL); 188 rpc_inode_setowner(inode, NULL);
189 RPC_I(inode)->pipe = NULL;
171 mutex_unlock(&inode->i_mutex); 190 mutex_unlock(&inode->i_mutex);
172} 191}
173 192
@@ -197,23 +216,24 @@ rpc_destroy_inode(struct inode *inode)
197static int 216static int
198rpc_pipe_open(struct inode *inode, struct file *filp) 217rpc_pipe_open(struct inode *inode, struct file *filp)
199{ 218{
200 struct rpc_inode *rpci = RPC_I(inode); 219 struct rpc_pipe *pipe;
201 int first_open; 220 int first_open;
202 int res = -ENXIO; 221 int res = -ENXIO;
203 222
204 mutex_lock(&inode->i_mutex); 223 mutex_lock(&inode->i_mutex);
205 if (rpci->ops == NULL) 224 pipe = RPC_I(inode)->pipe;
225 if (pipe == NULL)
206 goto out; 226 goto out;
207 first_open = rpci->nreaders == 0 && rpci->nwriters == 0; 227 first_open = pipe->nreaders == 0 && pipe->nwriters == 0;
208 if (first_open && rpci->ops->open_pipe) { 228 if (first_open && pipe->ops->open_pipe) {
209 res = rpci->ops->open_pipe(inode); 229 res = pipe->ops->open_pipe(inode);
210 if (res) 230 if (res)
211 goto out; 231 goto out;
212 } 232 }
213 if (filp->f_mode & FMODE_READ) 233 if (filp->f_mode & FMODE_READ)
214 rpci->nreaders++; 234 pipe->nreaders++;
215 if (filp->f_mode & FMODE_WRITE) 235 if (filp->f_mode & FMODE_WRITE)
216 rpci->nwriters++; 236 pipe->nwriters++;
217 res = 0; 237 res = 0;
218out: 238out:
219 mutex_unlock(&inode->i_mutex); 239 mutex_unlock(&inode->i_mutex);
@@ -223,38 +243,39 @@ out:
223static int 243static int
224rpc_pipe_release(struct inode *inode, struct file *filp) 244rpc_pipe_release(struct inode *inode, struct file *filp)
225{ 245{
226 struct rpc_inode *rpci = RPC_I(inode); 246 struct rpc_pipe *pipe;
227 struct rpc_pipe_msg *msg; 247 struct rpc_pipe_msg *msg;
228 int last_close; 248 int last_close;
229 249
230 mutex_lock(&inode->i_mutex); 250 mutex_lock(&inode->i_mutex);
231 if (rpci->ops == NULL) 251 pipe = RPC_I(inode)->pipe;
252 if (pipe == NULL)
232 goto out; 253 goto out;
233 msg = filp->private_data; 254 msg = filp->private_data;
234 if (msg != NULL) { 255 if (msg != NULL) {
235 spin_lock(&inode->i_lock); 256 spin_lock(&pipe->lock);
236 msg->errno = -EAGAIN; 257 msg->errno = -EAGAIN;
237 list_del_init(&msg->list); 258 list_del_init(&msg->list);
238 spin_unlock(&inode->i_lock); 259 spin_unlock(&pipe->lock);
239 rpci->ops->destroy_msg(msg); 260 pipe->ops->destroy_msg(msg);
240 } 261 }
241 if (filp->f_mode & FMODE_WRITE) 262 if (filp->f_mode & FMODE_WRITE)
242 rpci->nwriters --; 263 pipe->nwriters --;
243 if (filp->f_mode & FMODE_READ) { 264 if (filp->f_mode & FMODE_READ) {
244 rpci->nreaders --; 265 pipe->nreaders --;
245 if (rpci->nreaders == 0) { 266 if (pipe->nreaders == 0) {
246 LIST_HEAD(free_list); 267 LIST_HEAD(free_list);
247 spin_lock(&inode->i_lock); 268 spin_lock(&pipe->lock);
248 list_splice_init(&rpci->pipe, &free_list); 269 list_splice_init(&pipe->pipe, &free_list);
249 rpci->pipelen = 0; 270 pipe->pipelen = 0;
250 spin_unlock(&inode->i_lock); 271 spin_unlock(&pipe->lock);
251 rpc_purge_list(rpci, &free_list, 272 rpc_purge_list(&RPC_I(inode)->waitq, &free_list,
252 rpci->ops->destroy_msg, -EAGAIN); 273 pipe->ops->destroy_msg, -EAGAIN);
253 } 274 }
254 } 275 }
255 last_close = rpci->nwriters == 0 && rpci->nreaders == 0; 276 last_close = pipe->nwriters == 0 && pipe->nreaders == 0;
256 if (last_close && rpci->ops->release_pipe) 277 if (last_close && pipe->ops->release_pipe)
257 rpci->ops->release_pipe(inode); 278 pipe->ops->release_pipe(inode);
258out: 279out:
259 mutex_unlock(&inode->i_mutex); 280 mutex_unlock(&inode->i_mutex);
260 return 0; 281 return 0;
@@ -264,39 +285,40 @@ static ssize_t
264rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) 285rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
265{ 286{
266 struct inode *inode = filp->f_path.dentry->d_inode; 287 struct inode *inode = filp->f_path.dentry->d_inode;
267 struct rpc_inode *rpci = RPC_I(inode); 288 struct rpc_pipe *pipe;
268 struct rpc_pipe_msg *msg; 289 struct rpc_pipe_msg *msg;
269 int res = 0; 290 int res = 0;
270 291
271 mutex_lock(&inode->i_mutex); 292 mutex_lock(&inode->i_mutex);
272 if (rpci->ops == NULL) { 293 pipe = RPC_I(inode)->pipe;
294 if (pipe == NULL) {
273 res = -EPIPE; 295 res = -EPIPE;
274 goto out_unlock; 296 goto out_unlock;
275 } 297 }
276 msg = filp->private_data; 298 msg = filp->private_data;
277 if (msg == NULL) { 299 if (msg == NULL) {
278 spin_lock(&inode->i_lock); 300 spin_lock(&pipe->lock);
279 if (!list_empty(&rpci->pipe)) { 301 if (!list_empty(&pipe->pipe)) {
280 msg = list_entry(rpci->pipe.next, 302 msg = list_entry(pipe->pipe.next,
281 struct rpc_pipe_msg, 303 struct rpc_pipe_msg,
282 list); 304 list);
283 list_move(&msg->list, &rpci->in_upcall); 305 list_move(&msg->list, &pipe->in_upcall);
284 rpci->pipelen -= msg->len; 306 pipe->pipelen -= msg->len;
285 filp->private_data = msg; 307 filp->private_data = msg;
286 msg->copied = 0; 308 msg->copied = 0;
287 } 309 }
288 spin_unlock(&inode->i_lock); 310 spin_unlock(&pipe->lock);
289 if (msg == NULL) 311 if (msg == NULL)
290 goto out_unlock; 312 goto out_unlock;
291 } 313 }
292 /* NOTE: it is up to the callback to update msg->copied */ 314 /* NOTE: it is up to the callback to update msg->copied */
293 res = rpci->ops->upcall(filp, msg, buf, len); 315 res = pipe->ops->upcall(filp, msg, buf, len);
294 if (res < 0 || msg->len == msg->copied) { 316 if (res < 0 || msg->len == msg->copied) {
295 filp->private_data = NULL; 317 filp->private_data = NULL;
296 spin_lock(&inode->i_lock); 318 spin_lock(&pipe->lock);
297 list_del_init(&msg->list); 319 list_del_init(&msg->list);
298 spin_unlock(&inode->i_lock); 320 spin_unlock(&pipe->lock);
299 rpci->ops->destroy_msg(msg); 321 pipe->ops->destroy_msg(msg);
300 } 322 }
301out_unlock: 323out_unlock:
302 mutex_unlock(&inode->i_mutex); 324 mutex_unlock(&inode->i_mutex);
@@ -307,13 +329,12 @@ static ssize_t
307rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) 329rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
308{ 330{
309 struct inode *inode = filp->f_path.dentry->d_inode; 331 struct inode *inode = filp->f_path.dentry->d_inode;
310 struct rpc_inode *rpci = RPC_I(inode);
311 int res; 332 int res;
312 333
313 mutex_lock(&inode->i_mutex); 334 mutex_lock(&inode->i_mutex);
314 res = -EPIPE; 335 res = -EPIPE;
315 if (rpci->ops != NULL) 336 if (RPC_I(inode)->pipe != NULL)
316 res = rpci->ops->downcall(filp, buf, len); 337 res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len);
317 mutex_unlock(&inode->i_mutex); 338 mutex_unlock(&inode->i_mutex);
318 return res; 339 return res;
319} 340}
@@ -321,17 +342,18 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
321static unsigned int 342static unsigned int
322rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) 343rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
323{ 344{
324 struct rpc_inode *rpci; 345 struct inode *inode = filp->f_path.dentry->d_inode;
325 unsigned int mask = 0; 346 struct rpc_inode *rpci = RPC_I(inode);
347 unsigned int mask = POLLOUT | POLLWRNORM;
326 348
327 rpci = RPC_I(filp->f_path.dentry->d_inode);
328 poll_wait(filp, &rpci->waitq, wait); 349 poll_wait(filp, &rpci->waitq, wait);
329 350
330 mask = POLLOUT | POLLWRNORM; 351 mutex_lock(&inode->i_mutex);
331 if (rpci->ops == NULL) 352 if (rpci->pipe == NULL)
332 mask |= POLLERR | POLLHUP; 353 mask |= POLLERR | POLLHUP;
333 if (filp->private_data || !list_empty(&rpci->pipe)) 354 else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
334 mask |= POLLIN | POLLRDNORM; 355 mask |= POLLIN | POLLRDNORM;
356 mutex_unlock(&inode->i_mutex);
335 return mask; 357 return mask;
336} 358}
337 359
@@ -339,23 +361,26 @@ static long
339rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 361rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
340{ 362{
341 struct inode *inode = filp->f_path.dentry->d_inode; 363 struct inode *inode = filp->f_path.dentry->d_inode;
342 struct rpc_inode *rpci = RPC_I(inode); 364 struct rpc_pipe *pipe;
343 int len; 365 int len;
344 366
345 switch (cmd) { 367 switch (cmd) {
346 case FIONREAD: 368 case FIONREAD:
347 spin_lock(&inode->i_lock); 369 mutex_lock(&inode->i_mutex);
348 if (rpci->ops == NULL) { 370 pipe = RPC_I(inode)->pipe;
349 spin_unlock(&inode->i_lock); 371 if (pipe == NULL) {
372 mutex_unlock(&inode->i_mutex);
350 return -EPIPE; 373 return -EPIPE;
351 } 374 }
352 len = rpci->pipelen; 375 spin_lock(&pipe->lock);
376 len = pipe->pipelen;
353 if (filp->private_data) { 377 if (filp->private_data) {
354 struct rpc_pipe_msg *msg; 378 struct rpc_pipe_msg *msg;
355 msg = filp->private_data; 379 msg = filp->private_data;
356 len += msg->len - msg->copied; 380 len += msg->len - msg->copied;
357 } 381 }
358 spin_unlock(&inode->i_lock); 382 spin_unlock(&pipe->lock);
383 mutex_unlock(&inode->i_mutex);
359 return put_user(len, (int __user *)arg); 384 return put_user(len, (int __user *)arg);
360 default: 385 default:
361 return -EINVAL; 386 return -EINVAL;
@@ -378,12 +403,15 @@ rpc_show_info(struct seq_file *m, void *v)
378{ 403{
379 struct rpc_clnt *clnt = m->private; 404 struct rpc_clnt *clnt = m->private;
380 405
381 seq_printf(m, "RPC server: %s\n", clnt->cl_server); 406 rcu_read_lock();
407 seq_printf(m, "RPC server: %s\n",
408 rcu_dereference(clnt->cl_xprt)->servername);
382 seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname, 409 seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
383 clnt->cl_prog, clnt->cl_vers); 410 clnt->cl_prog, clnt->cl_vers);
384 seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); 411 seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
385 seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO)); 412 seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
386 seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT)); 413 seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
414 rcu_read_unlock();
387 return 0; 415 return 0;
388} 416}
389 417
@@ -440,23 +468,6 @@ struct rpc_filelist {
440 umode_t mode; 468 umode_t mode;
441}; 469};
442 470
443struct vfsmount *rpc_get_mount(void)
444{
445 int err;
446
447 err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mnt, &rpc_mount_count);
448 if (err != 0)
449 return ERR_PTR(err);
450 return rpc_mnt;
451}
452EXPORT_SYMBOL_GPL(rpc_get_mount);
453
454void rpc_put_mount(void)
455{
456 simple_release_fs(&rpc_mnt, &rpc_mount_count);
457}
458EXPORT_SYMBOL_GPL(rpc_put_mount);
459
460static int rpc_delete_dentry(const struct dentry *dentry) 471static int rpc_delete_dentry(const struct dentry *dentry)
461{ 472{
462 return 1; 473 return 1;
@@ -540,12 +551,47 @@ static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
540 return 0; 551 return 0;
541} 552}
542 553
543static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry, 554static void
544 umode_t mode, 555init_pipe(struct rpc_pipe *pipe)
545 const struct file_operations *i_fop, 556{
546 void *private, 557 pipe->nreaders = 0;
547 const struct rpc_pipe_ops *ops, 558 pipe->nwriters = 0;
548 int flags) 559 INIT_LIST_HEAD(&pipe->in_upcall);
560 INIT_LIST_HEAD(&pipe->in_downcall);
561 INIT_LIST_HEAD(&pipe->pipe);
562 pipe->pipelen = 0;
563 INIT_DELAYED_WORK(&pipe->queue_timeout,
564 rpc_timeout_upcall_queue);
565 pipe->ops = NULL;
566 spin_lock_init(&pipe->lock);
567 pipe->dentry = NULL;
568}
569
570void rpc_destroy_pipe_data(struct rpc_pipe *pipe)
571{
572 kfree(pipe);
573}
574EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data);
575
576struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags)
577{
578 struct rpc_pipe *pipe;
579
580 pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL);
581 if (!pipe)
582 return ERR_PTR(-ENOMEM);
583 init_pipe(pipe);
584 pipe->ops = ops;
585 pipe->flags = flags;
586 return pipe;
587}
588EXPORT_SYMBOL_GPL(rpc_mkpipe_data);
589
590static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
591 umode_t mode,
592 const struct file_operations *i_fop,
593 void *private,
594 struct rpc_pipe *pipe)
549{ 595{
550 struct rpc_inode *rpci; 596 struct rpc_inode *rpci;
551 int err; 597 int err;
@@ -554,10 +600,8 @@ static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry,
554 if (err) 600 if (err)
555 return err; 601 return err;
556 rpci = RPC_I(dentry->d_inode); 602 rpci = RPC_I(dentry->d_inode);
557 rpci->nkern_readwriters = 1;
558 rpci->private = private; 603 rpci->private = private;
559 rpci->flags = flags; 604 rpci->pipe = pipe;
560 rpci->ops = ops;
561 fsnotify_create(dir, dentry); 605 fsnotify_create(dir, dentry);
562 return 0; 606 return 0;
563} 607}
@@ -573,6 +617,22 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
573 return ret; 617 return ret;
574} 618}
575 619
620int rpc_rmdir(struct dentry *dentry)
621{
622 struct dentry *parent;
623 struct inode *dir;
624 int error;
625
626 parent = dget_parent(dentry);
627 dir = parent->d_inode;
628 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
629 error = __rpc_rmdir(dir, dentry);
630 mutex_unlock(&dir->i_mutex);
631 dput(parent);
632 return error;
633}
634EXPORT_SYMBOL_GPL(rpc_rmdir);
635
576static int __rpc_unlink(struct inode *dir, struct dentry *dentry) 636static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
577{ 637{
578 int ret; 638 int ret;
@@ -587,16 +647,12 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
587static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry) 647static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
588{ 648{
589 struct inode *inode = dentry->d_inode; 649 struct inode *inode = dentry->d_inode;
590 struct rpc_inode *rpci = RPC_I(inode);
591 650
592 rpci->nkern_readwriters--;
593 if (rpci->nkern_readwriters != 0)
594 return 0;
595 rpc_close_pipes(inode); 651 rpc_close_pipes(inode);
596 return __rpc_unlink(dir, dentry); 652 return __rpc_unlink(dir, dentry);
597} 653}
598 654
599static struct dentry *__rpc_lookup_create(struct dentry *parent, 655static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
600 struct qstr *name) 656 struct qstr *name)
601{ 657{
602 struct dentry *dentry; 658 struct dentry *dentry;
@@ -604,27 +660,13 @@ static struct dentry *__rpc_lookup_create(struct dentry *parent,
604 dentry = d_lookup(parent, name); 660 dentry = d_lookup(parent, name);
605 if (!dentry) { 661 if (!dentry) {
606 dentry = d_alloc(parent, name); 662 dentry = d_alloc(parent, name);
607 if (!dentry) { 663 if (!dentry)
608 dentry = ERR_PTR(-ENOMEM); 664 return ERR_PTR(-ENOMEM);
609 goto out_err;
610 }
611 } 665 }
612 if (!dentry->d_inode) 666 if (dentry->d_inode == NULL) {
613 d_set_d_op(dentry, &rpc_dentry_operations); 667 d_set_d_op(dentry, &rpc_dentry_operations);
614out_err:
615 return dentry;
616}
617
618static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
619 struct qstr *name)
620{
621 struct dentry *dentry;
622
623 dentry = __rpc_lookup_create(parent, name);
624 if (IS_ERR(dentry))
625 return dentry;
626 if (dentry->d_inode == NULL)
627 return dentry; 668 return dentry;
669 }
628 dput(dentry); 670 dput(dentry);
629 return ERR_PTR(-EEXIST); 671 return ERR_PTR(-EEXIST);
630} 672}
@@ -777,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
777 * @parent: dentry of directory to create new "pipe" in 819 * @parent: dentry of directory to create new "pipe" in
778 * @name: name of pipe 820 * @name: name of pipe
779 * @private: private data to associate with the pipe, for the caller's use 821 * @private: private data to associate with the pipe, for the caller's use
780 * @ops: operations defining the behavior of the pipe: upcall, downcall, 822 * @pipe: &rpc_pipe containing input parameters
781 * release_pipe, open_pipe, and destroy_msg.
782 * @flags: rpc_inode flags
783 * 823 *
784 * Data is made available for userspace to read by calls to 824 * Data is made available for userspace to read by calls to
785 * rpc_queue_upcall(). The actual reads will result in calls to 825 * rpc_queue_upcall(). The actual reads will result in calls to
@@ -792,9 +832,8 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
792 * The @private argument passed here will be available to all these methods 832 * The @private argument passed here will be available to all these methods
793 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private. 833 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
794 */ 834 */
795struct dentry *rpc_mkpipe(struct dentry *parent, const char *name, 835struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
796 void *private, const struct rpc_pipe_ops *ops, 836 void *private, struct rpc_pipe *pipe)
797 int flags)
798{ 837{
799 struct dentry *dentry; 838 struct dentry *dentry;
800 struct inode *dir = parent->d_inode; 839 struct inode *dir = parent->d_inode;
@@ -802,9 +841,9 @@ struct dentry *rpc_mkpipe(struct dentry *parent, const char *name,
802 struct qstr q; 841 struct qstr q;
803 int err; 842 int err;
804 843
805 if (ops->upcall == NULL) 844 if (pipe->ops->upcall == NULL)
806 umode &= ~S_IRUGO; 845 umode &= ~S_IRUGO;
807 if (ops->downcall == NULL) 846 if (pipe->ops->downcall == NULL)
808 umode &= ~S_IWUGO; 847 umode &= ~S_IWUGO;
809 848
810 q.name = name; 849 q.name = name;
@@ -812,24 +851,11 @@ struct dentry *rpc_mkpipe(struct dentry *parent, const char *name,
812 q.hash = full_name_hash(q.name, q.len), 851 q.hash = full_name_hash(q.name, q.len),
813 852
814 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 853 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
815 dentry = __rpc_lookup_create(parent, &q); 854 dentry = __rpc_lookup_create_exclusive(parent, &q);
816 if (IS_ERR(dentry)) 855 if (IS_ERR(dentry))
817 goto out; 856 goto out;
818 if (dentry->d_inode) { 857 err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
819 struct rpc_inode *rpci = RPC_I(dentry->d_inode); 858 private, pipe);
820 if (rpci->private != private ||
821 rpci->ops != ops ||
822 rpci->flags != flags) {
823 dput (dentry);
824 err = -EBUSY;
825 goto out_err;
826 }
827 rpci->nkern_readwriters++;
828 goto out;
829 }
830
831 err = __rpc_mkpipe(dir, dentry, umode, &rpc_pipe_fops,
832 private, ops, flags);
833 if (err) 859 if (err)
834 goto out_err; 860 goto out_err;
835out: 861out:
@@ -842,7 +868,7 @@ out_err:
842 err); 868 err);
843 goto out; 869 goto out;
844} 870}
845EXPORT_SYMBOL_GPL(rpc_mkpipe); 871EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry);
846 872
847/** 873/**
848 * rpc_unlink - remove a pipe 874 * rpc_unlink - remove a pipe
@@ -915,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
915 941
916/** 942/**
917 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir() 943 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
918 * @dentry: directory to remove 944 * @dentry: dentry for the pipe
919 */ 945 */
920int rpc_remove_client_dir(struct dentry *dentry) 946int rpc_remove_client_dir(struct dentry *dentry)
921{ 947{
@@ -986,6 +1012,7 @@ enum {
986 RPCAUTH_statd, 1012 RPCAUTH_statd,
987 RPCAUTH_nfsd4_cb, 1013 RPCAUTH_nfsd4_cb,
988 RPCAUTH_cache, 1014 RPCAUTH_cache,
1015 RPCAUTH_nfsd,
989 RPCAUTH_RootEOF 1016 RPCAUTH_RootEOF
990}; 1017};
991 1018
@@ -1018,13 +1045,67 @@ static const struct rpc_filelist files[] = {
1018 .name = "cache", 1045 .name = "cache",
1019 .mode = S_IFDIR | S_IRUGO | S_IXUGO, 1046 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
1020 }, 1047 },
1048 [RPCAUTH_nfsd] = {
1049 .name = "nfsd",
1050 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
1051 },
1021}; 1052};
1022 1053
1054/*
1055 * This call can be used only in RPC pipefs mount notification hooks.
1056 */
1057struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
1058 const unsigned char *dir_name)
1059{
1060 struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
1061
1062 dir.hash = full_name_hash(dir.name, dir.len);
1063 return d_lookup(sb->s_root, &dir);
1064}
1065EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
1066
1067void rpc_pipefs_init_net(struct net *net)
1068{
1069 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1070
1071 mutex_init(&sn->pipefs_sb_lock);
1072}
1073
1074/*
1075 * This call will be used for per network namespace operations calls.
1076 * Note: Function will be returned with pipefs_sb_lock taken if superblock was
1077 * found. This lock have to be released by rpc_put_sb_net() when all operations
1078 * will be completed.
1079 */
1080struct super_block *rpc_get_sb_net(const struct net *net)
1081{
1082 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1083
1084 mutex_lock(&sn->pipefs_sb_lock);
1085 if (sn->pipefs_sb)
1086 return sn->pipefs_sb;
1087 mutex_unlock(&sn->pipefs_sb_lock);
1088 return NULL;
1089}
1090EXPORT_SYMBOL_GPL(rpc_get_sb_net);
1091
1092void rpc_put_sb_net(const struct net *net)
1093{
1094 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1095
1096 BUG_ON(sn->pipefs_sb == NULL);
1097 mutex_unlock(&sn->pipefs_sb_lock);
1098}
1099EXPORT_SYMBOL_GPL(rpc_put_sb_net);
1100
1023static int 1101static int
1024rpc_fill_super(struct super_block *sb, void *data, int silent) 1102rpc_fill_super(struct super_block *sb, void *data, int silent)
1025{ 1103{
1026 struct inode *inode; 1104 struct inode *inode;
1027 struct dentry *root; 1105 struct dentry *root;
1106 struct net *net = data;
1107 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1108 int err;
1028 1109
1029 sb->s_blocksize = PAGE_CACHE_SIZE; 1110 sb->s_blocksize = PAGE_CACHE_SIZE;
1030 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1111 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1032,31 +1113,61 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1032 sb->s_op = &s_ops; 1113 sb->s_op = &s_ops;
1033 sb->s_time_gran = 1; 1114 sb->s_time_gran = 1;
1034 1115
1035 inode = rpc_get_inode(sb, S_IFDIR | 0755); 1116 inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
1036 if (!inode) 1117 sb->s_root = root = d_make_root(inode);
1037 return -ENOMEM; 1118 if (!root)
1038 sb->s_root = root = d_alloc_root(inode);
1039 if (!root) {
1040 iput(inode);
1041 return -ENOMEM; 1119 return -ENOMEM;
1042 }
1043 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) 1120 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
1044 return -ENOMEM; 1121 return -ENOMEM;
1122 dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
1123 NET_NAME(net));
1124 sn->pipefs_sb = sb;
1125 err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1126 RPC_PIPEFS_MOUNT,
1127 sb);
1128 if (err)
1129 goto err_depopulate;
1130 sb->s_fs_info = get_net(net);
1045 return 0; 1131 return 0;
1132
1133err_depopulate:
1134 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1135 RPC_PIPEFS_UMOUNT,
1136 sb);
1137 sn->pipefs_sb = NULL;
1138 __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
1139 return err;
1046} 1140}
1047 1141
1048static struct dentry * 1142static struct dentry *
1049rpc_mount(struct file_system_type *fs_type, 1143rpc_mount(struct file_system_type *fs_type,
1050 int flags, const char *dev_name, void *data) 1144 int flags, const char *dev_name, void *data)
1051{ 1145{
1052 return mount_single(fs_type, flags, data, rpc_fill_super); 1146 return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super);
1147}
1148
1149static void rpc_kill_sb(struct super_block *sb)
1150{
1151 struct net *net = sb->s_fs_info;
1152 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1153
1154 mutex_lock(&sn->pipefs_sb_lock);
1155 sn->pipefs_sb = NULL;
1156 mutex_unlock(&sn->pipefs_sb_lock);
1157 put_net(net);
1158 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net,
1159 NET_NAME(net));
1160 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1161 RPC_PIPEFS_UMOUNT,
1162 sb);
1163 kill_litter_super(sb);
1053} 1164}
1054 1165
1055static struct file_system_type rpc_pipe_fs_type = { 1166static struct file_system_type rpc_pipe_fs_type = {
1056 .owner = THIS_MODULE, 1167 .owner = THIS_MODULE,
1057 .name = "rpc_pipefs", 1168 .name = "rpc_pipefs",
1058 .mount = rpc_mount, 1169 .mount = rpc_mount,
1059 .kill_sb = kill_litter_super, 1170 .kill_sb = rpc_kill_sb,
1060}; 1171};
1061 1172
1062static void 1173static void
@@ -1066,16 +1177,8 @@ init_once(void *foo)
1066 1177
1067 inode_init_once(&rpci->vfs_inode); 1178 inode_init_once(&rpci->vfs_inode);
1068 rpci->private = NULL; 1179 rpci->private = NULL;
1069 rpci->nreaders = 0; 1180 rpci->pipe = NULL;
1070 rpci->nwriters = 0;
1071 INIT_LIST_HEAD(&rpci->in_upcall);
1072 INIT_LIST_HEAD(&rpci->in_downcall);
1073 INIT_LIST_HEAD(&rpci->pipe);
1074 rpci->pipelen = 0;
1075 init_waitqueue_head(&rpci->waitq); 1181 init_waitqueue_head(&rpci->waitq);
1076 INIT_DELAYED_WORK(&rpci->queue_timeout,
1077 rpc_timeout_upcall_queue);
1078 rpci->ops = NULL;
1079} 1182}
1080 1183
1081int register_rpc_pipefs(void) 1184int register_rpc_pipefs(void)
@@ -1089,17 +1192,24 @@ int register_rpc_pipefs(void)
1089 init_once); 1192 init_once);
1090 if (!rpc_inode_cachep) 1193 if (!rpc_inode_cachep)
1091 return -ENOMEM; 1194 return -ENOMEM;
1195 err = rpc_clients_notifier_register();
1196 if (err)
1197 goto err_notifier;
1092 err = register_filesystem(&rpc_pipe_fs_type); 1198 err = register_filesystem(&rpc_pipe_fs_type);
1093 if (err) { 1199 if (err)
1094 kmem_cache_destroy(rpc_inode_cachep); 1200 goto err_register;
1095 return err;
1096 }
1097
1098 return 0; 1201 return 0;
1202
1203err_register:
1204 rpc_clients_notifier_unregister();
1205err_notifier:
1206 kmem_cache_destroy(rpc_inode_cachep);
1207 return err;
1099} 1208}
1100 1209
1101void unregister_rpc_pipefs(void) 1210void unregister_rpc_pipefs(void)
1102{ 1211{
1212 rpc_clients_notifier_unregister();
1103 kmem_cache_destroy(rpc_inode_cachep); 1213 kmem_cache_destroy(rpc_inode_cachep);
1104 unregister_filesystem(&rpc_pipe_fs_type); 1214 unregister_filesystem(&rpc_pipe_fs_type);
1105} 1215}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 8761bf8e36fc..92509ffe15fc 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -23,12 +23,15 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/nsproxy.h>
26#include <net/ipv6.h> 27#include <net/ipv6.h>
27 28
28#include <linux/sunrpc/clnt.h> 29#include <linux/sunrpc/clnt.h>
29#include <linux/sunrpc/sched.h> 30#include <linux/sunrpc/sched.h>
30#include <linux/sunrpc/xprtsock.h> 31#include <linux/sunrpc/xprtsock.h>
31 32
33#include "netns.h"
34
32#ifdef RPC_DEBUG 35#ifdef RPC_DEBUG
33# define RPCDBG_FACILITY RPCDBG_BIND 36# define RPCDBG_FACILITY RPCDBG_BIND
34#endif 37#endif
@@ -109,13 +112,7 @@ enum {
109 112
110static void rpcb_getport_done(struct rpc_task *, void *); 113static void rpcb_getport_done(struct rpc_task *, void *);
111static void rpcb_map_release(void *data); 114static void rpcb_map_release(void *data);
112static struct rpc_program rpcb_program; 115static const struct rpc_program rpcb_program;
113
114static struct rpc_clnt * rpcb_local_clnt;
115static struct rpc_clnt * rpcb_local_clnt4;
116
117DEFINE_SPINLOCK(rpcb_clnt_lock);
118unsigned int rpcb_users;
119 116
120struct rpcbind_args { 117struct rpcbind_args {
121 struct rpc_xprt * r_xprt; 118 struct rpc_xprt * r_xprt;
@@ -140,8 +137,8 @@ struct rpcb_info {
140 struct rpc_procinfo * rpc_proc; 137 struct rpc_procinfo * rpc_proc;
141}; 138};
142 139
143static struct rpcb_info rpcb_next_version[]; 140static const struct rpcb_info rpcb_next_version[];
144static struct rpcb_info rpcb_next_version6[]; 141static const struct rpcb_info rpcb_next_version6[];
145 142
146static const struct rpc_call_ops rpcb_getport_ops = { 143static const struct rpc_call_ops rpcb_getport_ops = {
147 .rpc_call_done = rpcb_getport_done, 144 .rpc_call_done = rpcb_getport_done,
@@ -164,32 +161,36 @@ static void rpcb_map_release(void *data)
164 kfree(map); 161 kfree(map);
165} 162}
166 163
167static int rpcb_get_local(void) 164static int rpcb_get_local(struct net *net)
168{ 165{
169 int cnt; 166 int cnt;
167 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
170 168
171 spin_lock(&rpcb_clnt_lock); 169 spin_lock(&sn->rpcb_clnt_lock);
172 if (rpcb_users) 170 if (sn->rpcb_users)
173 rpcb_users++; 171 sn->rpcb_users++;
174 cnt = rpcb_users; 172 cnt = sn->rpcb_users;
175 spin_unlock(&rpcb_clnt_lock); 173 spin_unlock(&sn->rpcb_clnt_lock);
176 174
177 return cnt; 175 return cnt;
178} 176}
179 177
180void rpcb_put_local(void) 178void rpcb_put_local(struct net *net)
181{ 179{
182 struct rpc_clnt *clnt = rpcb_local_clnt; 180 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
183 struct rpc_clnt *clnt4 = rpcb_local_clnt4; 181 struct rpc_clnt *clnt = sn->rpcb_local_clnt;
184 int shutdown; 182 struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
185 183 int shutdown = 0;
186 spin_lock(&rpcb_clnt_lock); 184
187 if (--rpcb_users == 0) { 185 spin_lock(&sn->rpcb_clnt_lock);
188 rpcb_local_clnt = NULL; 186 if (sn->rpcb_users) {
189 rpcb_local_clnt4 = NULL; 187 if (--sn->rpcb_users == 0) {
188 sn->rpcb_local_clnt = NULL;
189 sn->rpcb_local_clnt4 = NULL;
190 }
191 shutdown = !sn->rpcb_users;
190 } 192 }
191 shutdown = !rpcb_users; 193 spin_unlock(&sn->rpcb_clnt_lock);
192 spin_unlock(&rpcb_clnt_lock);
193 194
194 if (shutdown) { 195 if (shutdown) {
195 /* 196 /*
@@ -202,30 +203,34 @@ void rpcb_put_local(void)
202 } 203 }
203} 204}
204 205
205static void rpcb_set_local(struct rpc_clnt *clnt, struct rpc_clnt *clnt4) 206static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
207 struct rpc_clnt *clnt4)
206{ 208{
209 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
210
207 /* Protected by rpcb_create_local_mutex */ 211 /* Protected by rpcb_create_local_mutex */
208 rpcb_local_clnt = clnt; 212 sn->rpcb_local_clnt = clnt;
209 rpcb_local_clnt4 = clnt4; 213 sn->rpcb_local_clnt4 = clnt4;
210 smp_wmb(); 214 smp_wmb();
211 rpcb_users = 1; 215 sn->rpcb_users = 1;
212 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " 216 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
213 "%p, rpcb_local_clnt4: %p)\n", rpcb_local_clnt, 217 "%p, rpcb_local_clnt4: %p) for net %p%s\n",
214 rpcb_local_clnt4); 218 sn->rpcb_local_clnt, sn->rpcb_local_clnt4,
219 net, (net == &init_net) ? " (init_net)" : "");
215} 220}
216 221
217/* 222/*
218 * Returns zero on success, otherwise a negative errno value 223 * Returns zero on success, otherwise a negative errno value
219 * is returned. 224 * is returned.
220 */ 225 */
221static int rpcb_create_local_unix(void) 226static int rpcb_create_local_unix(struct net *net)
222{ 227{
223 static const struct sockaddr_un rpcb_localaddr_rpcbind = { 228 static const struct sockaddr_un rpcb_localaddr_rpcbind = {
224 .sun_family = AF_LOCAL, 229 .sun_family = AF_LOCAL,
225 .sun_path = RPCBIND_SOCK_PATHNAME, 230 .sun_path = RPCBIND_SOCK_PATHNAME,
226 }; 231 };
227 struct rpc_create_args args = { 232 struct rpc_create_args args = {
228 .net = &init_net, 233 .net = net,
229 .protocol = XPRT_TRANSPORT_LOCAL, 234 .protocol = XPRT_TRANSPORT_LOCAL,
230 .address = (struct sockaddr *)&rpcb_localaddr_rpcbind, 235 .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
231 .addrsize = sizeof(rpcb_localaddr_rpcbind), 236 .addrsize = sizeof(rpcb_localaddr_rpcbind),
@@ -258,7 +263,7 @@ static int rpcb_create_local_unix(void)
258 clnt4 = NULL; 263 clnt4 = NULL;
259 } 264 }
260 265
261 rpcb_set_local(clnt, clnt4); 266 rpcb_set_local(net, clnt, clnt4);
262 267
263out: 268out:
264 return result; 269 return result;
@@ -268,7 +273,7 @@ out:
268 * Returns zero on success, otherwise a negative errno value 273 * Returns zero on success, otherwise a negative errno value
269 * is returned. 274 * is returned.
270 */ 275 */
271static int rpcb_create_local_net(void) 276static int rpcb_create_local_net(struct net *net)
272{ 277{
273 static const struct sockaddr_in rpcb_inaddr_loopback = { 278 static const struct sockaddr_in rpcb_inaddr_loopback = {
274 .sin_family = AF_INET, 279 .sin_family = AF_INET,
@@ -276,7 +281,7 @@ static int rpcb_create_local_net(void)
276 .sin_port = htons(RPCBIND_PORT), 281 .sin_port = htons(RPCBIND_PORT),
277 }; 282 };
278 struct rpc_create_args args = { 283 struct rpc_create_args args = {
279 .net = &init_net, 284 .net = net,
280 .protocol = XPRT_TRANSPORT_TCP, 285 .protocol = XPRT_TRANSPORT_TCP,
281 .address = (struct sockaddr *)&rpcb_inaddr_loopback, 286 .address = (struct sockaddr *)&rpcb_inaddr_loopback,
282 .addrsize = sizeof(rpcb_inaddr_loopback), 287 .addrsize = sizeof(rpcb_inaddr_loopback),
@@ -310,7 +315,7 @@ static int rpcb_create_local_net(void)
310 clnt4 = NULL; 315 clnt4 = NULL;
311 } 316 }
312 317
313 rpcb_set_local(clnt, clnt4); 318 rpcb_set_local(net, clnt, clnt4);
314 319
315out: 320out:
316 return result; 321 return result;
@@ -320,31 +325,32 @@ out:
320 * Returns zero on success, otherwise a negative errno value 325 * Returns zero on success, otherwise a negative errno value
321 * is returned. 326 * is returned.
322 */ 327 */
323int rpcb_create_local(void) 328int rpcb_create_local(struct net *net)
324{ 329{
325 static DEFINE_MUTEX(rpcb_create_local_mutex); 330 static DEFINE_MUTEX(rpcb_create_local_mutex);
326 int result = 0; 331 int result = 0;
327 332
328 if (rpcb_get_local()) 333 if (rpcb_get_local(net))
329 return result; 334 return result;
330 335
331 mutex_lock(&rpcb_create_local_mutex); 336 mutex_lock(&rpcb_create_local_mutex);
332 if (rpcb_get_local()) 337 if (rpcb_get_local(net))
333 goto out; 338 goto out;
334 339
335 if (rpcb_create_local_unix() != 0) 340 if (rpcb_create_local_unix(net) != 0)
336 result = rpcb_create_local_net(); 341 result = rpcb_create_local_net(net);
337 342
338out: 343out:
339 mutex_unlock(&rpcb_create_local_mutex); 344 mutex_unlock(&rpcb_create_local_mutex);
340 return result; 345 return result;
341} 346}
342 347
343static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, 348static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
344 size_t salen, int proto, u32 version) 349 struct sockaddr *srvaddr, size_t salen,
350 int proto, u32 version)
345{ 351{
346 struct rpc_create_args args = { 352 struct rpc_create_args args = {
347 .net = &init_net, 353 .net = net,
348 .protocol = proto, 354 .protocol = proto,
349 .address = srvaddr, 355 .address = srvaddr,
350 .addrsize = salen, 356 .addrsize = salen,
@@ -390,6 +396,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
390 396
391/** 397/**
392 * rpcb_register - set or unset a port registration with the local rpcbind svc 398 * rpcb_register - set or unset a port registration with the local rpcbind svc
399 * @net: target network namespace
393 * @prog: RPC program number to bind 400 * @prog: RPC program number to bind
394 * @vers: RPC version number to bind 401 * @vers: RPC version number to bind
395 * @prot: transport protocol to register 402 * @prot: transport protocol to register
@@ -420,7 +427,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
420 * IN6ADDR_ANY (ie available for all AF_INET and AF_INET6 427 * IN6ADDR_ANY (ie available for all AF_INET and AF_INET6
421 * addresses). 428 * addresses).
422 */ 429 */
423int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port) 430int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short port)
424{ 431{
425 struct rpcbind_args map = { 432 struct rpcbind_args map = {
426 .r_prog = prog, 433 .r_prog = prog,
@@ -431,6 +438,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
431 struct rpc_message msg = { 438 struct rpc_message msg = {
432 .rpc_argp = &map, 439 .rpc_argp = &map,
433 }; 440 };
441 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
434 442
435 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 443 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
436 "rpcbind\n", (port ? "" : "un"), 444 "rpcbind\n", (port ? "" : "un"),
@@ -440,13 +448,14 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
440 if (port) 448 if (port)
441 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; 449 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
442 450
443 return rpcb_register_call(rpcb_local_clnt, &msg); 451 return rpcb_register_call(sn->rpcb_local_clnt, &msg);
444} 452}
445 453
446/* 454/*
447 * Fill in AF_INET family-specific arguments to register 455 * Fill in AF_INET family-specific arguments to register
448 */ 456 */
449static int rpcb_register_inet4(const struct sockaddr *sap, 457static int rpcb_register_inet4(struct sunrpc_net *sn,
458 const struct sockaddr *sap,
450 struct rpc_message *msg) 459 struct rpc_message *msg)
451{ 460{
452 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; 461 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
@@ -465,7 +474,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
465 if (port) 474 if (port)
466 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 475 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
467 476
468 result = rpcb_register_call(rpcb_local_clnt4, msg); 477 result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
469 kfree(map->r_addr); 478 kfree(map->r_addr);
470 return result; 479 return result;
471} 480}
@@ -473,7 +482,8 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
473/* 482/*
474 * Fill in AF_INET6 family-specific arguments to register 483 * Fill in AF_INET6 family-specific arguments to register
475 */ 484 */
476static int rpcb_register_inet6(const struct sockaddr *sap, 485static int rpcb_register_inet6(struct sunrpc_net *sn,
486 const struct sockaddr *sap,
477 struct rpc_message *msg) 487 struct rpc_message *msg)
478{ 488{
479 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; 489 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
@@ -492,12 +502,13 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
492 if (port) 502 if (port)
493 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 503 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
494 504
495 result = rpcb_register_call(rpcb_local_clnt4, msg); 505 result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
496 kfree(map->r_addr); 506 kfree(map->r_addr);
497 return result; 507 return result;
498} 508}
499 509
500static int rpcb_unregister_all_protofamilies(struct rpc_message *msg) 510static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
511 struct rpc_message *msg)
501{ 512{
502 struct rpcbind_args *map = msg->rpc_argp; 513 struct rpcbind_args *map = msg->rpc_argp;
503 514
@@ -508,11 +519,12 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
508 map->r_addr = ""; 519 map->r_addr = "";
509 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 520 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
510 521
511 return rpcb_register_call(rpcb_local_clnt4, msg); 522 return rpcb_register_call(sn->rpcb_local_clnt4, msg);
512} 523}
513 524
514/** 525/**
515 * rpcb_v4_register - set or unset a port registration with the local rpcbind 526 * rpcb_v4_register - set or unset a port registration with the local rpcbind
527 * @net: target network namespace
516 * @program: RPC program number of service to (un)register 528 * @program: RPC program number of service to (un)register
517 * @version: RPC version number of service to (un)register 529 * @version: RPC version number of service to (un)register
518 * @address: address family, IP address, and port to (un)register 530 * @address: address family, IP address, and port to (un)register
@@ -554,7 +566,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
554 * service on any IPv4 address, but not on IPv6. The latter 566 * service on any IPv4 address, but not on IPv6. The latter
555 * advertises the service on all IPv4 and IPv6 addresses. 567 * advertises the service on all IPv4 and IPv6 addresses.
556 */ 568 */
557int rpcb_v4_register(const u32 program, const u32 version, 569int rpcb_v4_register(struct net *net, const u32 program, const u32 version,
558 const struct sockaddr *address, const char *netid) 570 const struct sockaddr *address, const char *netid)
559{ 571{
560 struct rpcbind_args map = { 572 struct rpcbind_args map = {
@@ -566,18 +578,19 @@ int rpcb_v4_register(const u32 program, const u32 version,
566 struct rpc_message msg = { 578 struct rpc_message msg = {
567 .rpc_argp = &map, 579 .rpc_argp = &map,
568 }; 580 };
581 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
569 582
570 if (rpcb_local_clnt4 == NULL) 583 if (sn->rpcb_local_clnt4 == NULL)
571 return -EPROTONOSUPPORT; 584 return -EPROTONOSUPPORT;
572 585
573 if (address == NULL) 586 if (address == NULL)
574 return rpcb_unregister_all_protofamilies(&msg); 587 return rpcb_unregister_all_protofamilies(sn, &msg);
575 588
576 switch (address->sa_family) { 589 switch (address->sa_family) {
577 case AF_INET: 590 case AF_INET:
578 return rpcb_register_inet4(address, &msg); 591 return rpcb_register_inet4(sn, address, &msg);
579 case AF_INET6: 592 case AF_INET6:
580 return rpcb_register_inet6(address, &msg); 593 return rpcb_register_inet6(sn, address, &msg);
581 } 594 }
582 595
583 return -EAFNOSUPPORT; 596 return -EAFNOSUPPORT;
@@ -611,9 +624,10 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
611static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt) 624static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt)
612{ 625{
613 struct rpc_clnt *parent = clnt->cl_parent; 626 struct rpc_clnt *parent = clnt->cl_parent;
627 struct rpc_xprt *xprt = rcu_dereference(clnt->cl_xprt);
614 628
615 while (parent != clnt) { 629 while (parent != clnt) {
616 if (parent->cl_xprt != clnt->cl_xprt) 630 if (rcu_dereference(parent->cl_xprt) != xprt)
617 break; 631 break;
618 if (clnt->cl_autobind) 632 if (clnt->cl_autobind)
619 break; 633 break;
@@ -644,12 +658,16 @@ void rpcb_getport_async(struct rpc_task *task)
644 size_t salen; 658 size_t salen;
645 int status; 659 int status;
646 660
647 clnt = rpcb_find_transport_owner(task->tk_client); 661 rcu_read_lock();
648 xprt = clnt->cl_xprt; 662 do {
663 clnt = rpcb_find_transport_owner(task->tk_client);
664 xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
665 } while (xprt == NULL);
666 rcu_read_unlock();
649 667
650 dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", 668 dprintk("RPC: %5u %s(%s, %u, %u, %d)\n",
651 task->tk_pid, __func__, 669 task->tk_pid, __func__,
652 clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); 670 xprt->servername, clnt->cl_prog, clnt->cl_vers, xprt->prot);
653 671
654 /* Put self on the wait queue to ensure we get notified if 672 /* Put self on the wait queue to ensure we get notified if
655 * some other task is already attempting to bind the port */ 673 * some other task is already attempting to bind the port */
@@ -658,6 +676,7 @@ void rpcb_getport_async(struct rpc_task *task)
658 if (xprt_test_and_set_binding(xprt)) { 676 if (xprt_test_and_set_binding(xprt)) {
659 dprintk("RPC: %5u %s: waiting for another binder\n", 677 dprintk("RPC: %5u %s: waiting for another binder\n",
660 task->tk_pid, __func__); 678 task->tk_pid, __func__);
679 xprt_put(xprt);
661 return; 680 return;
662 } 681 }
663 682
@@ -699,8 +718,8 @@ void rpcb_getport_async(struct rpc_task *task)
699 dprintk("RPC: %5u %s: trying rpcbind version %u\n", 718 dprintk("RPC: %5u %s: trying rpcbind version %u\n",
700 task->tk_pid, __func__, bind_version); 719 task->tk_pid, __func__, bind_version);
701 720
702 rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot, 721 rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen,
703 bind_version); 722 xprt->prot, bind_version);
704 if (IS_ERR(rpcb_clnt)) { 723 if (IS_ERR(rpcb_clnt)) {
705 status = PTR_ERR(rpcb_clnt); 724 status = PTR_ERR(rpcb_clnt);
706 dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", 725 dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n",
@@ -719,13 +738,13 @@ void rpcb_getport_async(struct rpc_task *task)
719 map->r_vers = clnt->cl_vers; 738 map->r_vers = clnt->cl_vers;
720 map->r_prot = xprt->prot; 739 map->r_prot = xprt->prot;
721 map->r_port = 0; 740 map->r_port = 0;
722 map->r_xprt = xprt_get(xprt); 741 map->r_xprt = xprt;
723 map->r_status = -EIO; 742 map->r_status = -EIO;
724 743
725 switch (bind_version) { 744 switch (bind_version) {
726 case RPCBVERS_4: 745 case RPCBVERS_4:
727 case RPCBVERS_3: 746 case RPCBVERS_3:
728 map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID); 747 map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
729 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC); 748 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
730 map->r_owner = ""; 749 map->r_owner = "";
731 break; 750 break;
@@ -754,6 +773,7 @@ bailout_release_client:
754bailout_nofree: 773bailout_nofree:
755 rpcb_wake_rpcbind_waiters(xprt, status); 774 rpcb_wake_rpcbind_waiters(xprt, status);
756 task->tk_status = status; 775 task->tk_status = status;
776 xprt_put(xprt);
757} 777}
758EXPORT_SYMBOL_GPL(rpcb_getport_async); 778EXPORT_SYMBOL_GPL(rpcb_getport_async);
759 779
@@ -801,11 +821,11 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
801static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr, 821static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
802 const struct rpcbind_args *rpcb) 822 const struct rpcbind_args *rpcb)
803{ 823{
804 struct rpc_task *task = req->rq_task;
805 __be32 *p; 824 __be32 *p;
806 825
807 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", 826 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
808 task->tk_pid, task->tk_msg.rpc_proc->p_name, 827 req->rq_task->tk_pid,
828 req->rq_task->tk_msg.rpc_proc->p_name,
809 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); 829 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
810 830
811 p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2); 831 p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
@@ -818,7 +838,6 @@ static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
818static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr, 838static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
819 struct rpcbind_args *rpcb) 839 struct rpcbind_args *rpcb)
820{ 840{
821 struct rpc_task *task = req->rq_task;
822 unsigned long port; 841 unsigned long port;
823 __be32 *p; 842 __be32 *p;
824 843
@@ -829,8 +848,8 @@ static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
829 return -EIO; 848 return -EIO;
830 849
831 port = be32_to_cpup(p); 850 port = be32_to_cpup(p);
832 dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, 851 dprintk("RPC: %5u PMAP_%s result: %lu\n", req->rq_task->tk_pid,
833 task->tk_msg.rpc_proc->p_name, port); 852 req->rq_task->tk_msg.rpc_proc->p_name, port);
834 if (unlikely(port > USHRT_MAX)) 853 if (unlikely(port > USHRT_MAX))
835 return -EIO; 854 return -EIO;
836 855
@@ -841,7 +860,6 @@ static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
841static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr, 860static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
842 unsigned int *boolp) 861 unsigned int *boolp)
843{ 862{
844 struct rpc_task *task = req->rq_task;
845 __be32 *p; 863 __be32 *p;
846 864
847 p = xdr_inline_decode(xdr, 4); 865 p = xdr_inline_decode(xdr, 4);
@@ -853,7 +871,8 @@ static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
853 *boolp = 1; 871 *boolp = 1;
854 872
855 dprintk("RPC: %5u RPCB_%s call %s\n", 873 dprintk("RPC: %5u RPCB_%s call %s\n",
856 task->tk_pid, task->tk_msg.rpc_proc->p_name, 874 req->rq_task->tk_pid,
875 req->rq_task->tk_msg.rpc_proc->p_name,
857 (*boolp ? "succeeded" : "failed")); 876 (*boolp ? "succeeded" : "failed"));
858 return 0; 877 return 0;
859} 878}
@@ -873,11 +892,11 @@ static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
873static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, 892static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
874 const struct rpcbind_args *rpcb) 893 const struct rpcbind_args *rpcb)
875{ 894{
876 struct rpc_task *task = req->rq_task;
877 __be32 *p; 895 __be32 *p;
878 896
879 dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n", 897 dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
880 task->tk_pid, task->tk_msg.rpc_proc->p_name, 898 req->rq_task->tk_pid,
899 req->rq_task->tk_msg.rpc_proc->p_name,
881 rpcb->r_prog, rpcb->r_vers, 900 rpcb->r_prog, rpcb->r_vers,
882 rpcb->r_netid, rpcb->r_addr); 901 rpcb->r_netid, rpcb->r_addr);
883 902
@@ -895,7 +914,6 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
895{ 914{
896 struct sockaddr_storage address; 915 struct sockaddr_storage address;
897 struct sockaddr *sap = (struct sockaddr *)&address; 916 struct sockaddr *sap = (struct sockaddr *)&address;
898 struct rpc_task *task = req->rq_task;
899 __be32 *p; 917 __be32 *p;
900 u32 len; 918 u32 len;
901 919
@@ -912,7 +930,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
912 */ 930 */
913 if (len == 0) { 931 if (len == 0) {
914 dprintk("RPC: %5u RPCB reply: program not registered\n", 932 dprintk("RPC: %5u RPCB reply: program not registered\n",
915 task->tk_pid); 933 req->rq_task->tk_pid);
916 return 0; 934 return 0;
917 } 935 }
918 936
@@ -922,10 +940,11 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
922 p = xdr_inline_decode(xdr, len); 940 p = xdr_inline_decode(xdr, len);
923 if (unlikely(p == NULL)) 941 if (unlikely(p == NULL))
924 goto out_fail; 942 goto out_fail;
925 dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid, 943 dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
926 task->tk_msg.rpc_proc->p_name, (char *)p); 944 req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
927 945
928 if (rpc_uaddr2sockaddr((char *)p, len, sap, sizeof(address)) == 0) 946 if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
947 sap, sizeof(address)) == 0)
929 goto out_fail; 948 goto out_fail;
930 rpcb->r_port = rpc_get_port(sap); 949 rpcb->r_port = rpc_get_port(sap);
931 950
@@ -933,7 +952,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
933 952
934out_fail: 953out_fail:
935 dprintk("RPC: %5u malformed RPCB_%s reply\n", 954 dprintk("RPC: %5u malformed RPCB_%s reply\n",
936 task->tk_pid, task->tk_msg.rpc_proc->p_name); 955 req->rq_task->tk_pid,
956 req->rq_task->tk_msg.rpc_proc->p_name);
937 return -EIO; 957 return -EIO;
938} 958}
939 959
@@ -1041,7 +1061,7 @@ static struct rpc_procinfo rpcb_procedures4[] = {
1041 }, 1061 },
1042}; 1062};
1043 1063
1044static struct rpcb_info rpcb_next_version[] = { 1064static const struct rpcb_info rpcb_next_version[] = {
1045 { 1065 {
1046 .rpc_vers = RPCBVERS_2, 1066 .rpc_vers = RPCBVERS_2,
1047 .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT], 1067 .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT],
@@ -1051,7 +1071,7 @@ static struct rpcb_info rpcb_next_version[] = {
1051 }, 1071 },
1052}; 1072};
1053 1073
1054static struct rpcb_info rpcb_next_version6[] = { 1074static const struct rpcb_info rpcb_next_version6[] = {
1055 { 1075 {
1056 .rpc_vers = RPCBVERS_4, 1076 .rpc_vers = RPCBVERS_4,
1057 .rpc_proc = &rpcb_procedures4[RPCBPROC_GETADDR], 1077 .rpc_proc = &rpcb_procedures4[RPCBPROC_GETADDR],
@@ -1065,25 +1085,25 @@ static struct rpcb_info rpcb_next_version6[] = {
1065 }, 1085 },
1066}; 1086};
1067 1087
1068static struct rpc_version rpcb_version2 = { 1088static const struct rpc_version rpcb_version2 = {
1069 .number = RPCBVERS_2, 1089 .number = RPCBVERS_2,
1070 .nrprocs = ARRAY_SIZE(rpcb_procedures2), 1090 .nrprocs = ARRAY_SIZE(rpcb_procedures2),
1071 .procs = rpcb_procedures2 1091 .procs = rpcb_procedures2
1072}; 1092};
1073 1093
1074static struct rpc_version rpcb_version3 = { 1094static const struct rpc_version rpcb_version3 = {
1075 .number = RPCBVERS_3, 1095 .number = RPCBVERS_3,
1076 .nrprocs = ARRAY_SIZE(rpcb_procedures3), 1096 .nrprocs = ARRAY_SIZE(rpcb_procedures3),
1077 .procs = rpcb_procedures3 1097 .procs = rpcb_procedures3
1078}; 1098};
1079 1099
1080static struct rpc_version rpcb_version4 = { 1100static const struct rpc_version rpcb_version4 = {
1081 .number = RPCBVERS_4, 1101 .number = RPCBVERS_4,
1082 .nrprocs = ARRAY_SIZE(rpcb_procedures4), 1102 .nrprocs = ARRAY_SIZE(rpcb_procedures4),
1083 .procs = rpcb_procedures4 1103 .procs = rpcb_procedures4
1084}; 1104};
1085 1105
1086static struct rpc_version *rpcb_version[] = { 1106static const struct rpc_version *rpcb_version[] = {
1087 NULL, 1107 NULL,
1088 NULL, 1108 NULL,
1089 &rpcb_version2, 1109 &rpcb_version2,
@@ -1093,7 +1113,7 @@ static struct rpc_version *rpcb_version[] = {
1093 1113
1094static struct rpc_stat rpcb_stats; 1114static struct rpc_stat rpcb_stats;
1095 1115
1096static struct rpc_program rpcb_program = { 1116static const struct rpc_program rpcb_program = {
1097 .name = "rpcbind", 1117 .name = "rpcbind",
1098 .number = RPCBIND_PROGRAM, 1118 .number = RPCBIND_PROGRAM,
1099 .nrvers = ARRAY_SIZE(rpcb_version), 1119 .nrvers = ARRAY_SIZE(rpcb_version),
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3341d8962786..994cfea2bad6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -28,6 +28,9 @@
28#define RPCDBG_FACILITY RPCDBG_SCHED 28#define RPCDBG_FACILITY RPCDBG_SCHED
29#endif 29#endif
30 30
31#define CREATE_TRACE_POINTS
32#include <trace/events/sunrpc.h>
33
31/* 34/*
32 * RPC slabs and memory pools 35 * RPC slabs and memory pools
33 */ 36 */
@@ -205,9 +208,7 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
205 queue->qlen = 0; 208 queue->qlen = 0;
206 setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue); 209 setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
207 INIT_LIST_HEAD(&queue->timer_list.list); 210 INIT_LIST_HEAD(&queue->timer_list.list);
208#ifdef RPC_DEBUG 211 rpc_assign_waitqueue_name(queue, qname);
209 queue->name = qname;
210#endif
211} 212}
212 213
213void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) 214void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
@@ -251,6 +252,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
251 252
252static void rpc_set_active(struct rpc_task *task) 253static void rpc_set_active(struct rpc_task *task)
253{ 254{
255 trace_rpc_task_begin(task->tk_client, task, NULL);
256
254 rpc_task_set_debuginfo(task); 257 rpc_task_set_debuginfo(task);
255 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 258 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
256} 259}
@@ -267,6 +270,8 @@ static int rpc_complete_task(struct rpc_task *task)
267 unsigned long flags; 270 unsigned long flags;
268 int ret; 271 int ret;
269 272
273 trace_rpc_task_complete(task->tk_client, task, NULL);
274
270 spin_lock_irqsave(&wq->lock, flags); 275 spin_lock_irqsave(&wq->lock, flags);
271 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 276 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
272 ret = atomic_dec_and_test(&task->tk_count); 277 ret = atomic_dec_and_test(&task->tk_count);
@@ -324,6 +329,8 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
324 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", 329 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
325 task->tk_pid, rpc_qname(q), jiffies); 330 task->tk_pid, rpc_qname(q), jiffies);
326 331
332 trace_rpc_task_sleep(task->tk_client, task, q);
333
327 __rpc_add_wait_queue(q, task, queue_priority); 334 __rpc_add_wait_queue(q, task, queue_priority);
328 335
329 BUG_ON(task->tk_callback != NULL); 336 BUG_ON(task->tk_callback != NULL);
@@ -378,6 +385,8 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
378 return; 385 return;
379 } 386 }
380 387
388 trace_rpc_task_wakeup(task->tk_client, task, queue);
389
381 __rpc_remove_wait_queue(queue, task); 390 __rpc_remove_wait_queue(queue, task);
382 391
383 rpc_make_runnable(task); 392 rpc_make_runnable(task);
@@ -422,7 +431,7 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
422/* 431/*
423 * Wake up the next task on a priority queue. 432 * Wake up the next task on a priority queue.
424 */ 433 */
425static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) 434static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
426{ 435{
427 struct list_head *q; 436 struct list_head *q;
428 struct rpc_task *task; 437 struct rpc_task *task;
@@ -467,30 +476,54 @@ new_queue:
467new_owner: 476new_owner:
468 rpc_set_waitqueue_owner(queue, task->tk_owner); 477 rpc_set_waitqueue_owner(queue, task->tk_owner);
469out: 478out:
470 rpc_wake_up_task_queue_locked(queue, task);
471 return task; 479 return task;
472} 480}
473 481
482static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
483{
484 if (RPC_IS_PRIORITY(queue))
485 return __rpc_find_next_queued_priority(queue);
486 if (!list_empty(&queue->tasks[0]))
487 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
488 return NULL;
489}
490
474/* 491/*
475 * Wake up the next task on the wait queue. 492 * Wake up the first task on the wait queue.
476 */ 493 */
477struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) 494struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
495 bool (*func)(struct rpc_task *, void *), void *data)
478{ 496{
479 struct rpc_task *task = NULL; 497 struct rpc_task *task = NULL;
480 498
481 dprintk("RPC: wake_up_next(%p \"%s\")\n", 499 dprintk("RPC: wake_up_first(%p \"%s\")\n",
482 queue, rpc_qname(queue)); 500 queue, rpc_qname(queue));
483 spin_lock_bh(&queue->lock); 501 spin_lock_bh(&queue->lock);
484 if (RPC_IS_PRIORITY(queue)) 502 task = __rpc_find_next_queued(queue);
485 task = __rpc_wake_up_next_priority(queue); 503 if (task != NULL) {
486 else { 504 if (func(task, data))
487 task_for_first(task, &queue->tasks[0])
488 rpc_wake_up_task_queue_locked(queue, task); 505 rpc_wake_up_task_queue_locked(queue, task);
506 else
507 task = NULL;
489 } 508 }
490 spin_unlock_bh(&queue->lock); 509 spin_unlock_bh(&queue->lock);
491 510
492 return task; 511 return task;
493} 512}
513EXPORT_SYMBOL_GPL(rpc_wake_up_first);
514
515static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
516{
517 return true;
518}
519
520/*
521 * Wake up the next task on the wait queue.
522*/
523struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
524{
525 return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
526}
494EXPORT_SYMBOL_GPL(rpc_wake_up_next); 527EXPORT_SYMBOL_GPL(rpc_wake_up_next);
495 528
496/** 529/**
@@ -501,14 +534,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
501 */ 534 */
502void rpc_wake_up(struct rpc_wait_queue *queue) 535void rpc_wake_up(struct rpc_wait_queue *queue)
503{ 536{
504 struct rpc_task *task, *next;
505 struct list_head *head; 537 struct list_head *head;
506 538
507 spin_lock_bh(&queue->lock); 539 spin_lock_bh(&queue->lock);
508 head = &queue->tasks[queue->maxpriority]; 540 head = &queue->tasks[queue->maxpriority];
509 for (;;) { 541 for (;;) {
510 list_for_each_entry_safe(task, next, head, u.tk_wait.list) 542 while (!list_empty(head)) {
543 struct rpc_task *task;
544 task = list_first_entry(head,
545 struct rpc_task,
546 u.tk_wait.list);
511 rpc_wake_up_task_queue_locked(queue, task); 547 rpc_wake_up_task_queue_locked(queue, task);
548 }
512 if (head == &queue->tasks[0]) 549 if (head == &queue->tasks[0])
513 break; 550 break;
514 head--; 551 head--;
@@ -526,13 +563,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
526 */ 563 */
527void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) 564void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
528{ 565{
529 struct rpc_task *task, *next;
530 struct list_head *head; 566 struct list_head *head;
531 567
532 spin_lock_bh(&queue->lock); 568 spin_lock_bh(&queue->lock);
533 head = &queue->tasks[queue->maxpriority]; 569 head = &queue->tasks[queue->maxpriority];
534 for (;;) { 570 for (;;) {
535 list_for_each_entry_safe(task, next, head, u.tk_wait.list) { 571 while (!list_empty(head)) {
572 struct rpc_task *task;
573 task = list_first_entry(head,
574 struct rpc_task,
575 u.tk_wait.list);
536 task->tk_status = status; 576 task->tk_status = status;
537 rpc_wake_up_task_queue_locked(queue, task); 577 rpc_wake_up_task_queue_locked(queue, task);
538 } 578 }
@@ -677,6 +717,7 @@ static void __rpc_execute(struct rpc_task *task)
677 if (do_action == NULL) 717 if (do_action == NULL)
678 break; 718 break;
679 } 719 }
720 trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
680 do_action(task); 721 do_action(task);
681 722
682 /* 723 /*
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 145e6784f508..0a648c502fc3 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -114,7 +114,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
114 } 114 }
115 115
116 len = PAGE_CACHE_SIZE; 116 len = PAGE_CACHE_SIZE;
117 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); 117 kaddr = kmap_atomic(*ppage);
118 if (base) { 118 if (base) {
119 len -= base; 119 len -= base;
120 if (pglen < len) 120 if (pglen < len)
@@ -127,7 +127,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
127 ret = copy_actor(desc, kaddr, len); 127 ret = copy_actor(desc, kaddr, len);
128 } 128 }
129 flush_dcache_page(*ppage); 129 flush_dcache_page(*ppage);
130 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); 130 kunmap_atomic(kaddr);
131 copied += ret; 131 copied += ret;
132 if (ret != len || !desc->count) 132 if (ret != len || !desc->count)
133 goto out; 133 goto out;
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 80df89d957ba..bc2068ee795b 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -22,6 +22,7 @@
22#include <linux/sunrpc/clnt.h> 22#include <linux/sunrpc/clnt.h>
23#include <linux/sunrpc/svcsock.h> 23#include <linux/sunrpc/svcsock.h>
24#include <linux/sunrpc/metrics.h> 24#include <linux/sunrpc/metrics.h>
25#include <linux/rcupdate.h>
25 26
26#include "netns.h" 27#include "netns.h"
27 28
@@ -133,20 +134,19 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
133/** 134/**
134 * rpc_count_iostats - tally up per-task stats 135 * rpc_count_iostats - tally up per-task stats
135 * @task: completed rpc_task 136 * @task: completed rpc_task
137 * @stats: array of stat structures
136 * 138 *
137 * Relies on the caller for serialization. 139 * Relies on the caller for serialization.
138 */ 140 */
139void rpc_count_iostats(struct rpc_task *task) 141void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
140{ 142{
141 struct rpc_rqst *req = task->tk_rqstp; 143 struct rpc_rqst *req = task->tk_rqstp;
142 struct rpc_iostats *stats;
143 struct rpc_iostats *op_metrics; 144 struct rpc_iostats *op_metrics;
144 ktime_t delta; 145 ktime_t delta;
145 146
146 if (!task->tk_client || !task->tk_client->cl_metrics || !req) 147 if (!stats || !req)
147 return; 148 return;
148 149
149 stats = task->tk_client->cl_metrics;
150 op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx]; 150 op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
151 151
152 op_metrics->om_ops++; 152 op_metrics->om_ops++;
@@ -164,6 +164,7 @@ void rpc_count_iostats(struct rpc_task *task)
164 delta = ktime_sub(ktime_get(), task->tk_start); 164 delta = ktime_sub(ktime_get(), task->tk_start);
165 op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); 165 op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
166} 166}
167EXPORT_SYMBOL_GPL(rpc_count_iostats);
167 168
168static void _print_name(struct seq_file *seq, unsigned int op, 169static void _print_name(struct seq_file *seq, unsigned int op,
169 struct rpc_procinfo *procs) 170 struct rpc_procinfo *procs)
@@ -179,7 +180,7 @@ static void _print_name(struct seq_file *seq, unsigned int op,
179void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) 180void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
180{ 181{
181 struct rpc_iostats *stats = clnt->cl_metrics; 182 struct rpc_iostats *stats = clnt->cl_metrics;
182 struct rpc_xprt *xprt = clnt->cl_xprt; 183 struct rpc_xprt *xprt;
183 unsigned int op, maxproc = clnt->cl_maxproc; 184 unsigned int op, maxproc = clnt->cl_maxproc;
184 185
185 if (!stats) 186 if (!stats)
@@ -189,8 +190,11 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
189 seq_printf(seq, "p/v: %u/%u (%s)\n", 190 seq_printf(seq, "p/v: %u/%u (%s)\n",
190 clnt->cl_prog, clnt->cl_vers, clnt->cl_protname); 191 clnt->cl_prog, clnt->cl_vers, clnt->cl_protname);
191 192
193 rcu_read_lock();
194 xprt = rcu_dereference(clnt->cl_xprt);
192 if (xprt) 195 if (xprt)
193 xprt->ops->print_stats(xprt, seq); 196 xprt->ops->print_stats(xprt, seq);
197 rcu_read_unlock();
194 198
195 seq_printf(seq, "\tper-op statistics\n"); 199 seq_printf(seq, "\tper-op statistics\n");
196 for (op = 0; op < maxproc; op++) { 200 for (op = 0; op < maxproc; op++) {
@@ -213,45 +217,46 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats);
213 * Register/unregister RPC proc files 217 * Register/unregister RPC proc files
214 */ 218 */
215static inline struct proc_dir_entry * 219static inline struct proc_dir_entry *
216do_register(const char *name, void *data, const struct file_operations *fops) 220do_register(struct net *net, const char *name, void *data,
221 const struct file_operations *fops)
217{ 222{
218 struct sunrpc_net *sn; 223 struct sunrpc_net *sn;
219 224
220 dprintk("RPC: registering /proc/net/rpc/%s\n", name); 225 dprintk("RPC: registering /proc/net/rpc/%s\n", name);
221 sn = net_generic(&init_net, sunrpc_net_id); 226 sn = net_generic(net, sunrpc_net_id);
222 return proc_create_data(name, 0, sn->proc_net_rpc, fops, data); 227 return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
223} 228}
224 229
225struct proc_dir_entry * 230struct proc_dir_entry *
226rpc_proc_register(struct rpc_stat *statp) 231rpc_proc_register(struct net *net, struct rpc_stat *statp)
227{ 232{
228 return do_register(statp->program->name, statp, &rpc_proc_fops); 233 return do_register(net, statp->program->name, statp, &rpc_proc_fops);
229} 234}
230EXPORT_SYMBOL_GPL(rpc_proc_register); 235EXPORT_SYMBOL_GPL(rpc_proc_register);
231 236
232void 237void
233rpc_proc_unregister(const char *name) 238rpc_proc_unregister(struct net *net, const char *name)
234{ 239{
235 struct sunrpc_net *sn; 240 struct sunrpc_net *sn;
236 241
237 sn = net_generic(&init_net, sunrpc_net_id); 242 sn = net_generic(net, sunrpc_net_id);
238 remove_proc_entry(name, sn->proc_net_rpc); 243 remove_proc_entry(name, sn->proc_net_rpc);
239} 244}
240EXPORT_SYMBOL_GPL(rpc_proc_unregister); 245EXPORT_SYMBOL_GPL(rpc_proc_unregister);
241 246
242struct proc_dir_entry * 247struct proc_dir_entry *
243svc_proc_register(struct svc_stat *statp, const struct file_operations *fops) 248svc_proc_register(struct net *net, struct svc_stat *statp, const struct file_operations *fops)
244{ 249{
245 return do_register(statp->program->pg_name, statp, fops); 250 return do_register(net, statp->program->pg_name, statp, fops);
246} 251}
247EXPORT_SYMBOL_GPL(svc_proc_register); 252EXPORT_SYMBOL_GPL(svc_proc_register);
248 253
249void 254void
250svc_proc_unregister(const char *name) 255svc_proc_unregister(struct net *net, const char *name)
251{ 256{
252 struct sunrpc_net *sn; 257 struct sunrpc_net *sn;
253 258
254 sn = net_generic(&init_net, sunrpc_net_id); 259 sn = net_generic(net, sunrpc_net_id);
255 remove_proc_entry(name, sn->proc_net_rpc); 260 remove_proc_entry(name, sn->proc_net_rpc);
256} 261}
257EXPORT_SYMBOL_GPL(svc_proc_unregister); 262EXPORT_SYMBOL_GPL(svc_proc_unregister);
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index 90c292e2738b..14c9f6d1c5ff 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -47,5 +47,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
47 struct page *headpage, unsigned long headoffset, 47 struct page *headpage, unsigned long headoffset,
48 struct page *tailpage, unsigned long tailoffset); 48 struct page *tailpage, unsigned long tailoffset);
49 49
50int rpc_clients_notifier_register(void);
51void rpc_clients_notifier_unregister(void);
50#endif /* _NET_SUNRPC_SUNRPC_H */ 52#endif /* _NET_SUNRPC_SUNRPC_H */
51 53
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8ec9778c3f4a..3d6498af9adc 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -25,10 +25,12 @@
25#include "netns.h" 25#include "netns.h"
26 26
27int sunrpc_net_id; 27int sunrpc_net_id;
28EXPORT_SYMBOL_GPL(sunrpc_net_id);
28 29
29static __net_init int sunrpc_init_net(struct net *net) 30static __net_init int sunrpc_init_net(struct net *net)
30{ 31{
31 int err; 32 int err;
33 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
32 34
33 err = rpc_proc_init(net); 35 err = rpc_proc_init(net);
34 if (err) 36 if (err)
@@ -38,8 +40,18 @@ static __net_init int sunrpc_init_net(struct net *net)
38 if (err) 40 if (err)
39 goto err_ipmap; 41 goto err_ipmap;
40 42
43 err = unix_gid_cache_create(net);
44 if (err)
45 goto err_unixgid;
46
47 rpc_pipefs_init_net(net);
48 INIT_LIST_HEAD(&sn->all_clients);
49 spin_lock_init(&sn->rpc_client_lock);
50 spin_lock_init(&sn->rpcb_clnt_lock);
41 return 0; 51 return 0;
42 52
53err_unixgid:
54 ip_map_cache_destroy(net);
43err_ipmap: 55err_ipmap:
44 rpc_proc_exit(net); 56 rpc_proc_exit(net);
45err_proc: 57err_proc:
@@ -48,6 +60,7 @@ err_proc:
48 60
49static __net_exit void sunrpc_exit_net(struct net *net) 61static __net_exit void sunrpc_exit_net(struct net *net)
50{ 62{
63 unix_gid_cache_destroy(net);
51 ip_map_cache_destroy(net); 64 ip_map_cache_destroy(net);
52 rpc_proc_exit(net); 65 rpc_proc_exit(net);
53} 66}
@@ -59,40 +72,38 @@ static struct pernet_operations sunrpc_net_ops = {
59 .size = sizeof(struct sunrpc_net), 72 .size = sizeof(struct sunrpc_net),
60}; 73};
61 74
62extern struct cache_detail unix_gid_cache;
63
64static int __init 75static int __init
65init_sunrpc(void) 76init_sunrpc(void)
66{ 77{
67 int err = register_rpc_pipefs(); 78 int err = rpc_init_mempool();
68 if (err) 79 if (err)
69 goto out; 80 goto out;
70 err = rpc_init_mempool();
71 if (err)
72 goto out2;
73 err = rpcauth_init_module(); 81 err = rpcauth_init_module();
74 if (err) 82 if (err)
75 goto out3; 83 goto out2;
76 84
77 cache_initialize(); 85 cache_initialize();
78 86
79 err = register_pernet_subsys(&sunrpc_net_ops); 87 err = register_pernet_subsys(&sunrpc_net_ops);
80 if (err) 88 if (err)
89 goto out3;
90
91 err = register_rpc_pipefs();
92 if (err)
81 goto out4; 93 goto out4;
82#ifdef RPC_DEBUG 94#ifdef RPC_DEBUG
83 rpc_register_sysctl(); 95 rpc_register_sysctl();
84#endif 96#endif
85 cache_register(&unix_gid_cache);
86 svc_init_xprt_sock(); /* svc sock transport */ 97 svc_init_xprt_sock(); /* svc sock transport */
87 init_socket_xprt(); /* clnt sock transport */ 98 init_socket_xprt(); /* clnt sock transport */
88 return 0; 99 return 0;
89 100
90out4: 101out4:
91 rpcauth_remove_module(); 102 unregister_pernet_subsys(&sunrpc_net_ops);
92out3: 103out3:
93 rpc_destroy_mempool(); 104 rpcauth_remove_module();
94out2: 105out2:
95 unregister_rpc_pipefs(); 106 rpc_destroy_mempool();
96out: 107out:
97 return err; 108 return err;
98} 109}
@@ -105,7 +116,6 @@ cleanup_sunrpc(void)
105 svc_cleanup_xprt_sock(); 116 svc_cleanup_xprt_sock();
106 unregister_rpc_pipefs(); 117 unregister_rpc_pipefs();
107 rpc_destroy_mempool(); 118 rpc_destroy_mempool();
108 cache_unregister(&unix_gid_cache);
109 unregister_pernet_subsys(&sunrpc_net_ops); 119 unregister_pernet_subsys(&sunrpc_net_ops);
110#ifdef RPC_DEBUG 120#ifdef RPC_DEBUG
111 rpc_unregister_sysctl(); 121 rpc_unregister_sysctl();
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e4aabc02368b..7e9baaa1e543 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -20,6 +20,7 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/nsproxy.h>
23 24
24#include <linux/sunrpc/types.h> 25#include <linux/sunrpc/types.h>
25#include <linux/sunrpc/xdr.h> 26#include <linux/sunrpc/xdr.h>
@@ -30,7 +31,7 @@
30 31
31#define RPCDBG_FACILITY RPCDBG_SVCDSP 32#define RPCDBG_FACILITY RPCDBG_SVCDSP
32 33
33static void svc_unregister(const struct svc_serv *serv); 34static void svc_unregister(const struct svc_serv *serv, struct net *net);
34 35
35#define svc_serv_is_pooled(serv) ((serv)->sv_function) 36#define svc_serv_is_pooled(serv) ((serv)->sv_function)
36 37
@@ -368,23 +369,24 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
368 return &serv->sv_pools[pidx % serv->sv_nrpools]; 369 return &serv->sv_pools[pidx % serv->sv_nrpools];
369} 370}
370 371
371static int svc_rpcb_setup(struct svc_serv *serv) 372int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
372{ 373{
373 int err; 374 int err;
374 375
375 err = rpcb_create_local(); 376 err = rpcb_create_local(net);
376 if (err) 377 if (err)
377 return err; 378 return err;
378 379
379 /* Remove any stale portmap registrations */ 380 /* Remove any stale portmap registrations */
380 svc_unregister(serv); 381 svc_unregister(serv, net);
381 return 0; 382 return 0;
382} 383}
384EXPORT_SYMBOL_GPL(svc_rpcb_setup);
383 385
384void svc_rpcb_cleanup(struct svc_serv *serv) 386void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
385{ 387{
386 svc_unregister(serv); 388 svc_unregister(serv, net);
387 rpcb_put_local(); 389 rpcb_put_local(net);
388} 390}
389EXPORT_SYMBOL_GPL(svc_rpcb_cleanup); 391EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
390 392
@@ -405,12 +407,20 @@ static int svc_uses_rpcbind(struct svc_serv *serv)
405 return 0; 407 return 0;
406} 408}
407 409
410int svc_bind(struct svc_serv *serv, struct net *net)
411{
412 if (!svc_uses_rpcbind(serv))
413 return 0;
414 return svc_rpcb_setup(serv, net);
415}
416EXPORT_SYMBOL_GPL(svc_bind);
417
408/* 418/*
409 * Create an RPC service 419 * Create an RPC service
410 */ 420 */
411static struct svc_serv * 421static struct svc_serv *
412__svc_create(struct svc_program *prog, unsigned int bufsize, int npools, 422__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
413 void (*shutdown)(struct svc_serv *serv)) 423 void (*shutdown)(struct svc_serv *serv, struct net *net))
414{ 424{
415 struct svc_serv *serv; 425 struct svc_serv *serv;
416 unsigned int vers; 426 unsigned int vers;
@@ -469,22 +479,15 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
469 spin_lock_init(&pool->sp_lock); 479 spin_lock_init(&pool->sp_lock);
470 } 480 }
471 481
472 if (svc_uses_rpcbind(serv)) { 482 if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
473 if (svc_rpcb_setup(serv) < 0) { 483 serv->sv_shutdown = svc_rpcb_cleanup;
474 kfree(serv->sv_pools);
475 kfree(serv);
476 return NULL;
477 }
478 if (!serv->sv_shutdown)
479 serv->sv_shutdown = svc_rpcb_cleanup;
480 }
481 484
482 return serv; 485 return serv;
483} 486}
484 487
485struct svc_serv * 488struct svc_serv *
486svc_create(struct svc_program *prog, unsigned int bufsize, 489svc_create(struct svc_program *prog, unsigned int bufsize,
487 void (*shutdown)(struct svc_serv *serv)) 490 void (*shutdown)(struct svc_serv *serv, struct net *net))
488{ 491{
489 return __svc_create(prog, bufsize, /*npools*/1, shutdown); 492 return __svc_create(prog, bufsize, /*npools*/1, shutdown);
490} 493}
@@ -492,7 +495,7 @@ EXPORT_SYMBOL_GPL(svc_create);
492 495
493struct svc_serv * 496struct svc_serv *
494svc_create_pooled(struct svc_program *prog, unsigned int bufsize, 497svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
495 void (*shutdown)(struct svc_serv *serv), 498 void (*shutdown)(struct svc_serv *serv, struct net *net),
496 svc_thread_fn func, struct module *mod) 499 svc_thread_fn func, struct module *mod)
497{ 500{
498 struct svc_serv *serv; 501 struct svc_serv *serv;
@@ -509,6 +512,24 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
509} 512}
510EXPORT_SYMBOL_GPL(svc_create_pooled); 513EXPORT_SYMBOL_GPL(svc_create_pooled);
511 514
515void svc_shutdown_net(struct svc_serv *serv, struct net *net)
516{
517 /*
518 * The set of xprts (contained in the sv_tempsocks and
519 * sv_permsocks lists) is now constant, since it is modified
520 * only by accepting new sockets (done by service threads in
521 * svc_recv) or aging old ones (done by sv_temptimer), or
522 * configuration changes (excluded by whatever locking the
523 * caller is using--nfsd_mutex in the case of nfsd). So it's
524 * safe to traverse those lists and shut everything down:
525 */
526 svc_close_net(serv, net);
527
528 if (serv->sv_shutdown)
529 serv->sv_shutdown(serv, net);
530}
531EXPORT_SYMBOL_GPL(svc_shutdown_net);
532
512/* 533/*
513 * Destroy an RPC service. Should be called with appropriate locking to 534 * Destroy an RPC service. Should be called with appropriate locking to
514 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks. 535 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
@@ -529,19 +550,13 @@ svc_destroy(struct svc_serv *serv)
529 printk("svc_destroy: no threads for serv=%p!\n", serv); 550 printk("svc_destroy: no threads for serv=%p!\n", serv);
530 551
531 del_timer_sync(&serv->sv_temptimer); 552 del_timer_sync(&serv->sv_temptimer);
553
532 /* 554 /*
533 * The set of xprts (contained in the sv_tempsocks and 555 * The last user is gone and thus all sockets have to be destroyed to
534 * sv_permsocks lists) is now constant, since it is modified 556 * the point. Check this.
535 * only by accepting new sockets (done by service threads in
536 * svc_recv) or aging old ones (done by sv_temptimer), or
537 * configuration changes (excluded by whatever locking the
538 * caller is using--nfsd_mutex in the case of nfsd). So it's
539 * safe to traverse those lists and shut everything down:
540 */ 557 */
541 svc_close_all(serv); 558 BUG_ON(!list_empty(&serv->sv_permsocks));
542 559 BUG_ON(!list_empty(&serv->sv_tempsocks));
543 if (serv->sv_shutdown)
544 serv->sv_shutdown(serv);
545 560
546 cache_clean_deferred(serv); 561 cache_clean_deferred(serv);
547 562
@@ -795,7 +810,8 @@ EXPORT_SYMBOL_GPL(svc_exit_thread);
795 * Returns zero on success; a negative errno value is returned 810 * Returns zero on success; a negative errno value is returned
796 * if any error occurs. 811 * if any error occurs.
797 */ 812 */
798static int __svc_rpcb_register4(const u32 program, const u32 version, 813static int __svc_rpcb_register4(struct net *net, const u32 program,
814 const u32 version,
799 const unsigned short protocol, 815 const unsigned short protocol,
800 const unsigned short port) 816 const unsigned short port)
801{ 817{
@@ -818,7 +834,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
818 return -ENOPROTOOPT; 834 return -ENOPROTOOPT;
819 } 835 }
820 836
821 error = rpcb_v4_register(program, version, 837 error = rpcb_v4_register(net, program, version,
822 (const struct sockaddr *)&sin, netid); 838 (const struct sockaddr *)&sin, netid);
823 839
824 /* 840 /*
@@ -826,7 +842,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
826 * registration request with the legacy rpcbind v2 protocol. 842 * registration request with the legacy rpcbind v2 protocol.
827 */ 843 */
828 if (error == -EPROTONOSUPPORT) 844 if (error == -EPROTONOSUPPORT)
829 error = rpcb_register(program, version, protocol, port); 845 error = rpcb_register(net, program, version, protocol, port);
830 846
831 return error; 847 return error;
832} 848}
@@ -842,7 +858,8 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
842 * Returns zero on success; a negative errno value is returned 858 * Returns zero on success; a negative errno value is returned
843 * if any error occurs. 859 * if any error occurs.
844 */ 860 */
845static int __svc_rpcb_register6(const u32 program, const u32 version, 861static int __svc_rpcb_register6(struct net *net, const u32 program,
862 const u32 version,
846 const unsigned short protocol, 863 const unsigned short protocol,
847 const unsigned short port) 864 const unsigned short port)
848{ 865{
@@ -865,7 +882,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
865 return -ENOPROTOOPT; 882 return -ENOPROTOOPT;
866 } 883 }
867 884
868 error = rpcb_v4_register(program, version, 885 error = rpcb_v4_register(net, program, version,
869 (const struct sockaddr *)&sin6, netid); 886 (const struct sockaddr *)&sin6, netid);
870 887
871 /* 888 /*
@@ -885,7 +902,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
885 * Returns zero on success; a negative errno value is returned 902 * Returns zero on success; a negative errno value is returned
886 * if any error occurs. 903 * if any error occurs.
887 */ 904 */
888static int __svc_register(const char *progname, 905static int __svc_register(struct net *net, const char *progname,
889 const u32 program, const u32 version, 906 const u32 program, const u32 version,
890 const int family, 907 const int family,
891 const unsigned short protocol, 908 const unsigned short protocol,
@@ -895,12 +912,12 @@ static int __svc_register(const char *progname,
895 912
896 switch (family) { 913 switch (family) {
897 case PF_INET: 914 case PF_INET:
898 error = __svc_rpcb_register4(program, version, 915 error = __svc_rpcb_register4(net, program, version,
899 protocol, port); 916 protocol, port);
900 break; 917 break;
901#if IS_ENABLED(CONFIG_IPV6) 918#if IS_ENABLED(CONFIG_IPV6)
902 case PF_INET6: 919 case PF_INET6:
903 error = __svc_rpcb_register6(program, version, 920 error = __svc_rpcb_register6(net, program, version,
904 protocol, port); 921 protocol, port);
905#endif 922#endif
906 } 923 }
@@ -914,14 +931,16 @@ static int __svc_register(const char *progname,
914/** 931/**
915 * svc_register - register an RPC service with the local portmapper 932 * svc_register - register an RPC service with the local portmapper
916 * @serv: svc_serv struct for the service to register 933 * @serv: svc_serv struct for the service to register
934 * @net: net namespace for the service to register
917 * @family: protocol family of service's listener socket 935 * @family: protocol family of service's listener socket
918 * @proto: transport protocol number to advertise 936 * @proto: transport protocol number to advertise
919 * @port: port to advertise 937 * @port: port to advertise
920 * 938 *
921 * Service is registered for any address in the passed-in protocol family 939 * Service is registered for any address in the passed-in protocol family
922 */ 940 */
923int svc_register(const struct svc_serv *serv, const int family, 941int svc_register(const struct svc_serv *serv, struct net *net,
924 const unsigned short proto, const unsigned short port) 942 const int family, const unsigned short proto,
943 const unsigned short port)
925{ 944{
926 struct svc_program *progp; 945 struct svc_program *progp;
927 unsigned int i; 946 unsigned int i;
@@ -946,7 +965,7 @@ int svc_register(const struct svc_serv *serv, const int family,
946 if (progp->pg_vers[i]->vs_hidden) 965 if (progp->pg_vers[i]->vs_hidden)
947 continue; 966 continue;
948 967
949 error = __svc_register(progp->pg_name, progp->pg_prog, 968 error = __svc_register(net, progp->pg_name, progp->pg_prog,
950 i, family, proto, port); 969 i, family, proto, port);
951 if (error < 0) 970 if (error < 0)
952 break; 971 break;
@@ -963,19 +982,19 @@ int svc_register(const struct svc_serv *serv, const int family,
963 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient 982 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
964 * in this case to clear all existing entries for [program, version]. 983 * in this case to clear all existing entries for [program, version].
965 */ 984 */
966static void __svc_unregister(const u32 program, const u32 version, 985static void __svc_unregister(struct net *net, const u32 program, const u32 version,
967 const char *progname) 986 const char *progname)
968{ 987{
969 int error; 988 int error;
970 989
971 error = rpcb_v4_register(program, version, NULL, ""); 990 error = rpcb_v4_register(net, program, version, NULL, "");
972 991
973 /* 992 /*
974 * User space didn't support rpcbind v4, so retry this 993 * User space didn't support rpcbind v4, so retry this
975 * request with the legacy rpcbind v2 protocol. 994 * request with the legacy rpcbind v2 protocol.
976 */ 995 */
977 if (error == -EPROTONOSUPPORT) 996 if (error == -EPROTONOSUPPORT)
978 error = rpcb_register(program, version, 0, 0); 997 error = rpcb_register(net, program, version, 0, 0);
979 998
980 dprintk("svc: %s(%sv%u), error %d\n", 999 dprintk("svc: %s(%sv%u), error %d\n",
981 __func__, progname, version, error); 1000 __func__, progname, version, error);
@@ -989,7 +1008,7 @@ static void __svc_unregister(const u32 program, const u32 version,
989 * The result of unregistration is reported via dprintk for those who want 1008 * The result of unregistration is reported via dprintk for those who want
990 * verification of the result, but is otherwise not important. 1009 * verification of the result, but is otherwise not important.
991 */ 1010 */
992static void svc_unregister(const struct svc_serv *serv) 1011static void svc_unregister(const struct svc_serv *serv, struct net *net)
993{ 1012{
994 struct svc_program *progp; 1013 struct svc_program *progp;
995 unsigned long flags; 1014 unsigned long flags;
@@ -1006,7 +1025,7 @@ static void svc_unregister(const struct svc_serv *serv)
1006 1025
1007 dprintk("svc: attempting to unregister %sv%u\n", 1026 dprintk("svc: attempting to unregister %sv%u\n",
1008 progp->pg_name, i); 1027 progp->pg_name, i);
1009 __svc_unregister(progp->pg_prog, i, progp->pg_name); 1028 __svc_unregister(net, progp->pg_prog, i, progp->pg_name);
1010 } 1029 }
1011 } 1030 }
1012 1031
@@ -1019,23 +1038,21 @@ static void svc_unregister(const struct svc_serv *serv)
1019 * Printk the given error with the address of the client that caused it. 1038 * Printk the given error with the address of the client that caused it.
1020 */ 1039 */
1021static __printf(2, 3) 1040static __printf(2, 3)
1022int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 1041void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1023{ 1042{
1043 struct va_format vaf;
1024 va_list args; 1044 va_list args;
1025 int r;
1026 char buf[RPC_MAX_ADDRBUFLEN]; 1045 char buf[RPC_MAX_ADDRBUFLEN];
1027 1046
1028 if (!net_ratelimit()) 1047 va_start(args, fmt);
1029 return 0;
1030 1048
1031 printk(KERN_WARNING "svc: %s: ", 1049 vaf.fmt = fmt;
1032 svc_print_addr(rqstp, buf, sizeof(buf))); 1050 vaf.va = &args;
1033 1051
1034 va_start(args, fmt); 1052 net_warn_ratelimited("svc: %s: %pV",
1035 r = vprintk(fmt, args); 1053 svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1036 va_end(args);
1037 1054
1038 return r; 1055 va_end(args);
1039} 1056}
1040 1057
1041/* 1058/*
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 74cb0d8e9ca1..88f2bf671960 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -544,14 +544,11 @@ static void svc_check_conn_limits(struct svc_serv *serv)
544 struct svc_xprt *xprt = NULL; 544 struct svc_xprt *xprt = NULL;
545 spin_lock_bh(&serv->sv_lock); 545 spin_lock_bh(&serv->sv_lock);
546 if (!list_empty(&serv->sv_tempsocks)) { 546 if (!list_empty(&serv->sv_tempsocks)) {
547 if (net_ratelimit()) { 547 /* Try to help the admin */
548 /* Try to help the admin */ 548 net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
549 printk(KERN_NOTICE "%s: too many open " 549 serv->sv_name, serv->sv_maxconn ?
550 "connections, consider increasing %s\n", 550 "max number of connections" :
551 serv->sv_name, serv->sv_maxconn ? 551 "number of threads");
552 "the max number of connections." :
553 "the number of threads.");
554 }
555 /* 552 /*
556 * Always select the oldest connection. It's not fair, 553 * Always select the oldest connection. It's not fair,
557 * but so is life 554 * but so is life
@@ -601,6 +598,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
601 598
602 /* now allocate needed pages. If we get a failure, sleep briefly */ 599 /* now allocate needed pages. If we get a failure, sleep briefly */
603 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 600 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
601 BUG_ON(pages >= RPCSVC_MAXPAGES);
604 for (i = 0; i < pages ; i++) 602 for (i = 0; i < pages ; i++)
605 while (rqstp->rq_pages[i] == NULL) { 603 while (rqstp->rq_pages[i] == NULL) {
606 struct page *p = alloc_page(GFP_KERNEL); 604 struct page *p = alloc_page(GFP_KERNEL);
@@ -615,7 +613,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
615 rqstp->rq_pages[i] = p; 613 rqstp->rq_pages[i] = p;
616 } 614 }
617 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ 615 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
618 BUG_ON(pages >= RPCSVC_MAXPAGES);
619 616
620 /* Make arg->head point to first page and arg->pages point to rest */ 617 /* Make arg->head point to first page and arg->pages point to rest */
621 arg = &rqstp->rq_arg; 618 arg = &rqstp->rq_arg;
@@ -922,48 +919,65 @@ void svc_close_xprt(struct svc_xprt *xprt)
922} 919}
923EXPORT_SYMBOL_GPL(svc_close_xprt); 920EXPORT_SYMBOL_GPL(svc_close_xprt);
924 921
925static void svc_close_list(struct list_head *xprt_list) 922static void svc_close_list(struct list_head *xprt_list, struct net *net)
926{ 923{
927 struct svc_xprt *xprt; 924 struct svc_xprt *xprt;
928 925
929 list_for_each_entry(xprt, xprt_list, xpt_list) { 926 list_for_each_entry(xprt, xprt_list, xpt_list) {
927 if (xprt->xpt_net != net)
928 continue;
930 set_bit(XPT_CLOSE, &xprt->xpt_flags); 929 set_bit(XPT_CLOSE, &xprt->xpt_flags);
931 set_bit(XPT_BUSY, &xprt->xpt_flags); 930 set_bit(XPT_BUSY, &xprt->xpt_flags);
932 } 931 }
933} 932}
934 933
935void svc_close_all(struct svc_serv *serv) 934static void svc_clear_pools(struct svc_serv *serv, struct net *net)
936{ 935{
937 struct svc_pool *pool; 936 struct svc_pool *pool;
938 struct svc_xprt *xprt; 937 struct svc_xprt *xprt;
939 struct svc_xprt *tmp; 938 struct svc_xprt *tmp;
940 int i; 939 int i;
941 940
942 svc_close_list(&serv->sv_tempsocks);
943 svc_close_list(&serv->sv_permsocks);
944
945 for (i = 0; i < serv->sv_nrpools; i++) { 941 for (i = 0; i < serv->sv_nrpools; i++) {
946 pool = &serv->sv_pools[i]; 942 pool = &serv->sv_pools[i];
947 943
948 spin_lock_bh(&pool->sp_lock); 944 spin_lock_bh(&pool->sp_lock);
949 while (!list_empty(&pool->sp_sockets)) { 945 list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
950 xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready); 946 if (xprt->xpt_net != net)
947 continue;
951 list_del_init(&xprt->xpt_ready); 948 list_del_init(&xprt->xpt_ready);
952 } 949 }
953 spin_unlock_bh(&pool->sp_lock); 950 spin_unlock_bh(&pool->sp_lock);
954 } 951 }
952}
953
954static void svc_clear_list(struct list_head *xprt_list, struct net *net)
955{
956 struct svc_xprt *xprt;
957 struct svc_xprt *tmp;
958
959 list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
960 if (xprt->xpt_net != net)
961 continue;
962 svc_delete_xprt(xprt);
963 }
964 list_for_each_entry(xprt, xprt_list, xpt_list)
965 BUG_ON(xprt->xpt_net == net);
966}
967
968void svc_close_net(struct svc_serv *serv, struct net *net)
969{
970 svc_close_list(&serv->sv_tempsocks, net);
971 svc_close_list(&serv->sv_permsocks, net);
972
973 svc_clear_pools(serv, net);
955 /* 974 /*
956 * At this point the sp_sockets lists will stay empty, since 975 * At this point the sp_sockets lists will stay empty, since
957 * svc_enqueue will not add new entries without taking the 976 * svc_xprt_enqueue will not add new entries without taking the
958 * sp_lock and checking XPT_BUSY. 977 * sp_lock and checking XPT_BUSY.
959 */ 978 */
960 list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list) 979 svc_clear_list(&serv->sv_tempsocks, net);
961 svc_delete_xprt(xprt); 980 svc_clear_list(&serv->sv_permsocks, net);
962 list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
963 svc_delete_xprt(xprt);
964
965 BUG_ON(!list_empty(&serv->sv_permsocks));
966 BUG_ON(!list_empty(&serv->sv_tempsocks));
967} 981}
968 982
969/* 983/*
@@ -1089,6 +1103,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1089 * svc_find_xprt - find an RPC transport instance 1103 * svc_find_xprt - find an RPC transport instance
1090 * @serv: pointer to svc_serv to search 1104 * @serv: pointer to svc_serv to search
1091 * @xcl_name: C string containing transport's class name 1105 * @xcl_name: C string containing transport's class name
1106 * @net: owner net pointer
1092 * @af: Address family of transport's local address 1107 * @af: Address family of transport's local address
1093 * @port: transport's IP port number 1108 * @port: transport's IP port number
1094 * 1109 *
@@ -1101,7 +1116,8 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1101 * service's list that has a matching class name. 1116 * service's list that has a matching class name.
1102 */ 1117 */
1103struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, 1118struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
1104 const sa_family_t af, const unsigned short port) 1119 struct net *net, const sa_family_t af,
1120 const unsigned short port)
1105{ 1121{
1106 struct svc_xprt *xprt; 1122 struct svc_xprt *xprt;
1107 struct svc_xprt *found = NULL; 1123 struct svc_xprt *found = NULL;
@@ -1112,6 +1128,8 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
1112 1128
1113 spin_lock_bh(&serv->sv_lock); 1129 spin_lock_bh(&serv->sv_lock);
1114 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { 1130 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1131 if (xprt->xpt_net != net)
1132 continue;
1115 if (strcmp(xprt->xpt_class->xcl_name, xcl_name)) 1133 if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1116 continue; 1134 continue;
1117 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family) 1135 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 01153ead1dba..2777fa896645 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -14,6 +14,7 @@
14#include <net/sock.h> 14#include <net/sock.h>
15#include <net/ipv6.h> 15#include <net/ipv6.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/user_namespace.h>
17#define RPCDBG_FACILITY RPCDBG_AUTH 18#define RPCDBG_FACILITY RPCDBG_AUTH
18 19
19#include <linux/sunrpc/clnt.h> 20#include <linux/sunrpc/clnt.h>
@@ -211,7 +212,7 @@ static int ip_map_parse(struct cache_detail *cd,
211 len = qword_get(&mesg, buf, mlen); 212 len = qword_get(&mesg, buf, mlen);
212 if (len <= 0) return -EINVAL; 213 if (len <= 0) return -EINVAL;
213 214
214 if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0) 215 if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
215 return -EINVAL; 216 return -EINVAL;
216 switch (address.sa.sa_family) { 217 switch (address.sa.sa_family) {
217 case AF_INET: 218 case AF_INET:
@@ -346,17 +347,12 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
346 return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); 347 return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
347} 348}
348 349
349 350void svcauth_unix_purge(struct net *net)
350void svcauth_unix_purge(void)
351{ 351{
352 struct net *net; 352 struct sunrpc_net *sn;
353
354 for_each_net(net) {
355 struct sunrpc_net *sn;
356 353
357 sn = net_generic(net, sunrpc_net_id); 354 sn = net_generic(net, sunrpc_net_id);
358 cache_purge(sn->ip_map_cache); 355 cache_purge(sn->ip_map_cache);
359 }
360} 356}
361EXPORT_SYMBOL_GPL(svcauth_unix_purge); 357EXPORT_SYMBOL_GPL(svcauth_unix_purge);
362 358
@@ -436,7 +432,6 @@ struct unix_gid {
436 uid_t uid; 432 uid_t uid;
437 struct group_info *gi; 433 struct group_info *gi;
438}; 434};
439static struct cache_head *gid_table[GID_HASHMAX];
440 435
441static void unix_gid_put(struct kref *kref) 436static void unix_gid_put(struct kref *kref)
442{ 437{
@@ -494,8 +489,7 @@ static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
494 return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request); 489 return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
495} 490}
496 491
497static struct unix_gid *unix_gid_lookup(uid_t uid); 492static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid);
498extern struct cache_detail unix_gid_cache;
499 493
500static int unix_gid_parse(struct cache_detail *cd, 494static int unix_gid_parse(struct cache_detail *cd,
501 char *mesg, int mlen) 495 char *mesg, int mlen)
@@ -509,7 +503,7 @@ static int unix_gid_parse(struct cache_detail *cd,
509 time_t expiry; 503 time_t expiry;
510 struct unix_gid ug, *ugp; 504 struct unix_gid ug, *ugp;
511 505
512 if (mlen <= 0 || mesg[mlen-1] != '\n') 506 if (mesg[mlen - 1] != '\n')
513 return -EINVAL; 507 return -EINVAL;
514 mesg[mlen-1] = 0; 508 mesg[mlen-1] = 0;
515 509
@@ -532,26 +526,30 @@ static int unix_gid_parse(struct cache_detail *cd,
532 526
533 for (i = 0 ; i < gids ; i++) { 527 for (i = 0 ; i < gids ; i++) {
534 int gid; 528 int gid;
529 kgid_t kgid;
535 rv = get_int(&mesg, &gid); 530 rv = get_int(&mesg, &gid);
536 err = -EINVAL; 531 err = -EINVAL;
537 if (rv) 532 if (rv)
538 goto out; 533 goto out;
539 GROUP_AT(ug.gi, i) = gid; 534 kgid = make_kgid(&init_user_ns, gid);
535 if (!gid_valid(kgid))
536 goto out;
537 GROUP_AT(ug.gi, i) = kgid;
540 } 538 }
541 539
542 ugp = unix_gid_lookup(uid); 540 ugp = unix_gid_lookup(cd, uid);
543 if (ugp) { 541 if (ugp) {
544 struct cache_head *ch; 542 struct cache_head *ch;
545 ug.h.flags = 0; 543 ug.h.flags = 0;
546 ug.h.expiry_time = expiry; 544 ug.h.expiry_time = expiry;
547 ch = sunrpc_cache_update(&unix_gid_cache, 545 ch = sunrpc_cache_update(cd,
548 &ug.h, &ugp->h, 546 &ug.h, &ugp->h,
549 hash_long(uid, GID_HASHBITS)); 547 hash_long(uid, GID_HASHBITS));
550 if (!ch) 548 if (!ch)
551 err = -ENOMEM; 549 err = -ENOMEM;
552 else { 550 else {
553 err = 0; 551 err = 0;
554 cache_put(ch, &unix_gid_cache); 552 cache_put(ch, cd);
555 } 553 }
556 } else 554 } else
557 err = -ENOMEM; 555 err = -ENOMEM;
@@ -565,6 +563,7 @@ static int unix_gid_show(struct seq_file *m,
565 struct cache_detail *cd, 563 struct cache_detail *cd,
566 struct cache_head *h) 564 struct cache_head *h)
567{ 565{
566 struct user_namespace *user_ns = current_user_ns();
568 struct unix_gid *ug; 567 struct unix_gid *ug;
569 int i; 568 int i;
570 int glen; 569 int glen;
@@ -582,15 +581,14 @@ static int unix_gid_show(struct seq_file *m,
582 581
583 seq_printf(m, "%u %d:", ug->uid, glen); 582 seq_printf(m, "%u %d:", ug->uid, glen);
584 for (i = 0; i < glen; i++) 583 for (i = 0; i < glen; i++)
585 seq_printf(m, " %d", GROUP_AT(ug->gi, i)); 584 seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i)));
586 seq_printf(m, "\n"); 585 seq_printf(m, "\n");
587 return 0; 586 return 0;
588} 587}
589 588
590struct cache_detail unix_gid_cache = { 589static struct cache_detail unix_gid_cache_template = {
591 .owner = THIS_MODULE, 590 .owner = THIS_MODULE,
592 .hash_size = GID_HASHMAX, 591 .hash_size = GID_HASHMAX,
593 .hash_table = gid_table,
594 .name = "auth.unix.gid", 592 .name = "auth.unix.gid",
595 .cache_put = unix_gid_put, 593 .cache_put = unix_gid_put,
596 .cache_upcall = unix_gid_upcall, 594 .cache_upcall = unix_gid_upcall,
@@ -602,14 +600,42 @@ struct cache_detail unix_gid_cache = {
602 .alloc = unix_gid_alloc, 600 .alloc = unix_gid_alloc,
603}; 601};
604 602
605static struct unix_gid *unix_gid_lookup(uid_t uid) 603int unix_gid_cache_create(struct net *net)
604{
605 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
606 struct cache_detail *cd;
607 int err;
608
609 cd = cache_create_net(&unix_gid_cache_template, net);
610 if (IS_ERR(cd))
611 return PTR_ERR(cd);
612 err = cache_register_net(cd, net);
613 if (err) {
614 cache_destroy_net(cd, net);
615 return err;
616 }
617 sn->unix_gid_cache = cd;
618 return 0;
619}
620
621void unix_gid_cache_destroy(struct net *net)
622{
623 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
624 struct cache_detail *cd = sn->unix_gid_cache;
625
626 sn->unix_gid_cache = NULL;
627 cache_purge(cd);
628 cache_unregister_net(cd, net);
629 cache_destroy_net(cd, net);
630}
631
632static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, uid_t uid)
606{ 633{
607 struct unix_gid ug; 634 struct unix_gid ug;
608 struct cache_head *ch; 635 struct cache_head *ch;
609 636
610 ug.uid = uid; 637 ug.uid = uid;
611 ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h, 638 ch = sunrpc_cache_lookup(cd, &ug.h, hash_long(uid, GID_HASHBITS));
612 hash_long(uid, GID_HASHBITS));
613 if (ch) 639 if (ch)
614 return container_of(ch, struct unix_gid, h); 640 return container_of(ch, struct unix_gid, h);
615 else 641 else
@@ -621,11 +647,13 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
621 struct unix_gid *ug; 647 struct unix_gid *ug;
622 struct group_info *gi; 648 struct group_info *gi;
623 int ret; 649 int ret;
650 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
651 sunrpc_net_id);
624 652
625 ug = unix_gid_lookup(uid); 653 ug = unix_gid_lookup(sn->unix_gid_cache, uid);
626 if (!ug) 654 if (!ug)
627 return ERR_PTR(-EAGAIN); 655 return ERR_PTR(-EAGAIN);
628 ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); 656 ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
629 switch (ret) { 657 switch (ret) {
630 case -ENOENT: 658 case -ENOENT:
631 return ERR_PTR(-ENOENT); 659 return ERR_PTR(-ENOENT);
@@ -633,7 +661,7 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
633 return ERR_PTR(-ESHUTDOWN); 661 return ERR_PTR(-ESHUTDOWN);
634 case 0: 662 case 0:
635 gi = get_group_info(ug->gi); 663 gi = get_group_info(ug->gi);
636 cache_put(&ug->h, &unix_gid_cache); 664 cache_put(&ug->h, sn->unix_gid_cache);
637 return gi; 665 return gi;
638 default: 666 default:
639 return ERR_PTR(-EAGAIN); 667 return ERR_PTR(-EAGAIN);
@@ -718,6 +746,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
718 struct svc_cred *cred = &rqstp->rq_cred; 746 struct svc_cred *cred = &rqstp->rq_cred;
719 747
720 cred->cr_group_info = NULL; 748 cred->cr_group_info = NULL;
749 cred->cr_principal = NULL;
721 rqstp->rq_client = NULL; 750 rqstp->rq_client = NULL;
722 751
723 if (argv->iov_len < 3*4) 752 if (argv->iov_len < 3*4)
@@ -745,7 +774,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
745 svc_putnl(resv, RPC_AUTH_NULL); 774 svc_putnl(resv, RPC_AUTH_NULL);
746 svc_putnl(resv, 0); 775 svc_putnl(resv, 0);
747 776
748 rqstp->rq_flavor = RPC_AUTH_NULL; 777 rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
749 return SVC_OK; 778 return SVC_OK;
750} 779}
751 780
@@ -783,6 +812,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
783 int len = argv->iov_len; 812 int len = argv->iov_len;
784 813
785 cred->cr_group_info = NULL; 814 cred->cr_group_info = NULL;
815 cred->cr_principal = NULL;
786 rqstp->rq_client = NULL; 816 rqstp->rq_client = NULL;
787 817
788 if ((len -= 3*4) < 0) 818 if ((len -= 3*4) < 0)
@@ -804,8 +834,12 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
804 cred->cr_group_info = groups_alloc(slen); 834 cred->cr_group_info = groups_alloc(slen);
805 if (cred->cr_group_info == NULL) 835 if (cred->cr_group_info == NULL)
806 return SVC_CLOSE; 836 return SVC_CLOSE;
807 for (i = 0; i < slen; i++) 837 for (i = 0; i < slen; i++) {
808 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); 838 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
839 if (!gid_valid(kgid))
840 goto badcred;
841 GROUP_AT(cred->cr_group_info, i) = kgid;
842 }
809 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 843 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
810 *authp = rpc_autherr_badverf; 844 *authp = rpc_autherr_badverf;
811 return SVC_DENIED; 845 return SVC_DENIED;
@@ -815,7 +849,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
815 svc_putnl(resv, RPC_AUTH_NULL); 849 svc_putnl(resv, RPC_AUTH_NULL);
816 svc_putnl(resv, 0); 850 svc_putnl(resv, 0);
817 851
818 rqstp->rq_flavor = RPC_AUTH_UNIX; 852 rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
819 return SVC_OK; 853 return SVC_OK;
820 854
821badcred: 855badcred:
@@ -849,56 +883,45 @@ struct auth_ops svcauth_unix = {
849 .set_client = svcauth_unix_set_client, 883 .set_client = svcauth_unix_set_client,
850}; 884};
851 885
886static struct cache_detail ip_map_cache_template = {
887 .owner = THIS_MODULE,
888 .hash_size = IP_HASHMAX,
889 .name = "auth.unix.ip",
890 .cache_put = ip_map_put,
891 .cache_upcall = ip_map_upcall,
892 .cache_parse = ip_map_parse,
893 .cache_show = ip_map_show,
894 .match = ip_map_match,
895 .init = ip_map_init,
896 .update = update,
897 .alloc = ip_map_alloc,
898};
899
852int ip_map_cache_create(struct net *net) 900int ip_map_cache_create(struct net *net)
853{ 901{
854 int err = -ENOMEM;
855 struct cache_detail *cd;
856 struct cache_head **tbl;
857 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 902 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
903 struct cache_detail *cd;
904 int err;
858 905
859 cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL); 906 cd = cache_create_net(&ip_map_cache_template, net);
860 if (cd == NULL) 907 if (IS_ERR(cd))
861 goto err_cd; 908 return PTR_ERR(cd);
862
863 tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL);
864 if (tbl == NULL)
865 goto err_tbl;
866
867 cd->owner = THIS_MODULE,
868 cd->hash_size = IP_HASHMAX,
869 cd->hash_table = tbl,
870 cd->name = "auth.unix.ip",
871 cd->cache_put = ip_map_put,
872 cd->cache_upcall = ip_map_upcall,
873 cd->cache_parse = ip_map_parse,
874 cd->cache_show = ip_map_show,
875 cd->match = ip_map_match,
876 cd->init = ip_map_init,
877 cd->update = update,
878 cd->alloc = ip_map_alloc,
879
880 err = cache_register_net(cd, net); 909 err = cache_register_net(cd, net);
881 if (err) 910 if (err) {
882 goto err_reg; 911 cache_destroy_net(cd, net);
883 912 return err;
913 }
884 sn->ip_map_cache = cd; 914 sn->ip_map_cache = cd;
885 return 0; 915 return 0;
886
887err_reg:
888 kfree(tbl);
889err_tbl:
890 kfree(cd);
891err_cd:
892 return err;
893} 916}
894 917
895void ip_map_cache_destroy(struct net *net) 918void ip_map_cache_destroy(struct net *net)
896{ 919{
897 struct sunrpc_net *sn; 920 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
921 struct cache_detail *cd = sn->ip_map_cache;
898 922
899 sn = net_generic(net, sunrpc_net_id); 923 sn->ip_map_cache = NULL;
900 cache_purge(sn->ip_map_cache); 924 cache_purge(cd);
901 cache_unregister_net(sn->ip_map_cache, net); 925 cache_unregister_net(cd, net);
902 kfree(sn->ip_map_cache->hash_table); 926 cache_destroy_net(cd, net);
903 kfree(sn->ip_map_cache);
904} 927}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 464570906f80..a6de09de5d21 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
396 int buflen, unsigned int base) 396 int buflen, unsigned int base)
397{ 397{
398 size_t save_iovlen; 398 size_t save_iovlen;
399 void __user *save_iovbase; 399 void *save_iovbase;
400 unsigned int i; 400 unsigned int i;
401 int ret; 401 int ret;
402 402
@@ -617,11 +617,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
617 rqstp->rq_prot = IPPROTO_UDP; 617 rqstp->rq_prot = IPPROTO_UDP;
618 618
619 if (!svc_udp_get_dest_address(rqstp, cmh)) { 619 if (!svc_udp_get_dest_address(rqstp, cmh)) {
620 if (net_ratelimit()) 620 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
621 printk(KERN_WARNING 621 cmh->cmsg_level, cmh->cmsg_type);
622 "svc: received unknown control message %d/%d; "
623 "dropping RPC reply datagram\n",
624 cmh->cmsg_level, cmh->cmsg_type);
625 skb_free_datagram_locked(svsk->sk_sk, skb); 622 skb_free_datagram_locked(svsk->sk_sk, skb);
626 return 0; 623 return 0;
627 } 624 }
@@ -871,18 +868,17 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
871 if (err == -ENOMEM) 868 if (err == -ENOMEM)
872 printk(KERN_WARNING "%s: no more sockets!\n", 869 printk(KERN_WARNING "%s: no more sockets!\n",
873 serv->sv_name); 870 serv->sv_name);
874 else if (err != -EAGAIN && net_ratelimit()) 871 else if (err != -EAGAIN)
875 printk(KERN_WARNING "%s: accept failed (err %d)!\n", 872 net_warn_ratelimited("%s: accept failed (err %d)!\n",
876 serv->sv_name, -err); 873 serv->sv_name, -err);
877 return NULL; 874 return NULL;
878 } 875 }
879 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); 876 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
880 877
881 err = kernel_getpeername(newsock, sin, &slen); 878 err = kernel_getpeername(newsock, sin, &slen);
882 if (err < 0) { 879 if (err < 0) {
883 if (net_ratelimit()) 880 net_warn_ratelimited("%s: peername failed (err %d)!\n",
884 printk(KERN_WARNING "%s: peername failed (err %d)!\n", 881 serv->sv_name, -err);
885 serv->sv_name, -err);
886 goto failed; /* aborted connection or whatever */ 882 goto failed; /* aborted connection or whatever */
887 } 883 }
888 884
@@ -1012,19 +1008,15 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
1012 * bit set in the fragment length header. 1008 * bit set in the fragment length header.
1013 * But apparently no known nfs clients send fragmented 1009 * But apparently no known nfs clients send fragmented
1014 * records. */ 1010 * records. */
1015 if (net_ratelimit()) 1011 net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
1016 printk(KERN_NOTICE "RPC: multiple fragments "
1017 "per record not supported\n");
1018 goto err_delete; 1012 goto err_delete;
1019 } 1013 }
1020 1014
1021 svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; 1015 svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
1022 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); 1016 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
1023 if (svsk->sk_reclen > serv->sv_max_mesg) { 1017 if (svsk->sk_reclen > serv->sv_max_mesg) {
1024 if (net_ratelimit()) 1018 net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
1025 printk(KERN_NOTICE "RPC: " 1019 (unsigned long)svsk->sk_reclen);
1026 "fragment too large: 0x%08lx\n",
1027 (unsigned long)svsk->sk_reclen);
1028 goto err_delete; 1020 goto err_delete;
1029 } 1021 }
1030 } 1022 }
@@ -1381,8 +1373,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
1381 spin_lock_bh(&serv->sv_lock); 1373 spin_lock_bh(&serv->sv_lock);
1382 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) 1374 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
1383 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); 1375 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1384 list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
1385 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1386 spin_unlock_bh(&serv->sv_lock); 1376 spin_unlock_bh(&serv->sv_lock);
1387} 1377}
1388EXPORT_SYMBOL_GPL(svc_sock_update_bufs); 1378EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
@@ -1409,7 +1399,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1409 1399
1410 /* Register socket with portmapper */ 1400 /* Register socket with portmapper */
1411 if (*errp >= 0 && pmap_register) 1401 if (*errp >= 0 && pmap_register)
1412 *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, 1402 *errp = svc_register(serv, sock_net(sock->sk), inet->sk_family,
1403 inet->sk_protocol,
1413 ntohs(inet_sk(inet)->inet_sport)); 1404 ntohs(inet_sk(inet)->inet_sport));
1414 1405
1415 if (*errp < 0) { 1406 if (*errp < 0) {
@@ -1557,7 +1548,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
1557 (char *)&val, sizeof(val)); 1548 (char *)&val, sizeof(val));
1558 1549
1559 if (type == SOCK_STREAM) 1550 if (type == SOCK_STREAM)
1560 sock->sk->sk_reuse = 1; /* allow address reuse */ 1551 sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
1561 error = kernel_bind(sock, sin, len); 1552 error = kernel_bind(sock, sin, len);
1562 if (error < 0) 1553 if (error < 0)
1563 goto bummer; 1554 goto bummer;
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index e65dcc613339..af7d339add9d 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -20,6 +20,8 @@
20#include <linux/sunrpc/stats.h> 20#include <linux/sunrpc/stats.h>
21#include <linux/sunrpc/svc_xprt.h> 21#include <linux/sunrpc/svc_xprt.h>
22 22
23#include "netns.h"
24
23/* 25/*
24 * Declare the debug flags here 26 * Declare the debug flags here
25 */ 27 */
@@ -110,7 +112,7 @@ proc_dodebug(ctl_table *table, int write,
110 *(unsigned int *) table->data = value; 112 *(unsigned int *) table->data = value;
111 /* Display the RPC tasks on writing to rpc_debug */ 113 /* Display the RPC tasks on writing to rpc_debug */
112 if (strcmp(table->procname, "rpc_debug") == 0) 114 if (strcmp(table->procname, "rpc_debug") == 0)
113 rpc_show_tasks(); 115 rpc_show_tasks(&init_net);
114 } else { 116 } else {
115 if (!access_ok(VERIFY_WRITE, buffer, left)) 117 if (!access_ok(VERIFY_WRITE, buffer, left))
116 return -EFAULT; 118 return -EFAULT;
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index dd824341c349..08881d0c9672 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -34,7 +34,7 @@
34void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) 34void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
35{ 35{
36 unsigned long init = 0; 36 unsigned long init = 0;
37 unsigned i; 37 unsigned int i;
38 38
39 rt->timeo = timeo; 39 rt->timeo = timeo;
40 40
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(rpc_init_rtt);
57 * NB: When computing the smoothed RTT and standard deviation, 57 * NB: When computing the smoothed RTT and standard deviation,
58 * be careful not to produce negative intermediate results. 58 * be careful not to produce negative intermediate results.
59 */ 59 */
60void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m) 60void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
61{ 61{
62 long *srtt, *sdrtt; 62 long *srtt, *sdrtt;
63 63
@@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(rpc_update_rtt);
106 * read, write, commit - A+4D 106 * read, write, commit - A+4D
107 * other - timeo 107 * other - timeo
108 */ 108 */
109unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer) 109unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
110{ 110{
111 unsigned long res; 111 unsigned long res;
112 112
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 593f4c605305..fddcccfcdf76 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -122,9 +122,9 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
122{ 122{
123 char *kaddr; 123 char *kaddr;
124 124
125 kaddr = kmap_atomic(buf->pages[0], KM_USER0); 125 kaddr = kmap_atomic(buf->pages[0]);
126 kaddr[buf->page_base + len] = '\0'; 126 kaddr[buf->page_base + len] = '\0';
127 kunmap_atomic(kaddr, KM_USER0); 127 kunmap_atomic(kaddr);
128} 128}
129EXPORT_SYMBOL_GPL(xdr_terminate_string); 129EXPORT_SYMBOL_GPL(xdr_terminate_string);
130 130
@@ -232,12 +232,12 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
232 pgto_base -= copy; 232 pgto_base -= copy;
233 pgfrom_base -= copy; 233 pgfrom_base -= copy;
234 234
235 vto = kmap_atomic(*pgto, KM_USER0); 235 vto = kmap_atomic(*pgto);
236 vfrom = kmap_atomic(*pgfrom, KM_USER1); 236 vfrom = kmap_atomic(*pgfrom);
237 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 237 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
238 flush_dcache_page(*pgto); 238 flush_dcache_page(*pgto);
239 kunmap_atomic(vfrom, KM_USER1); 239 kunmap_atomic(vfrom);
240 kunmap_atomic(vto, KM_USER0); 240 kunmap_atomic(vto);
241 241
242 } while ((len -= copy) != 0); 242 } while ((len -= copy) != 0);
243} 243}
@@ -267,9 +267,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
267 if (copy > len) 267 if (copy > len)
268 copy = len; 268 copy = len;
269 269
270 vto = kmap_atomic(*pgto, KM_USER0); 270 vto = kmap_atomic(*pgto);
271 memcpy(vto + pgbase, p, copy); 271 memcpy(vto + pgbase, p, copy);
272 kunmap_atomic(vto, KM_USER0); 272 kunmap_atomic(vto);
273 273
274 len -= copy; 274 len -= copy;
275 if (len == 0) 275 if (len == 0)
@@ -311,9 +311,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
311 if (copy > len) 311 if (copy > len)
312 copy = len; 312 copy = len;
313 313
314 vfrom = kmap_atomic(*pgfrom, KM_USER0); 314 vfrom = kmap_atomic(*pgfrom);
315 memcpy(p, vfrom + pgbase, copy); 315 memcpy(p, vfrom + pgbase, copy);
316 kunmap_atomic(vfrom, KM_USER0); 316 kunmap_atomic(vfrom);
317 317
318 pgbase += copy; 318 pgbase += copy;
319 if (pgbase == PAGE_CACHE_SIZE) { 319 if (pgbase == PAGE_CACHE_SIZE) {
@@ -1204,7 +1204,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1204 int (*actor)(struct scatterlist *, void *), void *data) 1204 int (*actor)(struct scatterlist *, void *), void *data)
1205{ 1205{
1206 int i, ret = 0; 1206 int i, ret = 0;
1207 unsigned page_len, thislen, page_offset; 1207 unsigned int page_len, thislen, page_offset;
1208 struct scatterlist sg[1]; 1208 struct scatterlist sg[1];
1209 1209
1210 sg_init_table(sg, 1); 1210 sg_init_table(sg, 1);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c64c0ef519b5..3c83035cdaa9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -66,6 +66,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net);
66static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); 66static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
67static void xprt_connect_status(struct rpc_task *task); 67static void xprt_connect_status(struct rpc_task *task);
68static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 68static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
69static void xprt_destroy(struct rpc_xprt *xprt);
69 70
70static DEFINE_SPINLOCK(xprt_list_lock); 71static DEFINE_SPINLOCK(xprt_list_lock);
71static LIST_HEAD(xprt_list); 72static LIST_HEAD(xprt_list);
@@ -292,54 +293,57 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
292 return retval; 293 return retval;
293} 294}
294 295
295static void __xprt_lock_write_next(struct rpc_xprt *xprt) 296static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
296{ 297{
297 struct rpc_task *task; 298 struct rpc_xprt *xprt = data;
298 struct rpc_rqst *req; 299 struct rpc_rqst *req;
299 300
300 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
301 return;
302
303 task = rpc_wake_up_next(&xprt->sending);
304 if (task == NULL)
305 goto out_unlock;
306
307 req = task->tk_rqstp; 301 req = task->tk_rqstp;
308 xprt->snd_task = task; 302 xprt->snd_task = task;
309 if (req) { 303 if (req) {
310 req->rq_bytes_sent = 0; 304 req->rq_bytes_sent = 0;
311 req->rq_ntrans++; 305 req->rq_ntrans++;
312 } 306 }
313 return; 307 return true;
308}
314 309
315out_unlock: 310static void __xprt_lock_write_next(struct rpc_xprt *xprt)
311{
312 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
313 return;
314
315 if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
316 return;
316 xprt_clear_locked(xprt); 317 xprt_clear_locked(xprt);
317} 318}
318 319
319static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) 320static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
320{ 321{
321 struct rpc_task *task; 322 struct rpc_xprt *xprt = data;
322 struct rpc_rqst *req; 323 struct rpc_rqst *req;
323 324
324 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
325 return;
326 if (RPCXPRT_CONGESTED(xprt))
327 goto out_unlock;
328 task = rpc_wake_up_next(&xprt->sending);
329 if (task == NULL)
330 goto out_unlock;
331
332 req = task->tk_rqstp; 325 req = task->tk_rqstp;
333 if (req == NULL) { 326 if (req == NULL) {
334 xprt->snd_task = task; 327 xprt->snd_task = task;
335 return; 328 return true;
336 } 329 }
337 if (__xprt_get_cong(xprt, task)) { 330 if (__xprt_get_cong(xprt, task)) {
338 xprt->snd_task = task; 331 xprt->snd_task = task;
339 req->rq_bytes_sent = 0; 332 req->rq_bytes_sent = 0;
340 req->rq_ntrans++; 333 req->rq_ntrans++;
341 return; 334 return true;
342 } 335 }
336 return false;
337}
338
339static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
340{
341 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
342 return;
343 if (RPCXPRT_CONGESTED(xprt))
344 goto out_unlock;
345 if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
346 return;
343out_unlock: 347out_unlock:
344 xprt_clear_locked(xprt); 348 xprt_clear_locked(xprt);
345} 349}
@@ -712,9 +716,7 @@ void xprt_connect(struct rpc_task *task)
712 if (xprt_connected(xprt)) 716 if (xprt_connected(xprt))
713 xprt_release_write(xprt, task); 717 xprt_release_write(xprt, task);
714 else { 718 else {
715 if (task->tk_rqstp) 719 task->tk_rqstp->rq_bytes_sent = 0;
716 task->tk_rqstp->rq_bytes_sent = 0;
717
718 task->tk_timeout = task->tk_rqstp->rq_timeout; 720 task->tk_timeout = task->tk_rqstp->rq_timeout;
719 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); 721 rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
720 722
@@ -750,7 +752,7 @@ static void xprt_connect_status(struct rpc_task *task)
750 default: 752 default:
751 dprintk("RPC: %5u xprt_connect_status: error %d connecting to " 753 dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
752 "server %s\n", task->tk_pid, -task->tk_status, 754 "server %s\n", task->tk_pid, -task->tk_status,
753 task->tk_client->cl_server); 755 xprt->servername);
754 xprt_release_write(xprt, task); 756 xprt_release_write(xprt, task);
755 task->tk_status = -EIO; 757 task->tk_status = -EIO;
756 } 758 }
@@ -781,7 +783,7 @@ static void xprt_update_rtt(struct rpc_task *task)
781{ 783{
782 struct rpc_rqst *req = task->tk_rqstp; 784 struct rpc_rqst *req = task->tk_rqstp;
783 struct rpc_rtt *rtt = task->tk_client->cl_rtt; 785 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
784 unsigned timer = task->tk_msg.rpc_proc->p_timer; 786 unsigned int timer = task->tk_msg.rpc_proc->p_timer;
785 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); 787 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
786 788
787 if (timer) { 789 if (timer) {
@@ -884,7 +886,7 @@ void xprt_transmit(struct rpc_task *task)
884{ 886{
885 struct rpc_rqst *req = task->tk_rqstp; 887 struct rpc_rqst *req = task->tk_rqstp;
886 struct rpc_xprt *xprt = req->rq_xprt; 888 struct rpc_xprt *xprt = req->rq_xprt;
887 int status; 889 int status, numreqs;
888 890
889 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 891 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
890 892
@@ -921,9 +923,14 @@ void xprt_transmit(struct rpc_task *task)
921 923
922 xprt->ops->set_retrans_timeout(task); 924 xprt->ops->set_retrans_timeout(task);
923 925
926 numreqs = atomic_read(&xprt->num_reqs);
927 if (numreqs > xprt->stat.max_slots)
928 xprt->stat.max_slots = numreqs;
924 xprt->stat.sends++; 929 xprt->stat.sends++;
925 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; 930 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
926 xprt->stat.bklog_u += xprt->backlog.qlen; 931 xprt->stat.bklog_u += xprt->backlog.qlen;
932 xprt->stat.sending_u += xprt->sending.qlen;
933 xprt->stat.pending_u += xprt->pending.qlen;
927 934
928 /* Don't race with disconnect */ 935 /* Don't race with disconnect */
929 if (!xprt_connected(xprt)) 936 if (!xprt_connected(xprt))
@@ -972,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
972 list_del(&req->rq_list); 979 list_del(&req->rq_list);
973 goto out_init_req; 980 goto out_init_req;
974 } 981 }
975 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT); 982 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
976 if (!IS_ERR(req)) 983 if (!IS_ERR(req))
977 goto out_init_req; 984 goto out_init_req;
978 switch (PTR_ERR(req)) { 985 switch (PTR_ERR(req)) {
979 case -ENOMEM: 986 case -ENOMEM:
980 rpc_delay(task, HZ >> 2);
981 dprintk("RPC: dynamic allocation of request slot " 987 dprintk("RPC: dynamic allocation of request slot "
982 "failed! Retrying\n"); 988 "failed! Retrying\n");
989 task->tk_status = -ENOMEM;
983 break; 990 break;
984 case -EAGAIN: 991 case -EAGAIN:
985 rpc_sleep_on(&xprt->backlog, task, NULL); 992 rpc_sleep_on(&xprt->backlog, task, NULL);
986 dprintk("RPC: waiting for request slot\n"); 993 dprintk("RPC: waiting for request slot\n");
994 default:
995 task->tk_status = -EAGAIN;
987 } 996 }
988 task->tk_status = -EAGAIN;
989 return; 997 return;
990out_init_req: 998out_init_req:
991 task->tk_status = 0; 999 task->tk_status = 0;
@@ -1131,7 +1139,10 @@ void xprt_release(struct rpc_task *task)
1131 return; 1139 return;
1132 1140
1133 xprt = req->rq_xprt; 1141 xprt = req->rq_xprt;
1134 rpc_count_iostats(task); 1142 if (task->tk_ops->rpc_count_stats != NULL)
1143 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
1144 else if (task->tk_client)
1145 rpc_count_iostats(task, task->tk_client->cl_metrics);
1135 spin_lock_bh(&xprt->transport_lock); 1146 spin_lock_bh(&xprt->transport_lock);
1136 xprt->ops->release_xprt(xprt, task); 1147 xprt->ops->release_xprt(xprt, task);
1137 if (xprt->ops->release_request) 1148 if (xprt->ops->release_request)
@@ -1220,6 +1231,17 @@ found:
1220 (unsigned long)xprt); 1231 (unsigned long)xprt);
1221 else 1232 else
1222 init_timer(&xprt->timer); 1233 init_timer(&xprt->timer);
1234
1235 if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1236 xprt_destroy(xprt);
1237 return ERR_PTR(-EINVAL);
1238 }
1239 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1240 if (xprt->servername == NULL) {
1241 xprt_destroy(xprt);
1242 return ERR_PTR(-ENOMEM);
1243 }
1244
1223 dprintk("RPC: created transport %p with %u slots\n", xprt, 1245 dprintk("RPC: created transport %p with %u slots\n", xprt,
1224 xprt->max_reqs); 1246 xprt->max_reqs);
1225out: 1247out:
@@ -1242,6 +1264,7 @@ static void xprt_destroy(struct rpc_xprt *xprt)
1242 rpc_destroy_wait_queue(&xprt->sending); 1264 rpc_destroy_wait_queue(&xprt->sending);
1243 rpc_destroy_wait_queue(&xprt->backlog); 1265 rpc_destroy_wait_queue(&xprt->backlog);
1244 cancel_work_sync(&xprt->task_cleanup); 1266 cancel_work_sync(&xprt->task_cleanup);
1267 kfree(xprt->servername);
1245 /* 1268 /*
1246 * Tear down transport state and free the rpc_xprt 1269 * Tear down transport state and free the rpc_xprt
1247 */ 1270 */
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 554d0814c875..558fbab574f0 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -338,9 +338,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
338 curlen = copy_len; 338 curlen = copy_len;
339 dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n", 339 dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
340 __func__, i, destp, copy_len, curlen); 340 __func__, i, destp, copy_len, curlen);
341 srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA); 341 srcp = kmap_atomic(ppages[i]);
342 memcpy(destp, srcp+page_base, curlen); 342 memcpy(destp, srcp+page_base, curlen);
343 kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA); 343 kunmap_atomic(srcp);
344 rqst->rq_svec[0].iov_len += curlen; 344 rqst->rq_svec[0].iov_len += curlen;
345 destp += curlen; 345 destp += curlen;
346 copy_len -= curlen; 346 copy_len -= curlen;
@@ -639,10 +639,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
639 dprintk("RPC: %s: page %d" 639 dprintk("RPC: %s: page %d"
640 " srcp 0x%p len %d curlen %d\n", 640 " srcp 0x%p len %d curlen %d\n",
641 __func__, i, srcp, copy_len, curlen); 641 __func__, i, srcp, copy_len, curlen);
642 destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA); 642 destp = kmap_atomic(ppages[i]);
643 memcpy(destp + page_base, srcp, curlen); 643 memcpy(destp + page_base, srcp, curlen);
644 flush_dcache_page(ppages[i]); 644 flush_dcache_page(ppages[i]);
645 kunmap_atomic(destp, KM_SKB_SUNRPC_DATA); 645 kunmap_atomic(destp);
646 srcp += curlen; 646 srcp += curlen;
647 copy_len -= curlen; 647 copy_len -= curlen;
648 if (copy_len == 0) 648 if (copy_len == 0)
@@ -771,13 +771,18 @@ repost:
771 771
772 /* get request object */ 772 /* get request object */
773 req = rpcr_to_rdmar(rqst); 773 req = rpcr_to_rdmar(rqst);
774 if (req->rl_reply) {
775 spin_unlock(&xprt->transport_lock);
776 dprintk("RPC: %s: duplicate reply 0x%p to RPC "
777 "request 0x%p: xid 0x%08x\n", __func__, rep, req,
778 headerp->rm_xid);
779 goto repost;
780 }
774 781
775 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n" 782 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
776 " RPC request 0x%p xid 0x%08x\n", 783 " RPC request 0x%p xid 0x%08x\n",
777 __func__, rep, req, rqst, headerp->rm_xid); 784 __func__, rep, req, rqst, headerp->rm_xid);
778 785
779 BUG_ON(!req || req->rl_reply);
780
781 /* from here on, the reply is no longer an orphan */ 786 /* from here on, the reply is no longer an orphan */
782 req->rl_reply = rep; 787 req->rl_reply = rep;
783 788
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 09af4fab1a45..8343737e85f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -47,6 +47,7 @@
47#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/sched.h> 48#include <linux/sunrpc/sched.h>
49#include <linux/sunrpc/svc_rdma.h> 49#include <linux/sunrpc/svc_rdma.h>
50#include "xprt_rdma.h"
50 51
51#define RPCDBG_FACILITY RPCDBG_SVCXPRT 52#define RPCDBG_FACILITY RPCDBG_SVCXPRT
52 53
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 9530ef2d40dc..8d2edddf48cf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
60 struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va; 60 struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
61 61
62 while (ch->rc_discrim != xdr_zero) { 62 while (ch->rc_discrim != xdr_zero) {
63 u64 ch_offset;
64
65 if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) > 63 if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
66 (unsigned long)vaend) { 64 (unsigned long)vaend) {
67 dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch); 65 dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
68 return NULL; 66 return NULL;
69 } 67 }
70
71 ch->rc_discrim = ntohl(ch->rc_discrim);
72 ch->rc_position = ntohl(ch->rc_position);
73 ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
74 ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
75 va = (u32 *)&ch->rc_target.rs_offset;
76 xdr_decode_hyper(va, &ch_offset);
77 put_unaligned(ch_offset, (u64 *)va);
78 ch++; 68 ch++;
79 } 69 }
80 return (u32 *)&ch->rc_position; 70 return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
91 *byte_count = 0; 81 *byte_count = 0;
92 *ch_count = 0; 82 *ch_count = 0;
93 for (; ch->rc_discrim != 0; ch++) { 83 for (; ch->rc_discrim != 0; ch++) {
94 *byte_count = *byte_count + ch->rc_target.rs_length; 84 *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
95 *ch_count = *ch_count + 1; 85 *ch_count = *ch_count + 1;
96 } 86 }
97} 87}
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
108 */ 98 */
109static u32 *decode_write_list(u32 *va, u32 *vaend) 99static u32 *decode_write_list(u32 *va, u32 *vaend)
110{ 100{
111 int ch_no; 101 int nchunks;
102
112 struct rpcrdma_write_array *ary = 103 struct rpcrdma_write_array *ary =
113 (struct rpcrdma_write_array *)va; 104 (struct rpcrdma_write_array *)va;
114 105
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
121 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); 112 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
122 return NULL; 113 return NULL;
123 } 114 }
124 ary->wc_discrim = ntohl(ary->wc_discrim); 115 nchunks = ntohl(ary->wc_nchunks);
125 ary->wc_nchunks = ntohl(ary->wc_nchunks);
126 if (((unsigned long)&ary->wc_array[0] + 116 if (((unsigned long)&ary->wc_array[0] +
127 (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) > 117 (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
128 (unsigned long)vaend) { 118 (unsigned long)vaend) {
129 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", 119 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
130 ary, ary->wc_nchunks, vaend); 120 ary, nchunks, vaend);
131 return NULL; 121 return NULL;
132 } 122 }
133 for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
134 u64 ch_offset;
135
136 ary->wc_array[ch_no].wc_target.rs_handle =
137 ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
138 ary->wc_array[ch_no].wc_target.rs_length =
139 ntohl(ary->wc_array[ch_no].wc_target.rs_length);
140 va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
141 xdr_decode_hyper(va, &ch_offset);
142 put_unaligned(ch_offset, (u64 *)va);
143 }
144
145 /* 123 /*
146 * rs_length is the 2nd 4B field in wc_target and taking its 124 * rs_length is the 2nd 4B field in wc_target and taking its
147 * address skips the list terminator 125 * address skips the list terminator
148 */ 126 */
149 return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length; 127 return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
150} 128}
151 129
152static u32 *decode_reply_array(u32 *va, u32 *vaend) 130static u32 *decode_reply_array(u32 *va, u32 *vaend)
153{ 131{
154 int ch_no; 132 int nchunks;
155 struct rpcrdma_write_array *ary = 133 struct rpcrdma_write_array *ary =
156 (struct rpcrdma_write_array *)va; 134 (struct rpcrdma_write_array *)va;
157 135
@@ -164,28 +142,15 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
164 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); 142 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
165 return NULL; 143 return NULL;
166 } 144 }
167 ary->wc_discrim = ntohl(ary->wc_discrim); 145 nchunks = ntohl(ary->wc_nchunks);
168 ary->wc_nchunks = ntohl(ary->wc_nchunks);
169 if (((unsigned long)&ary->wc_array[0] + 146 if (((unsigned long)&ary->wc_array[0] +
170 (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) > 147 (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
171 (unsigned long)vaend) { 148 (unsigned long)vaend) {
172 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", 149 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
173 ary, ary->wc_nchunks, vaend); 150 ary, nchunks, vaend);
174 return NULL; 151 return NULL;
175 } 152 }
176 for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) { 153 return (u32 *)&ary->wc_array[nchunks];
177 u64 ch_offset;
178
179 ary->wc_array[ch_no].wc_target.rs_handle =
180 ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
181 ary->wc_array[ch_no].wc_target.rs_length =
182 ntohl(ary->wc_array[ch_no].wc_target.rs_length);
183 va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
184 xdr_decode_hyper(va, &ch_offset);
185 put_unaligned(ch_offset, (u64 *)va);
186 }
187
188 return (u32 *)&ary->wc_array[ch_no];
189} 154}
190 155
191int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req, 156int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
386 351
387void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary, 352void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
388 int chunk_no, 353 int chunk_no,
389 u32 rs_handle, u64 rs_offset, 354 __be32 rs_handle,
355 __be64 rs_offset,
390 u32 write_len) 356 u32 write_len)
391{ 357{
392 struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target; 358 struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
393 seg->rs_handle = htonl(rs_handle); 359 seg->rs_handle = rs_handle;
360 seg->rs_offset = rs_offset;
394 seg->rs_length = htonl(write_len); 361 seg->rs_length = htonl(write_len);
395 xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
396} 362}
397 363
398void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt, 364void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index df67211c4baf..41cb63b623df 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
147 page_off = 0; 147 page_off = 0;
148 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 148 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
149 ch_no = 0; 149 ch_no = 0;
150 ch_bytes = ch->rc_target.rs_length; 150 ch_bytes = ntohl(ch->rc_target.rs_length);
151 head->arg.head[0] = rqstp->rq_arg.head[0]; 151 head->arg.head[0] = rqstp->rq_arg.head[0];
152 head->arg.tail[0] = rqstp->rq_arg.tail[0]; 152 head->arg.tail[0] = rqstp->rq_arg.tail[0];
153 head->arg.pages = &head->pages[head->count]; 153 head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
183 ch_no++; 183 ch_no++;
184 ch++; 184 ch++;
185 chl_map->ch[ch_no].start = sge_no; 185 chl_map->ch[ch_no].start = sge_no;
186 ch_bytes = ch->rc_target.rs_length; 186 ch_bytes = ntohl(ch->rc_target.rs_length);
187 /* If bytes remaining account for next chunk */ 187 /* If bytes remaining account for next chunk */
188 if (byte_count) { 188 if (byte_count) {
189 head->arg.page_len += ch_bytes; 189 head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
281 offset = 0; 281 offset = 0;
282 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 282 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
283 for (ch_no = 0; ch_no < ch_count; ch_no++) { 283 for (ch_no = 0; ch_no < ch_count; ch_no++) {
284 int len = ntohl(ch->rc_target.rs_length);
284 rpl_map->sge[ch_no].iov_base = frmr->kva + offset; 285 rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
285 rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length; 286 rpl_map->sge[ch_no].iov_len = len;
286 chl_map->ch[ch_no].count = 1; 287 chl_map->ch[ch_no].count = 1;
287 chl_map->ch[ch_no].start = ch_no; 288 chl_map->ch[ch_no].start = ch_no;
288 offset += ch->rc_target.rs_length; 289 offset += len;
289 ch++; 290 ch++;
290 } 291 }
291 292
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
316 for (i = 0; i < count; i++) { 317 for (i = 0; i < count; i++) {
317 ctxt->sge[i].length = 0; /* in case map fails */ 318 ctxt->sge[i].length = 0; /* in case map fails */
318 if (!frmr) { 319 if (!frmr) {
319 BUG_ON(0 == virt_to_page(vec[i].iov_base)); 320 BUG_ON(!virt_to_page(vec[i].iov_base));
320 off = (unsigned long)vec[i].iov_base & ~PAGE_MASK; 321 off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
321 ctxt->sge[i].addr = 322 ctxt->sge[i].addr =
322 ib_dma_map_page(xprt->sc_cm_id->device, 323 ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
426 427
427 for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 428 for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
428 ch->rc_discrim != 0; ch++, ch_no++) { 429 ch->rc_discrim != 0; ch++, ch_no++) {
430 u64 rs_offset;
429next_sge: 431next_sge:
430 ctxt = svc_rdma_get_context(xprt); 432 ctxt = svc_rdma_get_context(xprt);
431 ctxt->direction = DMA_FROM_DEVICE; 433 ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ next_sge:
440 read_wr.opcode = IB_WR_RDMA_READ; 442 read_wr.opcode = IB_WR_RDMA_READ;
441 ctxt->wr_op = read_wr.opcode; 443 ctxt->wr_op = read_wr.opcode;
442 read_wr.send_flags = IB_SEND_SIGNALED; 444 read_wr.send_flags = IB_SEND_SIGNALED;
443 read_wr.wr.rdma.rkey = ch->rc_target.rs_handle; 445 read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
444 read_wr.wr.rdma.remote_addr = 446 xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
445 get_unaligned(&(ch->rc_target.rs_offset)) + 447 &rs_offset);
446 sgl_offset; 448 read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
447 read_wr.sg_list = ctxt->sge; 449 read_wr.sg_list = ctxt->sge;
448 read_wr.num_sge = 450 read_wr.num_sge =
449 rdma_read_max_sge(xprt, chl_map->ch[ch_no].count); 451 rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 249a835b703f..42eb7ba0b903 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
409 u64 rs_offset; 409 u64 rs_offset;
410 410
411 arg_ch = &arg_ary->wc_array[chunk_no].wc_target; 411 arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
412 write_len = min(xfer_len, arg_ch->rs_length); 412 write_len = min(xfer_len, ntohl(arg_ch->rs_length));
413 413
414 /* Prepare the response chunk given the length actually 414 /* Prepare the response chunk given the length actually
415 * written */ 415 * written */
416 rs_offset = get_unaligned(&(arg_ch->rs_offset)); 416 xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
417 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 417 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
418 arg_ch->rs_handle, 418 arg_ch->rs_handle,
419 rs_offset, 419 arg_ch->rs_offset,
420 write_len); 420 write_len);
421 chunk_off = 0; 421 chunk_off = 0;
422 while (write_len) { 422 while (write_len) {
423 int this_write; 423 int this_write;
424 this_write = min(write_len, max_write); 424 this_write = min(write_len, max_write);
425 ret = send_write(xprt, rqstp, 425 ret = send_write(xprt, rqstp,
426 arg_ch->rs_handle, 426 ntohl(arg_ch->rs_handle),
427 rs_offset + chunk_off, 427 rs_offset + chunk_off,
428 xdr_off, 428 xdr_off,
429 this_write, 429 this_write,
@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
457 u32 xdr_off; 457 u32 xdr_off;
458 int chunk_no; 458 int chunk_no;
459 int chunk_off; 459 int chunk_off;
460 int nchunks;
460 struct rpcrdma_segment *ch; 461 struct rpcrdma_segment *ch;
461 struct rpcrdma_write_array *arg_ary; 462 struct rpcrdma_write_array *arg_ary;
462 struct rpcrdma_write_array *res_ary; 463 struct rpcrdma_write_array *res_ary;
@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
476 max_write = xprt->sc_max_sge * PAGE_SIZE; 477 max_write = xprt->sc_max_sge * PAGE_SIZE;
477 478
478 /* xdr offset starts at RPC message */ 479 /* xdr offset starts at RPC message */
480 nchunks = ntohl(arg_ary->wc_nchunks);
479 for (xdr_off = 0, chunk_no = 0; 481 for (xdr_off = 0, chunk_no = 0;
480 xfer_len && chunk_no < arg_ary->wc_nchunks; 482 xfer_len && chunk_no < nchunks;
481 chunk_no++) { 483 chunk_no++) {
482 u64 rs_offset; 484 u64 rs_offset;
483 ch = &arg_ary->wc_array[chunk_no].wc_target; 485 ch = &arg_ary->wc_array[chunk_no].wc_target;
484 write_len = min(xfer_len, ch->rs_length); 486 write_len = min(xfer_len, htonl(ch->rs_length));
485 487
486 /* Prepare the reply chunk given the length actually 488 /* Prepare the reply chunk given the length actually
487 * written */ 489 * written */
488 rs_offset = get_unaligned(&(ch->rs_offset)); 490 xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
489 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 491 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
490 ch->rs_handle, rs_offset, 492 ch->rs_handle, ch->rs_offset,
491 write_len); 493 write_len);
492 chunk_off = 0; 494 chunk_off = 0;
493 while (write_len) { 495 while (write_len) {
494 int this_write; 496 int this_write;
495 497
496 this_write = min(write_len, max_write); 498 this_write = min(write_len, max_write);
497 ret = send_write(xprt, rqstp, 499 ret = send_write(xprt, rqstp,
498 ch->rs_handle, 500 ntohl(ch->rs_handle),
499 rs_offset + chunk_off, 501 rs_offset + chunk_off,
500 xdr_off, 502 xdr_off,
501 this_write, 503 this_write,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 894cb42db91d..73b428bef598 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -51,6 +51,7 @@
51#include <rdma/rdma_cm.h> 51#include <rdma/rdma_cm.h>
52#include <linux/sunrpc/svc_rdma.h> 52#include <linux/sunrpc/svc_rdma.h>
53#include <linux/export.h> 53#include <linux/export.h>
54#include "xprt_rdma.h"
54 55
55#define RPCDBG_FACILITY RPCDBG_SVCXPRT 56#define RPCDBG_FACILITY RPCDBG_SVCXPRT
56 57
@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
90 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, 91 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
91}; 92};
92 93
93/* WR context cache. Created in svc_rdma.c */
94extern struct kmem_cache *svc_rdma_ctxt_cachep;
95
96/* Workqueue created in svc_rdma.c */
97extern struct workqueue_struct *svc_rdma_wq;
98
99struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) 94struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
100{ 95{
101 struct svc_rdma_op_ctxt *ctxt; 96 struct svc_rdma_op_ctxt *ctxt;
@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
150 atomic_dec(&xprt->sc_ctxt_used); 145 atomic_dec(&xprt->sc_ctxt_used);
151} 146}
152 147
153/* Temporary NFS request map cache. Created in svc_rdma.c */
154extern struct kmem_cache *svc_rdma_map_cachep;
155
156/* 148/*
157 * Temporary NFS req mappings are shared across all transport 149 * Temporary NFS req mappings are shared across all transport
158 * instances. These are short lived and should be bounded by the number 150 * instances. These are short lived and should be bounded by the number
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 28236bab57f9..745973b729af 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1490,6 +1490,9 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1490 u8 key; 1490 u8 key;
1491 int len, pageoff; 1491 int len, pageoff;
1492 int i, rc; 1492 int i, rc;
1493 int seg_len;
1494 u64 pa;
1495 int page_no;
1493 1496
1494 pageoff = offset_in_page(seg1->mr_offset); 1497 pageoff = offset_in_page(seg1->mr_offset);
1495 seg1->mr_offset -= pageoff; /* start of page */ 1498 seg1->mr_offset -= pageoff; /* start of page */
@@ -1497,11 +1500,15 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1497 len = -pageoff; 1500 len = -pageoff;
1498 if (*nsegs > RPCRDMA_MAX_DATA_SEGS) 1501 if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
1499 *nsegs = RPCRDMA_MAX_DATA_SEGS; 1502 *nsegs = RPCRDMA_MAX_DATA_SEGS;
1500 for (i = 0; i < *nsegs;) { 1503 for (page_no = i = 0; i < *nsegs;) {
1501 rpcrdma_map_one(ia, seg, writing); 1504 rpcrdma_map_one(ia, seg, writing);
1502 seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->page_list[i] = seg->mr_dma; 1505 pa = seg->mr_dma;
1506 for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
1507 seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->
1508 page_list[page_no++] = pa;
1509 pa += PAGE_SIZE;
1510 }
1503 len += seg->mr_len; 1511 len += seg->mr_len;
1504 BUG_ON(seg->mr_len > PAGE_SIZE);
1505 ++seg; 1512 ++seg;
1506 ++i; 1513 ++i;
1507 /* Check for holes */ 1514 /* Check for holes */
@@ -1540,9 +1547,9 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1540 frmr_wr.send_flags = IB_SEND_SIGNALED; 1547 frmr_wr.send_flags = IB_SEND_SIGNALED;
1541 frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma; 1548 frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
1542 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; 1549 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
1543 frmr_wr.wr.fast_reg.page_list_len = i; 1550 frmr_wr.wr.fast_reg.page_list_len = page_no;
1544 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 1551 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1545 frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT; 1552 frmr_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
1546 BUG_ON(frmr_wr.wr.fast_reg.length < len); 1553 BUG_ON(frmr_wr.wr.fast_reg.length < len);
1547 frmr_wr.wr.fast_reg.access_flags = (writing ? 1554 frmr_wr.wr.fast_reg.access_flags = (writing ?
1548 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : 1555 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 08c5d5a128fc..9a66c95b5837 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
343 */ 343 */
344int rpcrdma_marshal_req(struct rpc_rqst *); 344int rpcrdma_marshal_req(struct rpc_rqst *);
345 345
346/* Temporary NFS request map cache. Created in svc_rdma.c */
347extern struct kmem_cache *svc_rdma_map_cachep;
348/* WR context cache. Created in svc_rdma.c */
349extern struct kmem_cache *svc_rdma_ctxt_cachep;
350/* Workqueue created in svc_rdma.c */
351extern struct workqueue_struct *svc_rdma_wq;
352
346#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */ 353#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 55472c48825e..890b03f8d877 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -53,12 +53,12 @@ static void xs_close(struct rpc_xprt *xprt);
53/* 53/*
54 * xprtsock tunables 54 * xprtsock tunables
55 */ 55 */
56unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; 56static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
57unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; 57static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
58unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; 58static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
59 59
60unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; 60static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
61unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; 61static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
62 62
63#define XS_TCP_LINGER_TO (15U * HZ) 63#define XS_TCP_LINGER_TO (15U * HZ)
64static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; 64static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
@@ -2227,7 +2227,7 @@ static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2227 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2227 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2228 2228
2229 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2229 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2230 "%llu %llu\n", 2230 "%llu %llu %lu %llu %llu\n",
2231 xprt->stat.bind_count, 2231 xprt->stat.bind_count,
2232 xprt->stat.connect_count, 2232 xprt->stat.connect_count,
2233 xprt->stat.connect_time, 2233 xprt->stat.connect_time,
@@ -2236,7 +2236,10 @@ static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2236 xprt->stat.recvs, 2236 xprt->stat.recvs,
2237 xprt->stat.bad_xids, 2237 xprt->stat.bad_xids,
2238 xprt->stat.req_u, 2238 xprt->stat.req_u,
2239 xprt->stat.bklog_u); 2239 xprt->stat.bklog_u,
2240 xprt->stat.max_slots,
2241 xprt->stat.sending_u,
2242 xprt->stat.pending_u);
2240} 2243}
2241 2244
2242/** 2245/**
@@ -2249,14 +2252,18 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2249{ 2252{
2250 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2253 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2251 2254
2252 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", 2255 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2256 "%lu %llu %llu\n",
2253 transport->srcport, 2257 transport->srcport,
2254 xprt->stat.bind_count, 2258 xprt->stat.bind_count,
2255 xprt->stat.sends, 2259 xprt->stat.sends,
2256 xprt->stat.recvs, 2260 xprt->stat.recvs,
2257 xprt->stat.bad_xids, 2261 xprt->stat.bad_xids,
2258 xprt->stat.req_u, 2262 xprt->stat.req_u,
2259 xprt->stat.bklog_u); 2263 xprt->stat.bklog_u,
2264 xprt->stat.max_slots,
2265 xprt->stat.sending_u,
2266 xprt->stat.pending_u);
2260} 2267}
2261 2268
2262/** 2269/**
@@ -2273,7 +2280,8 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2273 if (xprt_connected(xprt)) 2280 if (xprt_connected(xprt))
2274 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2281 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2275 2282
2276 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", 2283 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2284 "%llu %llu %lu %llu %llu\n",
2277 transport->srcport, 2285 transport->srcport,
2278 xprt->stat.bind_count, 2286 xprt->stat.bind_count,
2279 xprt->stat.connect_count, 2287 xprt->stat.connect_count,
@@ -2283,7 +2291,10 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2283 xprt->stat.recvs, 2291 xprt->stat.recvs,
2284 xprt->stat.bad_xids, 2292 xprt->stat.bad_xids,
2285 xprt->stat.req_u, 2293 xprt->stat.req_u,
2286 xprt->stat.bklog_u); 2294 xprt->stat.bklog_u,
2295 xprt->stat.max_slots,
2296 xprt->stat.sending_u,
2297 xprt->stat.pending_u);
2287} 2298}
2288 2299
2289/* 2300/*
@@ -2464,6 +2475,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2464static struct rpc_xprt_ops bc_tcp_ops = { 2475static struct rpc_xprt_ops bc_tcp_ops = {
2465 .reserve_xprt = xprt_reserve_xprt, 2476 .reserve_xprt = xprt_reserve_xprt,
2466 .release_xprt = xprt_release_xprt, 2477 .release_xprt = xprt_release_xprt,
2478 .rpcbind = xs_local_rpcbind,
2467 .buf_alloc = bc_malloc, 2479 .buf_alloc = bc_malloc,
2468 .buf_free = bc_free, 2480 .buf_free = bc_free,
2469 .send_request = bc_send_request, 2481 .send_request = bc_send_request,
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e75813904f26..e3a6e37cd1c5 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -26,10 +26,6 @@
26#include <linux/if_ether.h> 26#include <linux/if_ether.h>
27#endif 27#endif
28 28
29#ifdef CONFIG_TR
30#include <linux/if_tr.h>
31#endif
32
33static struct ctl_table_set * 29static struct ctl_table_set *
34net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces) 30net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
35{ 31{
@@ -59,30 +55,15 @@ static struct ctl_table_root net_sysctl_root = {
59 .permissions = net_ctl_permissions, 55 .permissions = net_ctl_permissions,
60}; 56};
61 57
62static int net_ctl_ro_header_perms(struct ctl_table_root *root,
63 struct nsproxy *namespaces, struct ctl_table *table)
64{
65 if (net_eq(namespaces->net_ns, &init_net))
66 return table->mode;
67 else
68 return table->mode & ~0222;
69}
70
71static struct ctl_table_root net_sysctl_ro_root = {
72 .permissions = net_ctl_ro_header_perms,
73};
74
75static int __net_init sysctl_net_init(struct net *net) 58static int __net_init sysctl_net_init(struct net *net)
76{ 59{
77 setup_sysctl_set(&net->sysctls, 60 setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
78 &net_sysctl_ro_root.default_set,
79 is_seen);
80 return 0; 61 return 0;
81} 62}
82 63
83static void __net_exit sysctl_net_exit(struct net *net) 64static void __net_exit sysctl_net_exit(struct net *net)
84{ 65{
85 WARN_ON(!list_empty(&net->sysctls.list)); 66 retire_sysctl_set(&net->sysctls);
86} 67}
87 68
88static struct pernet_operations sysctl_pernet_ops = { 69static struct pernet_operations sysctl_pernet_ops = {
@@ -90,38 +71,32 @@ static struct pernet_operations sysctl_pernet_ops = {
90 .exit = sysctl_net_exit, 71 .exit = sysctl_net_exit,
91}; 72};
92 73
93static __init int sysctl_init(void) 74static struct ctl_table_header *net_header;
75__init int net_sysctl_init(void)
94{ 76{
95 int ret; 77 static struct ctl_table empty[1];
78 int ret = -ENOMEM;
79 /* Avoid limitations in the sysctl implementation by
80 * registering "/proc/sys/net" as an empty directory not in a
81 * network namespace.
82 */
83 net_header = register_sysctl("net", empty);
84 if (!net_header)
85 goto out;
96 ret = register_pernet_subsys(&sysctl_pernet_ops); 86 ret = register_pernet_subsys(&sysctl_pernet_ops);
97 if (ret) 87 if (ret)
98 goto out; 88 goto out;
99 register_sysctl_root(&net_sysctl_root); 89 register_sysctl_root(&net_sysctl_root);
100 setup_sysctl_set(&net_sysctl_ro_root.default_set, NULL, NULL);
101 register_sysctl_root(&net_sysctl_ro_root);
102out: 90out:
103 return ret; 91 return ret;
104} 92}
105subsys_initcall(sysctl_init);
106
107struct ctl_table_header *register_net_sysctl_table(struct net *net,
108 const struct ctl_path *path, struct ctl_table *table)
109{
110 struct nsproxy namespaces;
111 namespaces = *current->nsproxy;
112 namespaces.net_ns = net;
113 return __register_sysctl_paths(&net_sysctl_root,
114 &namespaces, path, table);
115}
116EXPORT_SYMBOL_GPL(register_net_sysctl_table);
117 93
118struct ctl_table_header *register_net_sysctl_rotable(const 94struct ctl_table_header *register_net_sysctl(struct net *net,
119 struct ctl_path *path, struct ctl_table *table) 95 const char *path, struct ctl_table *table)
120{ 96{
121 return __register_sysctl_paths(&net_sysctl_ro_root, 97 return __register_sysctl_table(&net->sysctls, path, table);
122 &init_nsproxy, path, table);
123} 98}
124EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); 99EXPORT_SYMBOL_GPL(register_net_sysctl);
125 100
126void unregister_net_sysctl_table(struct ctl_table_header *header) 101void unregister_net_sysctl_table(struct ctl_table_header *header)
127{ 102{
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 521d24d04ab2..6cd55d671d3a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,5 +9,3 @@ tipc-y += addr.o bcast.o bearer.o config.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \ 10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o log.o eth_media.o 11 socket.o log.o eth_media.o
12
13# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index a6fdab33877e..357b74b26f9e 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -45,7 +45,6 @@
45 * 45 *
46 * Returns 1 if domain address is valid, otherwise 0 46 * Returns 1 if domain address is valid, otherwise 0
47 */ 47 */
48
49int tipc_addr_domain_valid(u32 addr) 48int tipc_addr_domain_valid(u32 addr)
50{ 49{
51 u32 n = tipc_node(addr); 50 u32 n = tipc_node(addr);
@@ -66,7 +65,6 @@ int tipc_addr_domain_valid(u32 addr)
66 * 65 *
67 * Returns 1 if address can be used, otherwise 0 66 * Returns 1 if address can be used, otherwise 0
68 */ 67 */
69
70int tipc_addr_node_valid(u32 addr) 68int tipc_addr_node_valid(u32 addr)
71{ 69{
72 return tipc_addr_domain_valid(addr) && tipc_node(addr); 70 return tipc_addr_domain_valid(addr) && tipc_node(addr);
@@ -86,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr)
86/** 84/**
87 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value 85 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
88 */ 86 */
89
90int tipc_addr_scope(u32 domain) 87int tipc_addr_scope(u32 domain)
91{ 88{
92 if (likely(!domain)) 89 if (likely(!domain))
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index e4f35afe3207..60b00ab93d74 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -50,18 +50,33 @@ static inline u32 tipc_cluster_mask(u32 addr)
50 return addr & TIPC_CLUSTER_MASK; 50 return addr & TIPC_CLUSTER_MASK;
51} 51}
52 52
53static inline int in_own_cluster(u32 addr) 53static inline int in_own_cluster_exact(u32 addr)
54{ 54{
55 return !((addr ^ tipc_own_addr) >> 12); 55 return !((addr ^ tipc_own_addr) >> 12);
56} 56}
57 57
58/** 58/**
59 * in_own_node - test for node inclusion; <0.0.0> always matches
60 */
61static inline int in_own_node(u32 addr)
62{
63 return (addr == tipc_own_addr) || !addr;
64}
65
66/**
67 * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
68 */
69static inline int in_own_cluster(u32 addr)
70{
71 return in_own_cluster_exact(addr) || !addr;
72}
73
74/**
59 * addr_domain - convert 2-bit scope value to equivalent message lookup domain 75 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
60 * 76 *
61 * Needed when address of a named message must be looked up a second time 77 * Needed when address of a named message must be looked up a second time
62 * after a network hop. 78 * after a network hop.
63 */ 79 */
64
65static inline u32 addr_domain(u32 sc) 80static inline u32 addr_domain(u32 sc)
66{ 81{
67 if (likely(sc == TIPC_NODE_SCOPE)) 82 if (likely(sc == TIPC_NODE_SCOPE))
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 8eb87b11d100..2625f5ebe3e8 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -73,7 +73,6 @@ struct tipc_bcbearer_pair {
73 * large local variables within multicast routines. Concurrent access is 73 * large local variables within multicast routines. Concurrent access is
74 * prevented through use of the spinlock "bc_lock". 74 * prevented through use of the spinlock "bc_lock".
75 */ 75 */
76
77struct tipc_bcbearer { 76struct tipc_bcbearer {
78 struct tipc_bearer bearer; 77 struct tipc_bearer bearer;
79 struct tipc_media media; 78 struct tipc_media media;
@@ -92,7 +91,6 @@ struct tipc_bcbearer {
92 * 91 *
93 * Handles sequence numbering, fragmentation, bundling, etc. 92 * Handles sequence numbering, fragmentation, bundling, etc.
94 */ 93 */
95
96struct tipc_bclink { 94struct tipc_bclink {
97 struct tipc_link link; 95 struct tipc_link link;
98 struct tipc_node node; 96 struct tipc_node node;
@@ -157,44 +155,18 @@ u32 tipc_bclink_get_last_sent(void)
157 return bcl->fsm_msg_cnt; 155 return bcl->fsm_msg_cnt;
158} 156}
159 157
160/** 158static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
161 * bclink_set_gap - set gap according to contents of current deferred pkt queue
162 *
163 * Called with 'node' locked, bc_lock unlocked
164 */
165
166static void bclink_set_gap(struct tipc_node *n_ptr)
167{
168 struct sk_buff *buf = n_ptr->bclink.deferred_head;
169
170 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
171 mod(n_ptr->bclink.last_in);
172 if (unlikely(buf != NULL))
173 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
174}
175
176/**
177 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
178 *
179 * This mechanism endeavours to prevent all nodes in network from trying
180 * to ACK or NACK at the same time.
181 *
182 * Note: TIPC uses a different trigger to distribute ACKs than it does to
183 * distribute NACKs, but tries to use the same spacing (divide by 16).
184 */
185
186static int bclink_ack_allowed(u32 n)
187{ 159{
188 return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag; 160 node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
161 seqno : node->bclink.last_sent;
189} 162}
190 163
191 164
192/** 165/*
193 * tipc_bclink_retransmit_to - get most recent node to request retransmission 166 * tipc_bclink_retransmit_to - get most recent node to request retransmission
194 * 167 *
195 * Called with bc_lock locked 168 * Called with bc_lock locked
196 */ 169 */
197
198struct tipc_node *tipc_bclink_retransmit_to(void) 170struct tipc_node *tipc_bclink_retransmit_to(void)
199{ 171{
200 return bclink->retransmit_to; 172 return bclink->retransmit_to;
@@ -207,7 +179,6 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
207 * 179 *
208 * Called with bc_lock locked 180 * Called with bc_lock locked
209 */ 181 */
210
211static void bclink_retransmit_pkt(u32 after, u32 to) 182static void bclink_retransmit_pkt(u32 after, u32 to)
212{ 183{
213 struct sk_buff *buf; 184 struct sk_buff *buf;
@@ -225,7 +196,6 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
225 * 196 *
226 * Node is locked, bc_lock unlocked. 197 * Node is locked, bc_lock unlocked.
227 */ 198 */
228
229void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) 199void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
230{ 200{
231 struct sk_buff *crs; 201 struct sk_buff *crs;
@@ -281,7 +251,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
281 if (bcbuf_acks(crs) == 0) { 251 if (bcbuf_acks(crs) == 0) {
282 bcl->first_out = next; 252 bcl->first_out = next;
283 bcl->out_queue_size--; 253 bcl->out_queue_size--;
284 buf_discard(crs); 254 kfree_skb(crs);
285 released = 1; 255 released = 1;
286 } 256 }
287 crs = next; 257 crs = next;
@@ -300,143 +270,94 @@ exit:
300 spin_unlock_bh(&bc_lock); 270 spin_unlock_bh(&bc_lock);
301} 271}
302 272
303/** 273/*
304 * bclink_send_ack - unicast an ACK msg 274 * tipc_bclink_update_link_state - update broadcast link state
305 * 275 *
306 * tipc_net_lock and node lock set 276 * tipc_net_lock and node lock set
307 */ 277 */
308 278void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
309static void bclink_send_ack(struct tipc_node *n_ptr)
310{ 279{
311 struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 280 struct sk_buff *buf;
312 281
313 if (l_ptr != NULL) 282 /* Ignore "stale" link state info */
314 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
315}
316 283
317/** 284 if (less_eq(last_sent, n_ptr->bclink.last_in))
318 * bclink_send_nack- broadcast a NACK msg 285 return;
319 *
320 * tipc_net_lock and node lock set
321 */
322 286
323static void bclink_send_nack(struct tipc_node *n_ptr) 287 /* Update link synchronization state; quit if in sync */
324{ 288
325 struct sk_buff *buf; 289 bclink_update_last_sent(n_ptr, last_sent);
326 struct tipc_msg *msg; 290
291 if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
292 return;
293
294 /* Update out-of-sync state; quit if loss is still unconfirmed */
327 295
328 if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to)) 296 if ((++n_ptr->bclink.oos_state) == 1) {
297 if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
298 return;
299 n_ptr->bclink.oos_state++;
300 }
301
302 /* Don't NACK if one has been recently sent (or seen) */
303
304 if (n_ptr->bclink.oos_state & 0x1)
329 return; 305 return;
330 306
307 /* Send NACK */
308
331 buf = tipc_buf_acquire(INT_H_SIZE); 309 buf = tipc_buf_acquire(INT_H_SIZE);
332 if (buf) { 310 if (buf) {
333 msg = buf_msg(buf); 311 struct tipc_msg *msg = buf_msg(buf);
312
334 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 313 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
335 INT_H_SIZE, n_ptr->addr); 314 INT_H_SIZE, n_ptr->addr);
336 msg_set_non_seq(msg, 1); 315 msg_set_non_seq(msg, 1);
337 msg_set_mc_netid(msg, tipc_net_id); 316 msg_set_mc_netid(msg, tipc_net_id);
338 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 317 msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
339 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); 318 msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
340 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 319 msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
341 msg_set_bcast_tag(msg, tipc_own_tag); 320 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
321 : n_ptr->bclink.last_sent);
342 322
323 spin_lock_bh(&bc_lock);
343 tipc_bearer_send(&bcbearer->bearer, buf, NULL); 324 tipc_bearer_send(&bcbearer->bearer, buf, NULL);
344 bcl->stats.sent_nacks++; 325 bcl->stats.sent_nacks++;
345 buf_discard(buf); 326 spin_unlock_bh(&bc_lock);
346 327 kfree_skb(buf);
347 /*
348 * Ensure we doesn't send another NACK msg to the node
349 * until 16 more deferred messages arrive from it
350 * (i.e. helps prevent all nodes from NACK'ing at same time)
351 */
352 328
353 n_ptr->bclink.nack_sync = tipc_own_tag; 329 n_ptr->bclink.oos_state++;
354 } 330 }
355} 331}
356 332
357/** 333/*
358 * tipc_bclink_check_gap - send a NACK if a sequence gap exists 334 * bclink_peek_nack - monitor retransmission requests sent by other nodes
359 * 335 *
360 * tipc_net_lock and node lock set 336 * Delay any upcoming NACK by this node if another node has already
361 */ 337 * requested the first message this node is going to ask for.
362
363void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
364{
365 if (!n_ptr->bclink.supported ||
366 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
367 return;
368
369 bclink_set_gap(n_ptr);
370 if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
371 n_ptr->bclink.gap_to = last_sent;
372 bclink_send_nack(n_ptr);
373}
374
375/**
376 * tipc_bclink_peek_nack - process a NACK msg meant for another node
377 * 338 *
378 * Only tipc_net_lock set. 339 * Only tipc_net_lock set.
379 */ 340 */
380 341static void bclink_peek_nack(struct tipc_msg *msg)
381static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
382{ 342{
383 struct tipc_node *n_ptr = tipc_node_find(dest); 343 struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
384 u32 my_after, my_to;
385 344
386 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) 345 if (unlikely(!n_ptr))
387 return; 346 return;
347
388 tipc_node_lock(n_ptr); 348 tipc_node_lock(n_ptr);
389 /*
390 * Modify gap to suppress unnecessary NACKs from this node
391 */
392 my_after = n_ptr->bclink.gap_after;
393 my_to = n_ptr->bclink.gap_to;
394
395 if (less_eq(gap_after, my_after)) {
396 if (less(my_after, gap_to) && less(gap_to, my_to))
397 n_ptr->bclink.gap_after = gap_to;
398 else if (less_eq(my_to, gap_to))
399 n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
400 } else if (less_eq(gap_after, my_to)) {
401 if (less_eq(my_to, gap_to))
402 n_ptr->bclink.gap_to = gap_after;
403 } else {
404 /*
405 * Expand gap if missing bufs not in deferred queue:
406 */
407 struct sk_buff *buf = n_ptr->bclink.deferred_head;
408 u32 prev = n_ptr->bclink.gap_to;
409 349
410 for (; buf; buf = buf->next) { 350 if (n_ptr->bclink.supported &&
411 u32 seqno = buf_seqno(buf); 351 (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
352 (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
353 n_ptr->bclink.oos_state = 2;
412 354
413 if (mod(seqno - prev) != 1) {
414 buf = NULL;
415 break;
416 }
417 if (seqno == gap_after)
418 break;
419 prev = seqno;
420 }
421 if (buf == NULL)
422 n_ptr->bclink.gap_to = gap_after;
423 }
424 /*
425 * Some nodes may send a complementary NACK now:
426 */
427 if (bclink_ack_allowed(sender_tag + 1)) {
428 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
429 bclink_send_nack(n_ptr);
430 bclink_set_gap(n_ptr);
431 }
432 }
433 tipc_node_unlock(n_ptr); 355 tipc_node_unlock(n_ptr);
434} 356}
435 357
436/** 358/*
437 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster 359 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
438 */ 360 */
439
440int tipc_bclink_send_msg(struct sk_buff *buf) 361int tipc_bclink_send_msg(struct sk_buff *buf)
441{ 362{
442 int res; 363 int res;
@@ -445,7 +366,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
445 366
446 if (!bclink->bcast_nodes.count) { 367 if (!bclink->bcast_nodes.count) {
447 res = msg_data_sz(buf_msg(buf)); 368 res = msg_data_sz(buf_msg(buf));
448 buf_discard(buf); 369 kfree_skb(buf);
449 goto exit; 370 goto exit;
450 } 371 }
451 372
@@ -460,19 +381,43 @@ exit:
460 return res; 381 return res;
461} 382}
462 383
463/** 384/*
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 *
387 * Called with both sending node's lock and bc_lock taken.
388 */
389static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
390{
391 bclink_update_last_sent(node, seqno);
392 node->bclink.last_in = seqno;
393 node->bclink.oos_state = 0;
394 bcl->stats.recv_info++;
395
396 /*
397 * Unicast an ACK periodically, ensuring that
398 * all nodes in the cluster don't ACK at the same time
399 */
400
401 if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
402 tipc_link_send_proto_msg(
403 node->active_links[node->addr & 1],
404 STATE_MSG, 0, 0, 0, 0, 0);
405 bcl->stats.sent_acks++;
406 }
407}
408
409/*
464 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards 410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
465 * 411 *
466 * tipc_net_lock is read_locked, no other locks set 412 * tipc_net_lock is read_locked, no other locks set
467 */ 413 */
468
469void tipc_bclink_recv_pkt(struct sk_buff *buf) 414void tipc_bclink_recv_pkt(struct sk_buff *buf)
470{ 415{
471 struct tipc_msg *msg = buf_msg(buf); 416 struct tipc_msg *msg = buf_msg(buf);
472 struct tipc_node *node; 417 struct tipc_node *node;
473 u32 next_in; 418 u32 next_in;
474 u32 seqno; 419 u32 seqno;
475 struct sk_buff *deferred; 420 int deferred;
476 421
477 /* Screen out unwanted broadcast messages */ 422 /* Screen out unwanted broadcast messages */
478 423
@@ -487,6 +432,8 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
487 if (unlikely(!node->bclink.supported)) 432 if (unlikely(!node->bclink.supported))
488 goto unlock; 433 goto unlock;
489 434
435 /* Handle broadcast protocol message */
436
490 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 437 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
491 if (msg_type(msg) != STATE_MSG) 438 if (msg_type(msg) != STATE_MSG)
492 goto unlock; 439 goto unlock;
@@ -501,89 +448,118 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
501 spin_unlock_bh(&bc_lock); 448 spin_unlock_bh(&bc_lock);
502 } else { 449 } else {
503 tipc_node_unlock(node); 450 tipc_node_unlock(node);
504 tipc_bclink_peek_nack(msg_destnode(msg), 451 bclink_peek_nack(msg);
505 msg_bcast_tag(msg),
506 msg_bcgap_after(msg),
507 msg_bcgap_to(msg));
508 } 452 }
509 goto exit; 453 goto exit;
510 } 454 }
511 455
512 /* Handle in-sequence broadcast message */ 456 /* Handle in-sequence broadcast message */
513 457
514receive:
515 next_in = mod(node->bclink.last_in + 1);
516 seqno = msg_seqno(msg); 458 seqno = msg_seqno(msg);
459 next_in = mod(node->bclink.last_in + 1);
517 460
518 if (likely(seqno == next_in)) { 461 if (likely(seqno == next_in)) {
519 bcl->stats.recv_info++; 462receive:
520 node->bclink.last_in++; 463 /* Deliver message to destination */
521 bclink_set_gap(node); 464
522 if (unlikely(bclink_ack_allowed(seqno))) {
523 bclink_send_ack(node);
524 bcl->stats.sent_acks++;
525 }
526 if (likely(msg_isdata(msg))) { 465 if (likely(msg_isdata(msg))) {
466 spin_lock_bh(&bc_lock);
467 bclink_accept_pkt(node, seqno);
468 spin_unlock_bh(&bc_lock);
527 tipc_node_unlock(node); 469 tipc_node_unlock(node);
528 if (likely(msg_mcast(msg))) 470 if (likely(msg_mcast(msg)))
529 tipc_port_recv_mcast(buf, NULL); 471 tipc_port_recv_mcast(buf, NULL);
530 else 472 else
531 buf_discard(buf); 473 kfree_skb(buf);
532 } else if (msg_user(msg) == MSG_BUNDLER) { 474 } else if (msg_user(msg) == MSG_BUNDLER) {
475 spin_lock_bh(&bc_lock);
476 bclink_accept_pkt(node, seqno);
533 bcl->stats.recv_bundles++; 477 bcl->stats.recv_bundles++;
534 bcl->stats.recv_bundled += msg_msgcnt(msg); 478 bcl->stats.recv_bundled += msg_msgcnt(msg);
479 spin_unlock_bh(&bc_lock);
535 tipc_node_unlock(node); 480 tipc_node_unlock(node);
536 tipc_link_recv_bundle(buf); 481 tipc_link_recv_bundle(buf);
537 } else if (msg_user(msg) == MSG_FRAGMENTER) { 482 } else if (msg_user(msg) == MSG_FRAGMENTER) {
483 int ret = tipc_link_recv_fragment(&node->bclink.defragm,
484 &buf, &msg);
485 if (ret < 0)
486 goto unlock;
487 spin_lock_bh(&bc_lock);
488 bclink_accept_pkt(node, seqno);
538 bcl->stats.recv_fragments++; 489 bcl->stats.recv_fragments++;
539 if (tipc_link_recv_fragment(&node->bclink.defragm, 490 if (ret > 0)
540 &buf, &msg))
541 bcl->stats.recv_fragmented++; 491 bcl->stats.recv_fragmented++;
492 spin_unlock_bh(&bc_lock);
542 tipc_node_unlock(node); 493 tipc_node_unlock(node);
543 tipc_net_route_msg(buf); 494 tipc_net_route_msg(buf);
544 } else if (msg_user(msg) == NAME_DISTRIBUTOR) { 495 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
496 spin_lock_bh(&bc_lock);
497 bclink_accept_pkt(node, seqno);
498 spin_unlock_bh(&bc_lock);
545 tipc_node_unlock(node); 499 tipc_node_unlock(node);
546 tipc_named_recv(buf); 500 tipc_named_recv(buf);
547 } else { 501 } else {
502 spin_lock_bh(&bc_lock);
503 bclink_accept_pkt(node, seqno);
504 spin_unlock_bh(&bc_lock);
548 tipc_node_unlock(node); 505 tipc_node_unlock(node);
549 buf_discard(buf); 506 kfree_skb(buf);
550 } 507 }
551 buf = NULL; 508 buf = NULL;
509
510 /* Determine new synchronization state */
511
552 tipc_node_lock(node); 512 tipc_node_lock(node);
553 deferred = node->bclink.deferred_head; 513 if (unlikely(!tipc_node_is_up(node)))
554 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 514 goto unlock;
555 buf = deferred; 515
556 msg = buf_msg(buf); 516 if (node->bclink.last_in == node->bclink.last_sent)
557 node->bclink.deferred_head = deferred->next; 517 goto unlock;
558 goto receive; 518
559 } 519 if (!node->bclink.deferred_head) {
560 } else if (less(next_in, seqno)) { 520 node->bclink.oos_state = 1;
561 u32 gap_after = node->bclink.gap_after; 521 goto unlock;
562 u32 gap_to = node->bclink.gap_to;
563
564 if (tipc_link_defer_pkt(&node->bclink.deferred_head,
565 &node->bclink.deferred_tail,
566 buf)) {
567 node->bclink.nack_sync++;
568 bcl->stats.deferred_recv++;
569 if (seqno == mod(gap_after + 1))
570 node->bclink.gap_after = seqno;
571 else if (less(gap_after, seqno) && less(seqno, gap_to))
572 node->bclink.gap_to = seqno;
573 } 522 }
523
524 msg = buf_msg(node->bclink.deferred_head);
525 seqno = msg_seqno(msg);
526 next_in = mod(next_in + 1);
527 if (seqno != next_in)
528 goto unlock;
529
530 /* Take in-sequence message from deferred queue & deliver it */
531
532 buf = node->bclink.deferred_head;
533 node->bclink.deferred_head = buf->next;
534 node->bclink.deferred_size--;
535 goto receive;
536 }
537
538 /* Handle out-of-sequence broadcast message */
539
540 if (less(next_in, seqno)) {
541 deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
542 &node->bclink.deferred_tail,
543 buf);
544 node->bclink.deferred_size += deferred;
545 bclink_update_last_sent(node, seqno);
574 buf = NULL; 546 buf = NULL;
575 if (bclink_ack_allowed(node->bclink.nack_sync)) { 547 } else
576 if (gap_to != gap_after) 548 deferred = 0;
577 bclink_send_nack(node); 549
578 bclink_set_gap(node); 550 spin_lock_bh(&bc_lock);
579 } 551
580 } else { 552 if (deferred)
553 bcl->stats.deferred_recv++;
554 else
581 bcl->stats.duplicates++; 555 bcl->stats.duplicates++;
582 } 556
557 spin_unlock_bh(&bc_lock);
558
583unlock: 559unlock:
584 tipc_node_unlock(node); 560 tipc_node_unlock(node);
585exit: 561exit:
586 buf_discard(buf); 562 kfree_skb(buf);
587} 563}
588 564
589u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 565u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
@@ -602,7 +578,6 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
602 * Returns 0 (packet sent successfully) under all circumstances, 578 * Returns 0 (packet sent successfully) under all circumstances,
603 * since the broadcast link's pseudo-bearer never blocks 579 * since the broadcast link's pseudo-bearer never blocks
604 */ 580 */
605
606static int tipc_bcbearer_send(struct sk_buff *buf, 581static int tipc_bcbearer_send(struct sk_buff *buf,
607 struct tipc_bearer *unused1, 582 struct tipc_bearer *unused1,
608 struct tipc_media_addr *unused2) 583 struct tipc_media_addr *unused2)
@@ -615,7 +590,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
615 * preparation is skipped for broadcast link protocol messages 590 * preparation is skipped for broadcast link protocol messages
616 * since they are sent in an unreliable manner and don't need it 591 * since they are sent in an unreliable manner and don't need it
617 */ 592 */
618
619 if (likely(!msg_non_seq(buf_msg(buf)))) { 593 if (likely(!msg_non_seq(buf_msg(buf)))) {
620 struct tipc_msg *msg; 594 struct tipc_msg *msg;
621 595
@@ -632,7 +606,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
632 } 606 }
633 607
634 /* Send buffer over bearers until all targets reached */ 608 /* Send buffer over bearers until all targets reached */
635
636 bcbearer->remains = bclink->bcast_nodes; 609 bcbearer->remains = bclink->bcast_nodes;
637 610
638 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { 611 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -674,7 +647,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
674/** 647/**
675 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer 648 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
676 */ 649 */
677
678void tipc_bcbearer_sort(void) 650void tipc_bcbearer_sort(void)
679{ 651{
680 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 652 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
@@ -685,7 +657,6 @@ void tipc_bcbearer_sort(void)
685 spin_lock_bh(&bc_lock); 657 spin_lock_bh(&bc_lock);
686 658
687 /* Group bearers by priority (can assume max of two per priority) */ 659 /* Group bearers by priority (can assume max of two per priority) */
688
689 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); 660 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
690 661
691 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 662 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
@@ -701,7 +672,6 @@ void tipc_bcbearer_sort(void)
701 } 672 }
702 673
703 /* Create array of bearer pairs for broadcasting */ 674 /* Create array of bearer pairs for broadcasting */
704
705 bp_curr = bcbearer->bpairs; 675 bp_curr = bcbearer->bpairs;
706 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs)); 676 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
707 677
@@ -831,7 +801,6 @@ void tipc_bclink_stop(void)
831/** 801/**
832 * tipc_nmap_add - add a node to a node map 802 * tipc_nmap_add - add a node to a node map
833 */ 803 */
834
835void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) 804void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
836{ 805{
837 int n = tipc_node(node); 806 int n = tipc_node(node);
@@ -847,7 +816,6 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
847/** 816/**
848 * tipc_nmap_remove - remove a node from a node map 817 * tipc_nmap_remove - remove a node from a node map
849 */ 818 */
850
851void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) 819void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
852{ 820{
853 int n = tipc_node(node); 821 int n = tipc_node(node);
@@ -866,7 +834,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
866 * @nm_b: input node map B 834 * @nm_b: input node map B
867 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) 835 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
868 */ 836 */
869
870static void tipc_nmap_diff(struct tipc_node_map *nm_a, 837static void tipc_nmap_diff(struct tipc_node_map *nm_a,
871 struct tipc_node_map *nm_b, 838 struct tipc_node_map *nm_b,
872 struct tipc_node_map *nm_diff) 839 struct tipc_node_map *nm_diff)
@@ -892,7 +859,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
892/** 859/**
893 * tipc_port_list_add - add a port to a port list, ensuring no duplicates 860 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
894 */ 861 */
895
896void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) 862void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
897{ 863{
898 struct tipc_port_list *item = pl_ptr; 864 struct tipc_port_list *item = pl_ptr;
@@ -926,7 +892,6 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
926 * tipc_port_list_free - free dynamically created entries in port_list chain 892 * tipc_port_list_free - free dynamically created entries in port_list chain
927 * 893 *
928 */ 894 */
929
930void tipc_port_list_free(struct tipc_port_list *pl_ptr) 895void tipc_port_list_free(struct tipc_port_list *pl_ptr)
931{ 896{
932 struct tipc_port_list *item; 897 struct tipc_port_list *item;
@@ -937,4 +902,3 @@ void tipc_port_list_free(struct tipc_port_list *pl_ptr)
937 kfree(item); 902 kfree(item);
938 } 903 }
939} 904}
940
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index b009666c60b0..a93306557e00 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -45,7 +45,6 @@
45 * @count: # of nodes in set 45 * @count: # of nodes in set
46 * @map: bitmap of node identifiers that are in the set 46 * @map: bitmap of node identifiers that are in the set
47 */ 47 */
48
49struct tipc_node_map { 48struct tipc_node_map {
50 u32 count; 49 u32 count;
51 u32 map[MAX_NODES / WSIZE]; 50 u32 map[MAX_NODES / WSIZE];
@@ -59,7 +58,6 @@ struct tipc_node_map {
59 * @next: pointer to next entry in list 58 * @next: pointer to next entry in list
60 * @ports: array of port references 59 * @ports: array of port references
61 */ 60 */
62
63struct tipc_port_list { 61struct tipc_port_list {
64 int count; 62 int count;
65 struct tipc_port_list *next; 63 struct tipc_port_list *next;
@@ -77,7 +75,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
77/** 75/**
78 * tipc_nmap_equal - test for equality of node maps 76 * tipc_nmap_equal - test for equality of node maps
79 */ 77 */
80
81static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) 78static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
82{ 79{
83 return !memcmp(nm_a, nm_b, sizeof(*nm_a)); 80 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
@@ -96,7 +93,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf);
96void tipc_bclink_recv_pkt(struct sk_buff *buf); 93void tipc_bclink_recv_pkt(struct sk_buff *buf);
97u32 tipc_bclink_get_last_sent(void); 94u32 tipc_bclink_get_last_sent(void);
98u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); 95u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
99void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno); 96void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
100int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 97int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
101int tipc_bclink_reset_stats(void); 98int tipc_bclink_reset_stats(void);
102int tipc_bclink_set_queue_limits(u32 limit); 99int tipc_bclink_set_queue_limits(u32 limit);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 329fb659fae4..a297e3a2e3e7 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -53,7 +53,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr);
53 * 53 *
54 * Returns 1 if media name is valid, otherwise 0. 54 * Returns 1 if media name is valid, otherwise 0.
55 */ 55 */
56
57static int media_name_valid(const char *name) 56static int media_name_valid(const char *name)
58{ 57{
59 u32 len; 58 u32 len;
@@ -67,7 +66,6 @@ static int media_name_valid(const char *name)
67/** 66/**
68 * tipc_media_find - locates specified media object by name 67 * tipc_media_find - locates specified media object by name
69 */ 68 */
70
71struct tipc_media *tipc_media_find(const char *name) 69struct tipc_media *tipc_media_find(const char *name)
72{ 70{
73 u32 i; 71 u32 i;
@@ -82,7 +80,6 @@ struct tipc_media *tipc_media_find(const char *name)
82/** 80/**
83 * media_find_id - locates specified media object by type identifier 81 * media_find_id - locates specified media object by type identifier
84 */ 82 */
85
86static struct tipc_media *media_find_id(u8 type) 83static struct tipc_media *media_find_id(u8 type)
87{ 84{
88 u32 i; 85 u32 i;
@@ -99,7 +96,6 @@ static struct tipc_media *media_find_id(u8 type)
99 * 96 *
100 * Bearers for this media type must be activated separately at a later stage. 97 * Bearers for this media type must be activated separately at a later stage.
101 */ 98 */
102
103int tipc_register_media(struct tipc_media *m_ptr) 99int tipc_register_media(struct tipc_media *m_ptr)
104{ 100{
105 int res = -EINVAL; 101 int res = -EINVAL;
@@ -134,7 +130,6 @@ exit:
134/** 130/**
135 * tipc_media_addr_printf - record media address in print buffer 131 * tipc_media_addr_printf - record media address in print buffer
136 */ 132 */
137
138void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) 133void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
139{ 134{
140 char addr_str[MAX_ADDR_STR]; 135 char addr_str[MAX_ADDR_STR];
@@ -156,7 +151,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
156/** 151/**
157 * tipc_media_get_names - record names of registered media in buffer 152 * tipc_media_get_names - record names of registered media in buffer
158 */ 153 */
159
160struct sk_buff *tipc_media_get_names(void) 154struct sk_buff *tipc_media_get_names(void)
161{ 155{
162 struct sk_buff *buf; 156 struct sk_buff *buf;
@@ -183,7 +177,6 @@ struct sk_buff *tipc_media_get_names(void)
183 * 177 *
184 * Returns 1 if bearer name is valid, otherwise 0. 178 * Returns 1 if bearer name is valid, otherwise 0.
185 */ 179 */
186
187static int bearer_name_validate(const char *name, 180static int bearer_name_validate(const char *name,
188 struct tipc_bearer_names *name_parts) 181 struct tipc_bearer_names *name_parts)
189{ 182{
@@ -194,7 +187,6 @@ static int bearer_name_validate(const char *name,
194 u32 if_len; 187 u32 if_len;
195 188
196 /* copy bearer name & ensure length is OK */ 189 /* copy bearer name & ensure length is OK */
197
198 name_copy[TIPC_MAX_BEARER_NAME - 1] = 0; 190 name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
199 /* need above in case non-Posix strncpy() doesn't pad with nulls */ 191 /* need above in case non-Posix strncpy() doesn't pad with nulls */
200 strncpy(name_copy, name, TIPC_MAX_BEARER_NAME); 192 strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
@@ -202,7 +194,6 @@ static int bearer_name_validate(const char *name,
202 return 0; 194 return 0;
203 195
204 /* ensure all component parts of bearer name are present */ 196 /* ensure all component parts of bearer name are present */
205
206 media_name = name_copy; 197 media_name = name_copy;
207 if_name = strchr(media_name, ':'); 198 if_name = strchr(media_name, ':');
208 if (if_name == NULL) 199 if (if_name == NULL)
@@ -212,7 +203,6 @@ static int bearer_name_validate(const char *name,
212 if_len = strlen(if_name) + 1; 203 if_len = strlen(if_name) + 1;
213 204
214 /* validate component parts of bearer name */ 205 /* validate component parts of bearer name */
215
216 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 206 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
217 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 207 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
218 (strspn(media_name, tipc_alphabet) != (media_len - 1)) || 208 (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
@@ -220,7 +210,6 @@ static int bearer_name_validate(const char *name,
220 return 0; 210 return 0;
221 211
222 /* return bearer name components, if necessary */ 212 /* return bearer name components, if necessary */
223
224 if (name_parts) { 213 if (name_parts) {
225 strcpy(name_parts->media_name, media_name); 214 strcpy(name_parts->media_name, media_name);
226 strcpy(name_parts->if_name, if_name); 215 strcpy(name_parts->if_name, if_name);
@@ -231,7 +220,6 @@ static int bearer_name_validate(const char *name,
231/** 220/**
232 * tipc_bearer_find - locates bearer object with matching bearer name 221 * tipc_bearer_find - locates bearer object with matching bearer name
233 */ 222 */
234
235struct tipc_bearer *tipc_bearer_find(const char *name) 223struct tipc_bearer *tipc_bearer_find(const char *name)
236{ 224{
237 struct tipc_bearer *b_ptr; 225 struct tipc_bearer *b_ptr;
@@ -247,7 +235,6 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
247/** 235/**
248 * tipc_bearer_find_interface - locates bearer object with matching interface name 236 * tipc_bearer_find_interface - locates bearer object with matching interface name
249 */ 237 */
250
251struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) 238struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
252{ 239{
253 struct tipc_bearer *b_ptr; 240 struct tipc_bearer *b_ptr;
@@ -267,7 +254,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
267/** 254/**
268 * tipc_bearer_get_names - record names of bearers in buffer 255 * tipc_bearer_get_names - record names of bearers in buffer
269 */ 256 */
270
271struct sk_buff *tipc_bearer_get_names(void) 257struct sk_buff *tipc_bearer_get_names(void)
272{ 258{
273 struct sk_buff *buf; 259 struct sk_buff *buf;
@@ -363,7 +349,6 @@ void tipc_continue(struct tipc_bearer *b_ptr)
363 * the bearer is congested. 'tipc_net_lock' is in read_lock here 349 * the bearer is congested. 'tipc_net_lock' is in read_lock here
364 * bearer.lock is busy 350 * bearer.lock is busy
365 */ 351 */
366
367static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, 352static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
368 struct tipc_link *l_ptr) 353 struct tipc_link *l_ptr)
369{ 354{
@@ -377,7 +362,6 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
377 * the bearer is congested. 'tipc_net_lock' is in read_lock here, 362 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
378 * bearer.lock is free 363 * bearer.lock is free
379 */ 364 */
380
381void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) 365void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
382{ 366{
383 spin_lock_bh(&b_ptr->lock); 367 spin_lock_bh(&b_ptr->lock);
@@ -410,7 +394,6 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
410/** 394/**
411 * tipc_bearer_congested - determines if bearer is currently congested 395 * tipc_bearer_congested - determines if bearer is currently congested
412 */ 396 */
413
414int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) 397int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
415{ 398{
416 if (unlikely(b_ptr->blocked)) 399 if (unlikely(b_ptr->blocked))
@@ -423,7 +406,6 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
423/** 406/**
424 * tipc_enable_bearer - enable bearer with the given name 407 * tipc_enable_bearer - enable bearer with the given name
425 */ 408 */
426
427int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) 409int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
428{ 410{
429 struct tipc_bearer *b_ptr; 411 struct tipc_bearer *b_ptr;
@@ -435,7 +417,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
435 u32 i; 417 u32 i;
436 int res = -EINVAL; 418 int res = -EINVAL;
437 419
438 if (tipc_mode != TIPC_NET_MODE) { 420 if (!tipc_own_addr) {
439 warn("Bearer <%s> rejected, not supported in standalone mode\n", 421 warn("Bearer <%s> rejected, not supported in standalone mode\n",
440 name); 422 name);
441 return -ENOPROTOOPT; 423 return -ENOPROTOOPT;
@@ -449,15 +431,14 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
449 if (tipc_in_scope(disc_domain, tipc_own_addr)) { 431 if (tipc_in_scope(disc_domain, tipc_own_addr)) {
450 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; 432 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
451 res = 0; /* accept any node in own cluster */ 433 res = 0; /* accept any node in own cluster */
452 } else if (in_own_cluster(disc_domain)) 434 } else if (in_own_cluster_exact(disc_domain))
453 res = 0; /* accept specified node in own cluster */ 435 res = 0; /* accept specified node in own cluster */
454 } 436 }
455 if (res) { 437 if (res) {
456 warn("Bearer <%s> rejected, illegal discovery domain\n", name); 438 warn("Bearer <%s> rejected, illegal discovery domain\n", name);
457 return -EINVAL; 439 return -EINVAL;
458 } 440 }
459 if ((priority < TIPC_MIN_LINK_PRI || 441 if ((priority > TIPC_MAX_LINK_PRI) &&
460 priority > TIPC_MAX_LINK_PRI) &&
461 (priority != TIPC_MEDIA_LINK_PRI)) { 442 (priority != TIPC_MEDIA_LINK_PRI)) {
462 warn("Bearer <%s> rejected, illegal priority\n", name); 443 warn("Bearer <%s> rejected, illegal priority\n", name);
463 return -EINVAL; 444 return -EINVAL;
@@ -542,7 +523,6 @@ exit:
542 * tipc_block_bearer(): Block the bearer with the given name, 523 * tipc_block_bearer(): Block the bearer with the given name,
543 * and reset all its links 524 * and reset all its links
544 */ 525 */
545
546int tipc_block_bearer(const char *name) 526int tipc_block_bearer(const char *name)
547{ 527{
548 struct tipc_bearer *b_ptr = NULL; 528 struct tipc_bearer *b_ptr = NULL;
@@ -574,11 +554,10 @@ int tipc_block_bearer(const char *name)
574} 554}
575 555
576/** 556/**
577 * bearer_disable - 557 * bearer_disable
578 * 558 *
579 * Note: This routine assumes caller holds tipc_net_lock. 559 * Note: This routine assumes caller holds tipc_net_lock.
580 */ 560 */
581
582static void bearer_disable(struct tipc_bearer *b_ptr) 561static void bearer_disable(struct tipc_bearer *b_ptr)
583{ 562{
584 struct tipc_link *l_ptr; 563 struct tipc_link *l_ptr;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d3eac56b8c21..e3b2be37fb31 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -49,7 +49,6 @@
49 * - media type identifier located at offset 3 49 * - media type identifier located at offset 3
50 * - remaining bytes vary according to media type 50 * - remaining bytes vary according to media type
51 */ 51 */
52
53#define TIPC_MEDIA_ADDR_SIZE 20 52#define TIPC_MEDIA_ADDR_SIZE 20
54#define TIPC_MEDIA_TYPE_OFFSET 3 53#define TIPC_MEDIA_TYPE_OFFSET 3
55 54
@@ -64,7 +63,6 @@
64 * @media_id: TIPC media type identifier 63 * @media_id: TIPC media type identifier
65 * @broadcast: non-zero if address is a broadcast address 64 * @broadcast: non-zero if address is a broadcast address
66 */ 65 */
67
68struct tipc_media_addr { 66struct tipc_media_addr {
69 u8 value[TIPC_MEDIA_ADDR_SIZE]; 67 u8 value[TIPC_MEDIA_ADDR_SIZE];
70 u8 media_id; 68 u8 media_id;
@@ -89,7 +87,6 @@ struct tipc_bearer;
89 * @type_id: TIPC media identifier 87 * @type_id: TIPC media identifier
90 * @name: media name 88 * @name: media name
91 */ 89 */
92
93struct tipc_media { 90struct tipc_media {
94 int (*send_msg)(struct sk_buff *buf, 91 int (*send_msg)(struct sk_buff *buf,
95 struct tipc_bearer *b_ptr, 92 struct tipc_bearer *b_ptr,
@@ -216,7 +213,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
216 * send routine always returns success -- even if the buffer was not sent -- 213 * send routine always returns success -- even if the buffer was not sent --
217 * and let TIPC's link code deal with the undelivered message. 214 * and let TIPC's link code deal with the undelivered message.
218 */ 215 */
219
220static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, 216static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
221 struct sk_buff *buf, 217 struct sk_buff *buf,
222 struct tipc_media_addr *dest) 218 struct tipc_media_addr *dest)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 4785bf26cdf4..c5712a343810 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -131,7 +131,6 @@ static struct sk_buff *tipc_show_stats(void)
131 tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); 131 tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
132 132
133 /* Use additional tipc_printf()'s to return more info ... */ 133 /* Use additional tipc_printf()'s to return more info ... */
134
135 str_len = tipc_printbuf_validate(&pb); 134 str_len = tipc_printbuf_validate(&pb);
136 skb_put(buf, TLV_SPACE(str_len)); 135 skb_put(buf, TLV_SPACE(str_len));
137 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 136 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -179,7 +178,7 @@ static struct sk_buff *cfg_set_own_addr(void)
179 if (!tipc_addr_node_valid(addr)) 178 if (!tipc_addr_node_valid(addr))
180 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 179 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
181 " (node address)"); 180 " (node address)");
182 if (tipc_mode == TIPC_NET_MODE) 181 if (tipc_own_addr)
183 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 182 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
184 " (cannot change node address once assigned)"); 183 " (cannot change node address once assigned)");
185 184
@@ -191,7 +190,6 @@ static struct sk_buff *cfg_set_own_addr(void)
191 * configuration commands can't be received until a local configuration 190 * configuration commands can't be received until a local configuration
192 * command to enable the first bearer is received and processed. 191 * command to enable the first bearer is received and processed.
193 */ 192 */
194
195 spin_unlock_bh(&config_lock); 193 spin_unlock_bh(&config_lock);
196 tipc_core_start_net(addr); 194 tipc_core_start_net(addr);
197 spin_lock_bh(&config_lock); 195 spin_lock_bh(&config_lock);
@@ -218,7 +216,7 @@ static struct sk_buff *cfg_set_max_publications(void)
218 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 216 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
219 217
220 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 218 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
221 if (value != delimit(value, 1, 65535)) 219 if (value < 1 || value > 65535)
222 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 220 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
223 " (max publications must be 1-65535)"); 221 " (max publications must be 1-65535)");
224 tipc_max_publications = value; 222 tipc_max_publications = value;
@@ -233,7 +231,7 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
233 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 231 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
234 232
235 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 233 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
236 if (value != delimit(value, 1, 65535)) 234 if (value < 1 || value > 65535)
237 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 235 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
238 " (max subscriptions must be 1-65535"); 236 " (max subscriptions must be 1-65535");
239 tipc_max_subscriptions = value; 237 tipc_max_subscriptions = value;
@@ -249,14 +247,11 @@ static struct sk_buff *cfg_set_max_ports(void)
249 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 247 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
250 if (value == tipc_max_ports) 248 if (value == tipc_max_ports)
251 return tipc_cfg_reply_none(); 249 return tipc_cfg_reply_none();
252 if (value != delimit(value, 127, 65535)) 250 if (value < 127 || value > 65535)
253 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 251 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
254 " (max ports must be 127-65535)"); 252 " (max ports must be 127-65535)");
255 if (tipc_mode != TIPC_NOT_RUNNING) 253 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
256 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 254 " (cannot change max ports while TIPC is active)");
257 " (cannot change max ports while TIPC is active)");
258 tipc_max_ports = value;
259 return tipc_cfg_reply_none();
260} 255}
261 256
262static struct sk_buff *cfg_set_netid(void) 257static struct sk_buff *cfg_set_netid(void)
@@ -268,10 +263,10 @@ static struct sk_buff *cfg_set_netid(void)
268 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 263 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
269 if (value == tipc_net_id) 264 if (value == tipc_net_id)
270 return tipc_cfg_reply_none(); 265 return tipc_cfg_reply_none();
271 if (value != delimit(value, 1, 9999)) 266 if (value < 1 || value > 9999)
272 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 267 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
273 " (network id must be 1-9999)"); 268 " (network id must be 1-9999)");
274 if (tipc_mode == TIPC_NET_MODE) 269 if (tipc_own_addr)
275 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 270 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
276 " (cannot change network id once TIPC has joined a network)"); 271 " (cannot change network id once TIPC has joined a network)");
277 tipc_net_id = value; 272 tipc_net_id = value;
@@ -286,14 +281,12 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
286 spin_lock_bh(&config_lock); 281 spin_lock_bh(&config_lock);
287 282
288 /* Save request and reply details in a well-known location */ 283 /* Save request and reply details in a well-known location */
289
290 req_tlv_area = request_area; 284 req_tlv_area = request_area;
291 req_tlv_space = request_space; 285 req_tlv_space = request_space;
292 rep_headroom = reply_headroom; 286 rep_headroom = reply_headroom;
293 287
294 /* Check command authorization */ 288 /* Check command authorization */
295 289 if (likely(in_own_node(orig_node))) {
296 if (likely(orig_node == tipc_own_addr)) {
297 /* command is permitted */ 290 /* command is permitted */
298 } else if (cmd >= 0x8000) { 291 } else if (cmd >= 0x8000) {
299 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 292 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -313,7 +306,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
313 } 306 }
314 307
315 /* Call appropriate processing routine */ 308 /* Call appropriate processing routine */
316
317 switch (cmd) { 309 switch (cmd) {
318 case TIPC_CMD_NOOP: 310 case TIPC_CMD_NOOP:
319 rep_tlv_buf = tipc_cfg_reply_none(); 311 rep_tlv_buf = tipc_cfg_reply_none();
@@ -436,7 +428,6 @@ static void cfg_named_msg_event(void *userdata,
436 struct sk_buff *rep_buf; 428 struct sk_buff *rep_buf;
437 429
438 /* Validate configuration message header (ignore invalid message) */ 430 /* Validate configuration message header (ignore invalid message) */
439
440 req_hdr = (struct tipc_cfg_msg_hdr *)msg; 431 req_hdr = (struct tipc_cfg_msg_hdr *)msg;
441 if ((size < sizeof(*req_hdr)) || 432 if ((size < sizeof(*req_hdr)) ||
442 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || 433 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
@@ -446,7 +437,6 @@ static void cfg_named_msg_event(void *userdata,
446 } 437 }
447 438
448 /* Generate reply for request (if can't, return request) */ 439 /* Generate reply for request (if can't, return request) */
449
450 rep_buf = tipc_cfg_do_cmd(orig->node, 440 rep_buf = tipc_cfg_do_cmd(orig->node,
451 ntohs(req_hdr->tcm_type), 441 ntohs(req_hdr->tcm_type),
452 msg + sizeof(*req_hdr), 442 msg + sizeof(*req_hdr),
@@ -481,7 +471,7 @@ int tipc_cfg_init(void)
481 471
482 seq.type = TIPC_CFG_SRV; 472 seq.type = TIPC_CFG_SRV;
483 seq.lower = seq.upper = tipc_own_addr; 473 seq.lower = seq.upper = tipc_own_addr;
484 res = tipc_nametbl_publish_rsv(config_port_ref, TIPC_ZONE_SCOPE, &seq); 474 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
485 if (res) 475 if (res)
486 goto failed; 476 goto failed;
487 477
@@ -492,10 +482,23 @@ failed:
492 return res; 482 return res;
493} 483}
494 484
485void tipc_cfg_reinit(void)
486{
487 struct tipc_name_seq seq;
488 int res;
489
490 seq.type = TIPC_CFG_SRV;
491 seq.lower = seq.upper = 0;
492 tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq);
493
494 seq.lower = seq.upper = tipc_own_addr;
495 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
496 if (res)
497 err("Unable to reinitialize configuration service\n");
498}
499
495void tipc_cfg_stop(void) 500void tipc_cfg_stop(void)
496{ 501{
497 if (config_port_ref) { 502 tipc_deleteport(config_port_ref);
498 tipc_deleteport(config_port_ref); 503 config_port_ref = 0;
499 config_port_ref = 0;
500 }
501} 504}
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 80da6ebc2785..1f252f3fa058 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -66,6 +66,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
66 int headroom); 66 int headroom);
67 67
68int tipc_cfg_init(void); 68int tipc_cfg_init(void);
69void tipc_cfg_reinit(void);
69void tipc_cfg_stop(void); 70void tipc_cfg_stop(void);
70 71
71#endif 72#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 2691cd57b8a8..f7b95239ebda 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -52,15 +52,12 @@
52#endif 52#endif
53 53
54/* global variables used by multiple sub-systems within TIPC */ 54/* global variables used by multiple sub-systems within TIPC */
55
56int tipc_mode = TIPC_NOT_RUNNING;
57int tipc_random; 55int tipc_random;
58 56
59const char tipc_alphabet[] = 57const char tipc_alphabet[] =
60 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_."; 58 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
61 59
62/* configurable TIPC parameters */ 60/* configurable TIPC parameters */
63
64u32 tipc_own_addr; 61u32 tipc_own_addr;
65int tipc_max_ports; 62int tipc_max_ports;
66int tipc_max_subscriptions; 63int tipc_max_subscriptions;
@@ -78,7 +75,6 @@ int tipc_remote_management;
78 * NOTE: Headroom is reserved to allow prepending of a data link header. 75 * NOTE: Headroom is reserved to allow prepending of a data link header.
79 * There may also be unrequested tailroom present at the buffer's end. 76 * There may also be unrequested tailroom present at the buffer's end.
80 */ 77 */
81
82struct sk_buff *tipc_buf_acquire(u32 size) 78struct sk_buff *tipc_buf_acquire(u32 size)
83{ 79{
84 struct sk_buff *skb; 80 struct sk_buff *skb;
@@ -96,7 +92,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
96/** 92/**
97 * tipc_core_stop_net - shut down TIPC networking sub-systems 93 * tipc_core_stop_net - shut down TIPC networking sub-systems
98 */ 94 */
99
100static void tipc_core_stop_net(void) 95static void tipc_core_stop_net(void)
101{ 96{
102 tipc_net_stop(); 97 tipc_net_stop();
@@ -106,7 +101,6 @@ static void tipc_core_stop_net(void)
106/** 101/**
107 * start_net - start TIPC networking sub-systems 102 * start_net - start TIPC networking sub-systems
108 */ 103 */
109
110int tipc_core_start_net(unsigned long addr) 104int tipc_core_start_net(unsigned long addr)
111{ 105{
112 int res; 106 int res;
@@ -122,14 +116,8 @@ int tipc_core_start_net(unsigned long addr)
122/** 116/**
123 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode 117 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
124 */ 118 */
125
126static void tipc_core_stop(void) 119static void tipc_core_stop(void)
127{ 120{
128 if (tipc_mode != TIPC_NODE_MODE)
129 return;
130
131 tipc_mode = TIPC_NOT_RUNNING;
132
133 tipc_netlink_stop(); 121 tipc_netlink_stop();
134 tipc_handler_stop(); 122 tipc_handler_stop();
135 tipc_cfg_stop(); 123 tipc_cfg_stop();
@@ -143,16 +131,11 @@ static void tipc_core_stop(void)
143/** 131/**
144 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode 132 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
145 */ 133 */
146
147static int tipc_core_start(void) 134static int tipc_core_start(void)
148{ 135{
149 int res; 136 int res;
150 137
151 if (tipc_mode != TIPC_NOT_RUNNING)
152 return -ENOPROTOOPT;
153
154 get_random_bytes(&tipc_random, sizeof(tipc_random)); 138 get_random_bytes(&tipc_random, sizeof(tipc_random));
155 tipc_mode = TIPC_NODE_MODE;
156 139
157 res = tipc_handler_start(); 140 res = tipc_handler_start();
158 if (!res) 141 if (!res)
@@ -160,9 +143,9 @@ static int tipc_core_start(void)
160 if (!res) 143 if (!res)
161 res = tipc_nametbl_init(); 144 res = tipc_nametbl_init();
162 if (!res) 145 if (!res)
163 res = tipc_k_signal((Handler)tipc_subscr_start, 0); 146 res = tipc_subscr_start();
164 if (!res) 147 if (!res)
165 res = tipc_k_signal((Handler)tipc_cfg_init, 0); 148 res = tipc_cfg_init();
166 if (!res) 149 if (!res)
167 res = tipc_netlink_start(); 150 res = tipc_netlink_start();
168 if (!res) 151 if (!res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 2761af36d141..2a9bb99537b3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -85,7 +85,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
85/* 85/*
86 * TIPC_OUTPUT is the destination print buffer for system messages. 86 * TIPC_OUTPUT is the destination print buffer for system messages.
87 */ 87 */
88
89#ifndef TIPC_OUTPUT 88#ifndef TIPC_OUTPUT
90#define TIPC_OUTPUT TIPC_LOG 89#define TIPC_OUTPUT TIPC_LOG
91#endif 90#endif
@@ -102,7 +101,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
102/* 101/*
103 * DBG_OUTPUT is the destination print buffer for debug messages. 102 * DBG_OUTPUT is the destination print buffer for debug messages.
104 */ 103 */
105
106#ifndef DBG_OUTPUT 104#ifndef DBG_OUTPUT
107#define DBG_OUTPUT TIPC_LOG 105#define DBG_OUTPUT TIPC_LOG
108#endif 106#endif
@@ -126,20 +124,11 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
126/* 124/*
127 * TIPC-specific error codes 125 * TIPC-specific error codes
128 */ 126 */
129
130#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 127#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
131 128
132/* 129/*
133 * TIPC operating mode routines
134 */
135#define TIPC_NOT_RUNNING 0
136#define TIPC_NODE_MODE 1
137#define TIPC_NET_MODE 2
138
139/*
140 * Global configuration variables 130 * Global configuration variables
141 */ 131 */
142
143extern u32 tipc_own_addr; 132extern u32 tipc_own_addr;
144extern int tipc_max_ports; 133extern int tipc_max_ports;
145extern int tipc_max_subscriptions; 134extern int tipc_max_subscriptions;
@@ -150,8 +139,6 @@ extern int tipc_remote_management;
150/* 139/*
151 * Other global variables 140 * Other global variables
152 */ 141 */
153
154extern int tipc_mode;
155extern int tipc_random; 142extern int tipc_random;
156extern const char tipc_alphabet[]; 143extern const char tipc_alphabet[];
157 144
@@ -159,7 +146,6 @@ extern const char tipc_alphabet[];
159/* 146/*
160 * Routines available to privileged subsystems 147 * Routines available to privileged subsystems
161 */ 148 */
162
163extern int tipc_core_start_net(unsigned long); 149extern int tipc_core_start_net(unsigned long);
164extern int tipc_handler_start(void); 150extern int tipc_handler_start(void);
165extern void tipc_handler_stop(void); 151extern void tipc_handler_stop(void);
@@ -168,20 +154,9 @@ extern void tipc_netlink_stop(void);
168extern int tipc_socket_init(void); 154extern int tipc_socket_init(void);
169extern void tipc_socket_stop(void); 155extern void tipc_socket_stop(void);
170 156
171static inline int delimit(int val, int min, int max)
172{
173 if (val > max)
174 return max;
175 if (val < min)
176 return min;
177 return val;
178}
179
180
181/* 157/*
182 * TIPC timer and signal code 158 * TIPC timer and signal code
183 */ 159 */
184
185typedef void (*Handler) (unsigned long); 160typedef void (*Handler) (unsigned long);
186 161
187u32 tipc_k_signal(Handler routine, unsigned long argument); 162u32 tipc_k_signal(Handler routine, unsigned long argument);
@@ -194,7 +169,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
194 * 169 *
195 * Timer must be initialized before use (and terminated when no longer needed). 170 * Timer must be initialized before use (and terminated when no longer needed).
196 */ 171 */
197
198static inline void k_init_timer(struct timer_list *timer, Handler routine, 172static inline void k_init_timer(struct timer_list *timer, Handler routine,
199 unsigned long argument) 173 unsigned long argument)
200{ 174{
@@ -214,7 +188,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
214 * then an additional jiffy is added to account for the fact that 188 * then an additional jiffy is added to account for the fact that
215 * the starting time may be in the middle of the current jiffy. 189 * the starting time may be in the middle of the current jiffy.
216 */ 190 */
217
218static inline void k_start_timer(struct timer_list *timer, unsigned long msec) 191static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
219{ 192{
220 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1); 193 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
@@ -230,7 +203,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
230 * WARNING: Must not be called when holding locks required by the timer's 203 * WARNING: Must not be called when holding locks required by the timer's
231 * timeout routine, otherwise deadlock can occur on SMP systems! 204 * timeout routine, otherwise deadlock can occur on SMP systems!
232 */ 205 */
233
234static inline void k_cancel_timer(struct timer_list *timer) 206static inline void k_cancel_timer(struct timer_list *timer)
235{ 207{
236 del_timer_sync(timer); 208 del_timer_sync(timer);
@@ -247,12 +219,10 @@ static inline void k_cancel_timer(struct timer_list *timer)
247 * (Do not "enhance" this routine to automatically cancel an active timer, 219 * (Do not "enhance" this routine to automatically cancel an active timer,
248 * otherwise deadlock can arise when a timeout routine calls k_term_timer.) 220 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
249 */ 221 */
250
251static inline void k_term_timer(struct timer_list *timer) 222static inline void k_term_timer(struct timer_list *timer)
252{ 223{
253} 224}
254 225
255
256/* 226/*
257 * TIPC message buffer code 227 * TIPC message buffer code
258 * 228 *
@@ -262,7 +232,6 @@ static inline void k_term_timer(struct timer_list *timer)
262 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields 232 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
263 * are word aligned for quicker access 233 * are word aligned for quicker access
264 */ 234 */
265
266#define BUF_HEADROOM LL_MAX_HEADER 235#define BUF_HEADROOM LL_MAX_HEADER
267 236
268struct tipc_skb_cb { 237struct tipc_skb_cb {
@@ -271,7 +240,6 @@ struct tipc_skb_cb {
271 240
272#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 241#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
273 242
274
275static inline struct tipc_msg *buf_msg(struct sk_buff *skb) 243static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
276{ 244{
277 return (struct tipc_msg *)skb->data; 245 return (struct tipc_msg *)skb->data;
@@ -279,28 +247,4 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
279 247
280extern struct sk_buff *tipc_buf_acquire(u32 size); 248extern struct sk_buff *tipc_buf_acquire(u32 size);
281 249
282/**
283 * buf_discard - frees a TIPC message buffer
284 * @skb: message buffer
285 *
286 * Frees a message buffer. If passed NULL, just returns.
287 */
288
289static inline void buf_discard(struct sk_buff *skb)
290{
291 kfree_skb(skb);
292}
293
294/**
295 * buf_linearize - convert a TIPC message buffer into a single contiguous piece
296 * @skb: message buffer
297 *
298 * Returns 0 on success.
299 */
300
301static inline int buf_linearize(struct sk_buff *skb)
302{
303 return skb_linearize(skb);
304}
305
306#endif 250#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index a00e5f811569..ae054cfe179f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -70,7 +70,6 @@ struct tipc_link_req {
70 * @dest_domain: network domain of node(s) which should respond to message 70 * @dest_domain: network domain of node(s) which should respond to message
71 * @b_ptr: ptr to bearer issuing message 71 * @b_ptr: ptr to bearer issuing message
72 */ 72 */
73
74static struct sk_buff *tipc_disc_init_msg(u32 type, 73static struct sk_buff *tipc_disc_init_msg(u32 type,
75 u32 dest_domain, 74 u32 dest_domain,
76 struct tipc_bearer *b_ptr) 75 struct tipc_bearer *b_ptr)
@@ -82,6 +81,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
82 msg = buf_msg(buf); 81 msg = buf_msg(buf);
83 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 82 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
84 msg_set_non_seq(msg, 1); 83 msg_set_non_seq(msg, 1);
84 msg_set_node_sig(msg, tipc_random);
85 msg_set_dest_domain(msg, dest_domain); 85 msg_set_dest_domain(msg, dest_domain);
86 msg_set_bc_netid(msg, tipc_net_id); 86 msg_set_bc_netid(msg, tipc_net_id);
87 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg)); 87 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
@@ -95,7 +95,6 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
95 * @node_addr: duplicated node address 95 * @node_addr: duplicated node address
96 * @media_addr: media address advertised by duplicated node 96 * @media_addr: media address advertised by duplicated node
97 */ 97 */
98
99static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, 98static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
100 struct tipc_media_addr *media_addr) 99 struct tipc_media_addr *media_addr)
101{ 100{
@@ -116,25 +115,26 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
116 * @buf: buffer containing message 115 * @buf: buffer containing message
117 * @b_ptr: bearer that message arrived on 116 * @b_ptr: bearer that message arrived on
118 */ 117 */
119
120void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) 118void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
121{ 119{
122 struct tipc_node *n_ptr; 120 struct tipc_node *n_ptr;
123 struct tipc_link *link; 121 struct tipc_link *link;
124 struct tipc_media_addr media_addr, *addr; 122 struct tipc_media_addr media_addr;
125 struct sk_buff *rbuf; 123 struct sk_buff *rbuf;
126 struct tipc_msg *msg = buf_msg(buf); 124 struct tipc_msg *msg = buf_msg(buf);
127 u32 dest = msg_dest_domain(msg); 125 u32 dest = msg_dest_domain(msg);
128 u32 orig = msg_prevnode(msg); 126 u32 orig = msg_prevnode(msg);
129 u32 net_id = msg_bc_netid(msg); 127 u32 net_id = msg_bc_netid(msg);
130 u32 type = msg_type(msg); 128 u32 type = msg_type(msg);
129 u32 signature = msg_node_sig(msg);
130 int addr_mismatch;
131 int link_fully_up; 131 int link_fully_up;
132 132
133 media_addr.broadcast = 1; 133 media_addr.broadcast = 1;
134 b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg)); 134 b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
135 buf_discard(buf); 135 kfree_skb(buf);
136 136
137 /* Validate discovery message from requesting node */ 137 /* Ensure message from node is valid and communication is permitted */
138 if (net_id != tipc_net_id) 138 if (net_id != tipc_net_id)
139 return; 139 return;
140 if (media_addr.broadcast) 140 if (media_addr.broadcast)
@@ -162,15 +162,50 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
162 } 162 }
163 tipc_node_lock(n_ptr); 163 tipc_node_lock(n_ptr);
164 164
165 /* Prepare to validate requesting node's signature and media address */
165 link = n_ptr->links[b_ptr->identity]; 166 link = n_ptr->links[b_ptr->identity];
167 addr_mismatch = (link != NULL) &&
168 memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
166 169
167 /* Create a link endpoint for this bearer, if necessary */ 170 /*
168 if (!link) { 171 * Ensure discovery message's signature is correct
169 link = tipc_link_create(n_ptr, b_ptr, &media_addr); 172 *
170 if (!link) { 173 * If signature is incorrect and there is no working link to the node,
174 * accept the new signature but invalidate all existing links to the
175 * node so they won't re-activate without a new discovery message.
176 *
177 * If signature is incorrect and the requested link to the node is
178 * working, accept the new signature. (This is an instance of delayed
179 * rediscovery, where a link endpoint was able to re-establish contact
180 * with its peer endpoint on a node that rebooted before receiving a
181 * discovery message from that node.)
182 *
183 * If signature is incorrect and there is a working link to the node
184 * that is not the requested link, reject the request (must be from
185 * a duplicate node).
186 */
187 if (signature != n_ptr->signature) {
188 if (n_ptr->working_links == 0) {
189 struct tipc_link *curr_link;
190 int i;
191
192 for (i = 0; i < MAX_BEARERS; i++) {
193 curr_link = n_ptr->links[i];
194 if (curr_link) {
195 memset(&curr_link->media_addr, 0,
196 sizeof(media_addr));
197 tipc_link_reset(curr_link);
198 }
199 }
200 addr_mismatch = (link != NULL);
201 } else if (tipc_link_is_up(link) && !addr_mismatch) {
202 /* delayed rediscovery */
203 } else {
204 disc_dupl_alert(b_ptr, orig, &media_addr);
171 tipc_node_unlock(n_ptr); 205 tipc_node_unlock(n_ptr);
172 return; 206 return;
173 } 207 }
208 n_ptr->signature = signature;
174 } 209 }
175 210
176 /* 211 /*
@@ -183,17 +218,25 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
183 * the new media address and reset the link to ensure it starts up 218 * the new media address and reset the link to ensure it starts up
184 * cleanly. 219 * cleanly.
185 */ 220 */
186 addr = &link->media_addr; 221 if (addr_mismatch) {
187 if (memcmp(addr, &media_addr, sizeof(*addr))) { 222 if (tipc_link_is_up(link)) {
188 if (tipc_link_is_up(link) || (!link->started)) {
189 disc_dupl_alert(b_ptr, orig, &media_addr); 223 disc_dupl_alert(b_ptr, orig, &media_addr);
190 tipc_node_unlock(n_ptr); 224 tipc_node_unlock(n_ptr);
191 return; 225 return;
226 } else {
227 memcpy(&link->media_addr, &media_addr,
228 sizeof(media_addr));
229 tipc_link_reset(link);
230 }
231 }
232
233 /* Create a link endpoint for this bearer, if necessary */
234 if (!link) {
235 link = tipc_link_create(n_ptr, b_ptr, &media_addr);
236 if (!link) {
237 tipc_node_unlock(n_ptr);
238 return;
192 } 239 }
193 warn("Resetting link <%s>, peer interface address changed\n",
194 link->name);
195 memcpy(addr, &media_addr, sizeof(*addr));
196 tipc_link_reset(link);
197 } 240 }
198 241
199 /* Accept discovery message & send response, if necessary */ 242 /* Accept discovery message & send response, if necessary */
@@ -203,7 +246,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
203 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr); 246 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
204 if (rbuf) { 247 if (rbuf) {
205 b_ptr->media->send_msg(rbuf, b_ptr, &media_addr); 248 b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
206 buf_discard(rbuf); 249 kfree_skb(rbuf);
207 } 250 }
208 } 251 }
209 252
@@ -217,7 +260,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
217 * Reinitiates discovery process if discovery object has no associated nodes 260 * Reinitiates discovery process if discovery object has no associated nodes
218 * and is either not currently searching or is searching at a slow rate 261 * and is either not currently searching or is searching at a slow rate
219 */ 262 */
220
221static void disc_update(struct tipc_link_req *req) 263static void disc_update(struct tipc_link_req *req)
222{ 264{
223 if (!req->num_nodes) { 265 if (!req->num_nodes) {
@@ -233,7 +275,6 @@ static void disc_update(struct tipc_link_req *req)
233 * tipc_disc_add_dest - increment set of discovered nodes 275 * tipc_disc_add_dest - increment set of discovered nodes
234 * @req: ptr to link request structure 276 * @req: ptr to link request structure
235 */ 277 */
236
237void tipc_disc_add_dest(struct tipc_link_req *req) 278void tipc_disc_add_dest(struct tipc_link_req *req)
238{ 279{
239 req->num_nodes++; 280 req->num_nodes++;
@@ -243,7 +284,6 @@ void tipc_disc_add_dest(struct tipc_link_req *req)
243 * tipc_disc_remove_dest - decrement set of discovered nodes 284 * tipc_disc_remove_dest - decrement set of discovered nodes
244 * @req: ptr to link request structure 285 * @req: ptr to link request structure
245 */ 286 */
246
247void tipc_disc_remove_dest(struct tipc_link_req *req) 287void tipc_disc_remove_dest(struct tipc_link_req *req)
248{ 288{
249 req->num_nodes--; 289 req->num_nodes--;
@@ -254,7 +294,6 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
254 * disc_send_msg - send link setup request message 294 * disc_send_msg - send link setup request message
255 * @req: ptr to link request structure 295 * @req: ptr to link request structure
256 */ 296 */
257
258static void disc_send_msg(struct tipc_link_req *req) 297static void disc_send_msg(struct tipc_link_req *req)
259{ 298{
260 if (!req->bearer->blocked) 299 if (!req->bearer->blocked)
@@ -267,7 +306,6 @@ static void disc_send_msg(struct tipc_link_req *req)
267 * 306 *
268 * Called whenever a link setup request timer associated with a bearer expires. 307 * Called whenever a link setup request timer associated with a bearer expires.
269 */ 308 */
270
271static void disc_timeout(struct tipc_link_req *req) 309static void disc_timeout(struct tipc_link_req *req)
272{ 310{
273 int max_delay; 311 int max_delay;
@@ -275,7 +313,6 @@ static void disc_timeout(struct tipc_link_req *req)
275 spin_lock_bh(&req->bearer->lock); 313 spin_lock_bh(&req->bearer->lock);
276 314
277 /* Stop searching if only desired node has been found */ 315 /* Stop searching if only desired node has been found */
278
279 if (tipc_node(req->domain) && req->num_nodes) { 316 if (tipc_node(req->domain) && req->num_nodes) {
280 req->timer_intv = TIPC_LINK_REQ_INACTIVE; 317 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
281 goto exit; 318 goto exit;
@@ -288,7 +325,6 @@ static void disc_timeout(struct tipc_link_req *req)
288 * hold at fast polling rate if don't have any associated nodes, 325 * hold at fast polling rate if don't have any associated nodes,
289 * otherwise hold at slow polling rate 326 * otherwise hold at slow polling rate
290 */ 327 */
291
292 disc_send_msg(req); 328 disc_send_msg(req);
293 329
294 req->timer_intv *= 2; 330 req->timer_intv *= 2;
@@ -312,7 +348,6 @@ exit:
312 * 348 *
313 * Returns 0 if successful, otherwise -errno. 349 * Returns 0 if successful, otherwise -errno.
314 */ 350 */
315
316int tipc_disc_create(struct tipc_bearer *b_ptr, 351int tipc_disc_create(struct tipc_bearer *b_ptr,
317 struct tipc_media_addr *dest, u32 dest_domain) 352 struct tipc_media_addr *dest, u32 dest_domain)
318{ 353{
@@ -344,12 +379,10 @@ int tipc_disc_create(struct tipc_bearer *b_ptr,
344 * tipc_disc_delete - destroy object sending periodic link setup requests 379 * tipc_disc_delete - destroy object sending periodic link setup requests
345 * @req: ptr to link request structure 380 * @req: ptr to link request structure
346 */ 381 */
347
348void tipc_disc_delete(struct tipc_link_req *req) 382void tipc_disc_delete(struct tipc_link_req *req)
349{ 383{
350 k_cancel_timer(&req->timer); 384 k_cancel_timer(&req->timer);
351 k_term_timer(&req->timer); 385 k_term_timer(&req->timer);
352 buf_discard(req->buf); 386 kfree_skb(req->buf);
353 kfree(req); 387 kfree(req);
354} 388}
355
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0e165d..90ac9bfa7abb 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -48,7 +48,6 @@
48 * @tipc_packet_type: used in binding TIPC to Ethernet driver 48 * @tipc_packet_type: used in binding TIPC to Ethernet driver
49 * @cleanup: work item used when disabling bearer 49 * @cleanup: work item used when disabling bearer
50 */ 50 */
51
52struct eth_bearer { 51struct eth_bearer {
53 struct tipc_bearer *bearer; 52 struct tipc_bearer *bearer;
54 struct net_device *dev; 53 struct net_device *dev;
@@ -67,7 +66,6 @@ static struct notifier_block notifier;
67 * Media-dependent "value" field stores MAC address in first 6 bytes 66 * Media-dependent "value" field stores MAC address in first 6 bytes
68 * and zeroes out the remaining bytes. 67 * and zeroes out the remaining bytes.
69 */ 68 */
70
71static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) 69static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
72{ 70{
73 memcpy(a->value, mac, ETH_ALEN); 71 memcpy(a->value, mac, ETH_ALEN);
@@ -79,7 +77,6 @@ static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
79/** 77/**
80 * send_msg - send a TIPC message out over an Ethernet interface 78 * send_msg - send a TIPC message out over an Ethernet interface
81 */ 79 */
82
83static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 80static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
84 struct tipc_media_addr *dest) 81 struct tipc_media_addr *dest)
85{ 82{
@@ -115,7 +112,6 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
115 * ignores packets sent using Ethernet multicast, and traffic sent to other 112 * ignores packets sent using Ethernet multicast, and traffic sent to other
116 * nodes (which can happen if interface is running in promiscuous mode). 113 * nodes (which can happen if interface is running in promiscuous mode).
117 */ 114 */
118
119static int recv_msg(struct sk_buff *buf, struct net_device *dev, 115static int recv_msg(struct sk_buff *buf, struct net_device *dev,
120 struct packet_type *pt, struct net_device *orig_dev) 116 struct packet_type *pt, struct net_device *orig_dev)
121{ 117{
@@ -140,7 +136,6 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
140/** 136/**
141 * enable_bearer - attach TIPC bearer to an Ethernet interface 137 * enable_bearer - attach TIPC bearer to an Ethernet interface
142 */ 138 */
143
144static int enable_bearer(struct tipc_bearer *tb_ptr) 139static int enable_bearer(struct tipc_bearer *tb_ptr)
145{ 140{
146 struct net_device *dev = NULL; 141 struct net_device *dev = NULL;
@@ -151,7 +146,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
151 int pending_dev = 0; 146 int pending_dev = 0;
152 147
153 /* Find unused Ethernet bearer structure */ 148 /* Find unused Ethernet bearer structure */
154
155 while (eb_ptr->dev) { 149 while (eb_ptr->dev) {
156 if (!eb_ptr->bearer) 150 if (!eb_ptr->bearer)
157 pending_dev++; 151 pending_dev++;
@@ -160,7 +154,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
160 } 154 }
161 155
162 /* Find device with specified name */ 156 /* Find device with specified name */
163
164 read_lock(&dev_base_lock); 157 read_lock(&dev_base_lock);
165 for_each_netdev(&init_net, pdev) { 158 for_each_netdev(&init_net, pdev) {
166 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { 159 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
@@ -174,7 +167,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
174 return -ENODEV; 167 return -ENODEV;
175 168
176 /* Create Ethernet bearer for device */ 169 /* Create Ethernet bearer for device */
177
178 eb_ptr->dev = dev; 170 eb_ptr->dev = dev;
179 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); 171 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
180 eb_ptr->tipc_packet_type.dev = dev; 172 eb_ptr->tipc_packet_type.dev = dev;
@@ -184,7 +176,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
184 dev_add_pack(&eb_ptr->tipc_packet_type); 176 dev_add_pack(&eb_ptr->tipc_packet_type);
185 177
186 /* Associate TIPC bearer with Ethernet bearer */ 178 /* Associate TIPC bearer with Ethernet bearer */
187
188 eb_ptr->bearer = tb_ptr; 179 eb_ptr->bearer = tb_ptr;
189 tb_ptr->usr_handle = (void *)eb_ptr; 180 tb_ptr->usr_handle = (void *)eb_ptr;
190 tb_ptr->mtu = dev->mtu; 181 tb_ptr->mtu = dev->mtu;
@@ -198,7 +189,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
198 * 189 *
199 * This routine must be invoked from a work queue because it can sleep. 190 * This routine must be invoked from a work queue because it can sleep.
200 */ 191 */
201
202static void cleanup_bearer(struct work_struct *work) 192static void cleanup_bearer(struct work_struct *work)
203{ 193{
204 struct eth_bearer *eb_ptr = 194 struct eth_bearer *eb_ptr =
@@ -216,7 +206,6 @@ static void cleanup_bearer(struct work_struct *work)
216 * then get worker thread to complete bearer cleanup. (Can't do cleanup 206 * then get worker thread to complete bearer cleanup. (Can't do cleanup
217 * here because cleanup code needs to sleep and caller holds spinlocks.) 207 * here because cleanup code needs to sleep and caller holds spinlocks.)
218 */ 208 */
219
220static void disable_bearer(struct tipc_bearer *tb_ptr) 209static void disable_bearer(struct tipc_bearer *tb_ptr)
221{ 210{
222 struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle; 211 struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
@@ -232,7 +221,6 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
232 * Change the state of the Ethernet bearer (if any) associated with the 221 * Change the state of the Ethernet bearer (if any) associated with the
233 * specified device. 222 * specified device.
234 */ 223 */
235
236static int recv_notification(struct notifier_block *nb, unsigned long evt, 224static int recv_notification(struct notifier_block *nb, unsigned long evt,
237 void *dv) 225 void *dv)
238{ 226{
@@ -281,7 +269,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
281/** 269/**
282 * eth_addr2str - convert Ethernet address to string 270 * eth_addr2str - convert Ethernet address to string
283 */ 271 */
284
285static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) 272static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
286{ 273{
287 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ 274 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
@@ -294,7 +281,6 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
294/** 281/**
295 * eth_str2addr - convert string to Ethernet address 282 * eth_str2addr - convert string to Ethernet address
296 */ 283 */
297
298static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) 284static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
299{ 285{
300 char mac[ETH_ALEN]; 286 char mac[ETH_ALEN];
@@ -314,7 +300,6 @@ static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
314/** 300/**
315 * eth_str2addr - convert Ethernet address format to message header format 301 * eth_str2addr - convert Ethernet address format to message header format
316 */ 302 */
317
318static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) 303static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
319{ 304{
320 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 305 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
@@ -326,7 +311,6 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
326/** 311/**
327 * eth_str2addr - convert message header address format to Ethernet format 312 * eth_str2addr - convert message header address format to Ethernet format
328 */ 313 */
329
330static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) 314static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
331{ 315{
332 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) 316 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
@@ -339,7 +323,6 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
339/* 323/*
340 * Ethernet media registration info 324 * Ethernet media registration info
341 */ 325 */
342
343static struct tipc_media eth_media_info = { 326static struct tipc_media eth_media_info = {
344 .send_msg = send_msg, 327 .send_msg = send_msg,
345 .enable_bearer = enable_bearer, 328 .enable_bearer = enable_bearer,
@@ -363,7 +346,6 @@ static struct tipc_media eth_media_info = {
363 * Register Ethernet media type with TIPC bearer code. Also register 346 * Register Ethernet media type with TIPC bearer code. Also register
364 * with OS for notifications about device state changes. 347 * with OS for notifications about device state changes.
365 */ 348 */
366
367int tipc_eth_media_start(void) 349int tipc_eth_media_start(void)
368{ 350{
369 int res; 351 int res;
@@ -386,7 +368,6 @@ int tipc_eth_media_start(void)
386/** 368/**
387 * tipc_eth_media_stop - deactivate Ethernet bearer support 369 * tipc_eth_media_stop - deactivate Ethernet bearer support
388 */ 370 */
389
390void tipc_eth_media_stop(void) 371void tipc_eth_media_stop(void)
391{ 372{
392 if (!eth_started) 373 if (!eth_started)
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 274c98e164b7..9c6f22ff1c6d 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -129,4 +129,3 @@ void tipc_handler_stop(void)
129 129
130 kmem_cache_destroy(tipc_queue_item_cache); 130 kmem_cache_destroy(tipc_queue_item_cache);
131} 131}
132
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ac1832a66f8a..7a614f43549d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -45,13 +45,11 @@
45/* 45/*
46 * Out-of-range value for link session numbers 46 * Out-of-range value for link session numbers
47 */ 47 */
48
49#define INVALID_SESSION 0x10000 48#define INVALID_SESSION 0x10000
50 49
51/* 50/*
52 * Link state events: 51 * Link state events:
53 */ 52 */
54
55#define STARTING_EVT 856384768 /* link processing trigger */ 53#define STARTING_EVT 856384768 /* link processing trigger */
56#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 54#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
57#define TIMEOUT_EVT 560817u /* link timer expired */ 55#define TIMEOUT_EVT 560817u /* link timer expired */
@@ -67,7 +65,6 @@
67/* 65/*
68 * State value stored in 'exp_msg_count' 66 * State value stored in 'exp_msg_count'
69 */ 67 */
70
71#define START_CHANGEOVER 100000u 68#define START_CHANGEOVER 100000u
72 69
73/** 70/**
@@ -77,7 +74,6 @@
77 * @addr_peer: network address of node at far end 74 * @addr_peer: network address of node at far end
78 * @if_peer: name of interface at far end 75 * @if_peer: name of interface at far end
79 */ 76 */
80
81struct tipc_link_name { 77struct tipc_link_name {
82 u32 addr_local; 78 u32 addr_local;
83 char if_local[TIPC_MAX_IF_NAME]; 79 char if_local[TIPC_MAX_IF_NAME];
@@ -105,7 +101,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
105/* 101/*
106 * Simple link routines 102 * Simple link routines
107 */ 103 */
108
109static unsigned int align(unsigned int i) 104static unsigned int align(unsigned int i)
110{ 105{
111 return (i + 3) & ~3u; 106 return (i + 3) & ~3u;
@@ -143,7 +138,6 @@ static u32 link_last_sent(struct tipc_link *l_ptr)
143/* 138/*
144 * Simple non-static link routines (i.e. referenced outside this file) 139 * Simple non-static link routines (i.e. referenced outside this file)
145 */ 140 */
146
147int tipc_link_is_up(struct tipc_link *l_ptr) 141int tipc_link_is_up(struct tipc_link *l_ptr)
148{ 142{
149 if (!l_ptr) 143 if (!l_ptr)
@@ -164,7 +158,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
164 * 158 *
165 * Returns 1 if link name is valid, otherwise 0. 159 * Returns 1 if link name is valid, otherwise 0.
166 */ 160 */
167
168static int link_name_validate(const char *name, 161static int link_name_validate(const char *name,
169 struct tipc_link_name *name_parts) 162 struct tipc_link_name *name_parts)
170{ 163{
@@ -180,7 +173,6 @@ static int link_name_validate(const char *name,
180 u32 if_peer_len; 173 u32 if_peer_len;
181 174
182 /* copy link name & ensure length is OK */ 175 /* copy link name & ensure length is OK */
183
184 name_copy[TIPC_MAX_LINK_NAME - 1] = 0; 176 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
185 /* need above in case non-Posix strncpy() doesn't pad with nulls */ 177 /* need above in case non-Posix strncpy() doesn't pad with nulls */
186 strncpy(name_copy, name, TIPC_MAX_LINK_NAME); 178 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
@@ -188,7 +180,6 @@ static int link_name_validate(const char *name,
188 return 0; 180 return 0;
189 181
190 /* ensure all component parts of link name are present */ 182 /* ensure all component parts of link name are present */
191
192 addr_local = name_copy; 183 addr_local = name_copy;
193 if_local = strchr(addr_local, ':'); 184 if_local = strchr(addr_local, ':');
194 if (if_local == NULL) 185 if (if_local == NULL)
@@ -206,7 +197,6 @@ static int link_name_validate(const char *name,
206 if_peer_len = strlen(if_peer) + 1; 197 if_peer_len = strlen(if_peer) + 1;
207 198
208 /* validate component parts of link name */ 199 /* validate component parts of link name */
209
210 if ((sscanf(addr_local, "%u.%u.%u%c", 200 if ((sscanf(addr_local, "%u.%u.%u%c",
211 &z_local, &c_local, &n_local, &dummy) != 3) || 201 &z_local, &c_local, &n_local, &dummy) != 3) ||
212 (sscanf(addr_peer, "%u.%u.%u%c", 202 (sscanf(addr_peer, "%u.%u.%u%c",
@@ -220,7 +210,6 @@ static int link_name_validate(const char *name,
220 return 0; 210 return 0;
221 211
222 /* return link name components, if necessary */ 212 /* return link name components, if necessary */
223
224 if (name_parts) { 213 if (name_parts) {
225 name_parts->addr_local = tipc_addr(z_local, c_local, n_local); 214 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
226 strcpy(name_parts->if_local, if_local); 215 strcpy(name_parts->if_local, if_local);
@@ -239,13 +228,11 @@ static int link_name_validate(const char *name,
239 * another thread because tipc_link_delete() always cancels the link timer before 228 * another thread because tipc_link_delete() always cancels the link timer before
240 * tipc_node_delete() is called.) 229 * tipc_node_delete() is called.)
241 */ 230 */
242
243static void link_timeout(struct tipc_link *l_ptr) 231static void link_timeout(struct tipc_link *l_ptr)
244{ 232{
245 tipc_node_lock(l_ptr->owner); 233 tipc_node_lock(l_ptr->owner);
246 234
247 /* update counters used in statistical profiling of send traffic */ 235 /* update counters used in statistical profiling of send traffic */
248
249 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; 236 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
250 l_ptr->stats.queue_sz_counts++; 237 l_ptr->stats.queue_sz_counts++;
251 238
@@ -278,7 +265,6 @@ static void link_timeout(struct tipc_link *l_ptr)
278 } 265 }
279 266
280 /* do all other link processing performed on a periodic basis */ 267 /* do all other link processing performed on a periodic basis */
281
282 link_check_defragm_bufs(l_ptr); 268 link_check_defragm_bufs(l_ptr);
283 269
284 link_state_event(l_ptr, TIMEOUT_EVT); 270 link_state_event(l_ptr, TIMEOUT_EVT);
@@ -302,7 +288,6 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
302 * 288 *
303 * Returns pointer to link. 289 * Returns pointer to link.
304 */ 290 */
305
306struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 291struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
307 struct tipc_bearer *b_ptr, 292 struct tipc_bearer *b_ptr,
308 const struct tipc_media_addr *media_addr) 293 const struct tipc_media_addr *media_addr)
@@ -383,7 +368,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
383 * This routine must not grab the node lock until after link timer cancellation 368 * This routine must not grab the node lock until after link timer cancellation
384 * to avoid a potential deadlock situation. 369 * to avoid a potential deadlock situation.
385 */ 370 */
386
387void tipc_link_delete(struct tipc_link *l_ptr) 371void tipc_link_delete(struct tipc_link *l_ptr)
388{ 372{
389 if (!l_ptr) { 373 if (!l_ptr) {
@@ -419,7 +403,6 @@ static void link_start(struct tipc_link *l_ptr)
419 * Schedules port for renewed sending of messages after link congestion 403 * Schedules port for renewed sending of messages after link congestion
420 * has abated. 404 * has abated.
421 */ 405 */
422
423static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) 406static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
424{ 407{
425 struct tipc_port *p_ptr; 408 struct tipc_port *p_ptr;
@@ -476,7 +459,6 @@ exit:
476 * link_release_outqueue - purge link's outbound message queue 459 * link_release_outqueue - purge link's outbound message queue
477 * @l_ptr: pointer to link 460 * @l_ptr: pointer to link
478 */ 461 */
479
480static void link_release_outqueue(struct tipc_link *l_ptr) 462static void link_release_outqueue(struct tipc_link *l_ptr)
481{ 463{
482 struct sk_buff *buf = l_ptr->first_out; 464 struct sk_buff *buf = l_ptr->first_out;
@@ -484,7 +466,7 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
484 466
485 while (buf) { 467 while (buf) {
486 next = buf->next; 468 next = buf->next;
487 buf_discard(buf); 469 kfree_skb(buf);
488 buf = next; 470 buf = next;
489 } 471 }
490 l_ptr->first_out = NULL; 472 l_ptr->first_out = NULL;
@@ -495,7 +477,6 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
495 * tipc_link_reset_fragments - purge link's inbound message fragments queue 477 * tipc_link_reset_fragments - purge link's inbound message fragments queue
496 * @l_ptr: pointer to link 478 * @l_ptr: pointer to link
497 */ 479 */
498
499void tipc_link_reset_fragments(struct tipc_link *l_ptr) 480void tipc_link_reset_fragments(struct tipc_link *l_ptr)
500{ 481{
501 struct sk_buff *buf = l_ptr->defragm_buf; 482 struct sk_buff *buf = l_ptr->defragm_buf;
@@ -503,7 +484,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
503 484
504 while (buf) { 485 while (buf) {
505 next = buf->next; 486 next = buf->next;
506 buf_discard(buf); 487 kfree_skb(buf);
507 buf = next; 488 buf = next;
508 } 489 }
509 l_ptr->defragm_buf = NULL; 490 l_ptr->defragm_buf = NULL;
@@ -513,7 +494,6 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
513 * tipc_link_stop - purge all inbound and outbound messages associated with link 494 * tipc_link_stop - purge all inbound and outbound messages associated with link
514 * @l_ptr: pointer to link 495 * @l_ptr: pointer to link
515 */ 496 */
516
517void tipc_link_stop(struct tipc_link *l_ptr) 497void tipc_link_stop(struct tipc_link *l_ptr)
518{ 498{
519 struct sk_buff *buf; 499 struct sk_buff *buf;
@@ -522,20 +502,20 @@ void tipc_link_stop(struct tipc_link *l_ptr)
522 buf = l_ptr->oldest_deferred_in; 502 buf = l_ptr->oldest_deferred_in;
523 while (buf) { 503 while (buf) {
524 next = buf->next; 504 next = buf->next;
525 buf_discard(buf); 505 kfree_skb(buf);
526 buf = next; 506 buf = next;
527 } 507 }
528 508
529 buf = l_ptr->first_out; 509 buf = l_ptr->first_out;
530 while (buf) { 510 while (buf) {
531 next = buf->next; 511 next = buf->next;
532 buf_discard(buf); 512 kfree_skb(buf);
533 buf = next; 513 buf = next;
534 } 514 }
535 515
536 tipc_link_reset_fragments(l_ptr); 516 tipc_link_reset_fragments(l_ptr);
537 517
538 buf_discard(l_ptr->proto_msg_queue); 518 kfree_skb(l_ptr->proto_msg_queue);
539 l_ptr->proto_msg_queue = NULL; 519 l_ptr->proto_msg_queue = NULL;
540} 520}
541 521
@@ -569,14 +549,13 @@ void tipc_link_reset(struct tipc_link *l_ptr)
569 } 549 }
570 550
571 /* Clean up all queues: */ 551 /* Clean up all queues: */
572
573 link_release_outqueue(l_ptr); 552 link_release_outqueue(l_ptr);
574 buf_discard(l_ptr->proto_msg_queue); 553 kfree_skb(l_ptr->proto_msg_queue);
575 l_ptr->proto_msg_queue = NULL; 554 l_ptr->proto_msg_queue = NULL;
576 buf = l_ptr->oldest_deferred_in; 555 buf = l_ptr->oldest_deferred_in;
577 while (buf) { 556 while (buf) {
578 struct sk_buff *next = buf->next; 557 struct sk_buff *next = buf->next;
579 buf_discard(buf); 558 kfree_skb(buf);
580 buf = next; 559 buf = next;
581 } 560 }
582 if (!list_empty(&l_ptr->waiting_ports)) 561 if (!list_empty(&l_ptr->waiting_ports))
@@ -611,8 +590,7 @@ static void link_activate(struct tipc_link *l_ptr)
611 * @l_ptr: pointer to link 590 * @l_ptr: pointer to link
612 * @event: state machine event to process 591 * @event: state machine event to process
613 */ 592 */
614 593static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
615static void link_state_event(struct tipc_link *l_ptr, unsigned event)
616{ 594{
617 struct tipc_link *other; 595 struct tipc_link *other;
618 u32 cont_intv = l_ptr->continuity_interval; 596 u32 cont_intv = l_ptr->continuity_interval;
@@ -785,7 +763,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned event)
785 * link_bundle_buf(): Append contents of a buffer to 763 * link_bundle_buf(): Append contents of a buffer to
786 * the tail of an existing one. 764 * the tail of an existing one.
787 */ 765 */
788
789static int link_bundle_buf(struct tipc_link *l_ptr, 766static int link_bundle_buf(struct tipc_link *l_ptr,
790 struct sk_buff *bundler, 767 struct sk_buff *bundler,
791 struct sk_buff *buf) 768 struct sk_buff *buf)
@@ -810,7 +787,7 @@ static int link_bundle_buf(struct tipc_link *l_ptr,
810 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); 787 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
811 msg_set_size(bundler_msg, to_pos + size); 788 msg_set_size(bundler_msg, to_pos + size);
812 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); 789 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
813 buf_discard(buf); 790 kfree_skb(buf);
814 l_ptr->stats.sent_bundled++; 791 l_ptr->stats.sent_bundled++;
815 return 1; 792 return 1;
816} 793}
@@ -860,7 +837,6 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
860 * inside TIPC when the 'fast path' in tipc_send_buf 837 * inside TIPC when the 'fast path' in tipc_send_buf
861 * has failed, and from link_send() 838 * has failed, and from link_send()
862 */ 839 */
863
864int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 840int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
865{ 841{
866 struct tipc_msg *msg = buf_msg(buf); 842 struct tipc_msg *msg = buf_msg(buf);
@@ -871,17 +847,14 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
871 u32 queue_limit = l_ptr->queue_limit[imp]; 847 u32 queue_limit = l_ptr->queue_limit[imp];
872 u32 max_packet = l_ptr->max_pkt; 848 u32 max_packet = l_ptr->max_pkt;
873 849
874 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
875
876 /* Match msg importance against queue limits: */ 850 /* Match msg importance against queue limits: */
877
878 if (unlikely(queue_size >= queue_limit)) { 851 if (unlikely(queue_size >= queue_limit)) {
879 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 852 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
880 link_schedule_port(l_ptr, msg_origport(msg), size); 853 link_schedule_port(l_ptr, msg_origport(msg), size);
881 buf_discard(buf); 854 kfree_skb(buf);
882 return -ELINKCONG; 855 return -ELINKCONG;
883 } 856 }
884 buf_discard(buf); 857 kfree_skb(buf);
885 if (imp > CONN_MANAGER) { 858 if (imp > CONN_MANAGER) {
886 warn("Resetting link <%s>, send queue full", l_ptr->name); 859 warn("Resetting link <%s>, send queue full", l_ptr->name);
887 tipc_link_reset(l_ptr); 860 tipc_link_reset(l_ptr);
@@ -890,12 +863,10 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
890 } 863 }
891 864
892 /* Fragmentation needed ? */ 865 /* Fragmentation needed ? */
893
894 if (size > max_packet) 866 if (size > max_packet)
895 return link_send_long_buf(l_ptr, buf); 867 return link_send_long_buf(l_ptr, buf);
896 868
897 /* Packet can be queued or sent: */ 869 /* Packet can be queued or sent. */
898
899 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && 870 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
900 !link_congested(l_ptr))) { 871 !link_congested(l_ptr))) {
901 link_add_to_outqueue(l_ptr, buf, msg); 872 link_add_to_outqueue(l_ptr, buf, msg);
@@ -909,13 +880,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
909 } 880 }
910 return dsz; 881 return dsz;
911 } 882 }
912 /* Congestion: can message be bundled ?: */ 883 /* Congestion: can message be bundled ? */
913
914 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && 884 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
915 (msg_user(msg) != MSG_FRAGMENTER)) { 885 (msg_user(msg) != MSG_FRAGMENTER)) {
916 886
917 /* Try adding message to an existing bundle */ 887 /* Try adding message to an existing bundle */
918
919 if (l_ptr->next_out && 888 if (l_ptr->next_out &&
920 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 889 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
921 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 890 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
@@ -923,7 +892,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
923 } 892 }
924 893
925 /* Try creating a new bundle */ 894 /* Try creating a new bundle */
926
927 if (size <= max_packet * 2 / 3) { 895 if (size <= max_packet * 2 / 3) {
928 struct sk_buff *bundler = tipc_buf_acquire(max_packet); 896 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
929 struct tipc_msg bundler_hdr; 897 struct tipc_msg bundler_hdr;
@@ -953,7 +921,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
953 * not been selected yet, and the the owner node is not locked 921 * not been selected yet, and the the owner node is not locked
954 * Called by TIPC internal users, e.g. the name distributor 922 * Called by TIPC internal users, e.g. the name distributor
955 */ 923 */
956
957int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) 924int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
958{ 925{
959 struct tipc_link *l_ptr; 926 struct tipc_link *l_ptr;
@@ -968,10 +935,10 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
968 if (l_ptr) 935 if (l_ptr)
969 res = tipc_link_send_buf(l_ptr, buf); 936 res = tipc_link_send_buf(l_ptr, buf);
970 else 937 else
971 buf_discard(buf); 938 kfree_skb(buf);
972 tipc_node_unlock(n_ptr); 939 tipc_node_unlock(n_ptr);
973 } else { 940 } else {
974 buf_discard(buf); 941 kfree_skb(buf);
975 } 942 }
976 read_unlock_bh(&tipc_net_lock); 943 read_unlock_bh(&tipc_net_lock);
977 return res; 944 return res;
@@ -986,7 +953,6 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
986 * small enough not to require fragmentation. 953 * small enough not to require fragmentation.
987 * Called without any locks held. 954 * Called without any locks held.
988 */ 955 */
989
990void tipc_link_send_names(struct list_head *message_list, u32 dest) 956void tipc_link_send_names(struct list_head *message_list, u32 dest)
991{ 957{
992 struct tipc_node *n_ptr; 958 struct tipc_node *n_ptr;
@@ -1015,10 +981,9 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
1015 read_unlock_bh(&tipc_net_lock); 981 read_unlock_bh(&tipc_net_lock);
1016 982
1017 /* discard the messages if they couldn't be sent */ 983 /* discard the messages if they couldn't be sent */
1018
1019 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 984 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1020 list_del((struct list_head *)buf); 985 list_del((struct list_head *)buf);
1021 buf_discard(buf); 986 kfree_skb(buf);
1022 } 987 }
1023} 988}
1024 989
@@ -1028,7 +993,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
1028 * inclusive total message length. Very time critical. 993 * inclusive total message length. Very time critical.
1029 * Link is locked. Returns user data length. 994 * Link is locked. Returns user data length.
1030 */ 995 */
1031
1032static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, 996static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
1033 u32 *used_max_pkt) 997 u32 *used_max_pkt)
1034{ 998{
@@ -1113,7 +1077,6 @@ again:
1113 * Try building message using port's max_pkt hint. 1077 * Try building message using port's max_pkt hint.
1114 * (Must not hold any locks while building message.) 1078 * (Must not hold any locks while building message.)
1115 */ 1079 */
1116
1117 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, 1080 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1118 sender->max_pkt, !sender->user_port, &buf); 1081 sender->max_pkt, !sender->user_port, &buf);
1119 1082
@@ -1133,12 +1096,10 @@ exit:
1133 } 1096 }
1134 1097
1135 /* Exit if build request was invalid */ 1098 /* Exit if build request was invalid */
1136
1137 if (unlikely(res < 0)) 1099 if (unlikely(res < 0))
1138 goto exit; 1100 goto exit;
1139 1101
1140 /* Exit if link (or bearer) is congested */ 1102 /* Exit if link (or bearer) is congested */
1141
1142 if (link_congested(l_ptr) || 1103 if (link_congested(l_ptr) ||
1143 !list_empty(&l_ptr->b_ptr->cong_links)) { 1104 !list_empty(&l_ptr->b_ptr->cong_links)) {
1144 res = link_schedule_port(l_ptr, 1105 res = link_schedule_port(l_ptr,
@@ -1150,7 +1111,6 @@ exit:
1150 * Message size exceeds max_pkt hint; update hint, 1111 * Message size exceeds max_pkt hint; update hint,
1151 * then re-try fast path or fragment the message 1112 * then re-try fast path or fragment the message
1152 */ 1113 */
1153
1154 sender->max_pkt = l_ptr->max_pkt; 1114 sender->max_pkt = l_ptr->max_pkt;
1155 tipc_node_unlock(node); 1115 tipc_node_unlock(node);
1156 read_unlock_bh(&tipc_net_lock); 1116 read_unlock_bh(&tipc_net_lock);
@@ -1168,7 +1128,6 @@ exit:
1168 read_unlock_bh(&tipc_net_lock); 1128 read_unlock_bh(&tipc_net_lock);
1169 1129
1170 /* Couldn't find a link to the destination node */ 1130 /* Couldn't find a link to the destination node */
1171
1172 if (buf) 1131 if (buf)
1173 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1132 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1174 if (res >= 0) 1133 if (res >= 0)
@@ -1222,15 +1181,13 @@ again:
1222 sect_crs = NULL; 1181 sect_crs = NULL;
1223 curr_sect = -1; 1182 curr_sect = -1;
1224 1183
1225 /* Prepare reusable fragment header: */ 1184 /* Prepare reusable fragment header */
1226
1227 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1185 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1228 INT_H_SIZE, msg_destnode(hdr)); 1186 INT_H_SIZE, msg_destnode(hdr));
1229 msg_set_size(&fragm_hdr, max_pkt); 1187 msg_set_size(&fragm_hdr, max_pkt);
1230 msg_set_fragm_no(&fragm_hdr, 1); 1188 msg_set_fragm_no(&fragm_hdr, 1);
1231 1189
1232 /* Prepare header of first fragment: */ 1190 /* Prepare header of first fragment */
1233
1234 buf_chain = buf = tipc_buf_acquire(max_pkt); 1191 buf_chain = buf = tipc_buf_acquire(max_pkt);
1235 if (!buf) 1192 if (!buf)
1236 return -ENOMEM; 1193 return -ENOMEM;
@@ -1239,8 +1196,7 @@ again:
1239 hsz = msg_hdr_sz(hdr); 1196 hsz = msg_hdr_sz(hdr);
1240 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); 1197 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1241 1198
1242 /* Chop up message: */ 1199 /* Chop up message */
1243
1244 fragm_crs = INT_H_SIZE + hsz; 1200 fragm_crs = INT_H_SIZE + hsz;
1245 fragm_rest = fragm_sz - hsz; 1201 fragm_rest = fragm_sz - hsz;
1246 1202
@@ -1262,7 +1218,7 @@ again:
1262error: 1218error:
1263 for (; buf_chain; buf_chain = buf) { 1219 for (; buf_chain; buf_chain = buf) {
1264 buf = buf_chain->next; 1220 buf = buf_chain->next;
1265 buf_discard(buf_chain); 1221 kfree_skb(buf_chain);
1266 } 1222 }
1267 return -EFAULT; 1223 return -EFAULT;
1268 } 1224 }
@@ -1316,7 +1272,7 @@ error:
1316 tipc_node_unlock(node); 1272 tipc_node_unlock(node);
1317 for (; buf_chain; buf_chain = buf) { 1273 for (; buf_chain; buf_chain = buf) {
1318 buf = buf_chain->next; 1274 buf = buf_chain->next;
1319 buf_discard(buf_chain); 1275 kfree_skb(buf_chain);
1320 } 1276 }
1321 goto again; 1277 goto again;
1322 } 1278 }
@@ -1324,14 +1280,13 @@ error:
1324reject: 1280reject:
1325 for (; buf_chain; buf_chain = buf) { 1281 for (; buf_chain; buf_chain = buf) {
1326 buf = buf_chain->next; 1282 buf = buf_chain->next;
1327 buf_discard(buf_chain); 1283 kfree_skb(buf_chain);
1328 } 1284 }
1329 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1285 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1330 total_len, TIPC_ERR_NO_NODE); 1286 total_len, TIPC_ERR_NO_NODE);
1331 } 1287 }
1332 1288
1333 /* Append chain of fragments to send queue & send them */ 1289 /* Append chain of fragments to send queue & send them */
1334
1335 l_ptr->long_msg_seq_no++; 1290 l_ptr->long_msg_seq_no++;
1336 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 1291 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1337 l_ptr->stats.sent_fragments += fragm_no; 1292 l_ptr->stats.sent_fragments += fragm_no;
@@ -1352,7 +1307,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1352 1307
1353 /* Step to position where retransmission failed, if any, */ 1308 /* Step to position where retransmission failed, if any, */
1354 /* consider that buffers may have been released in meantime */ 1309 /* consider that buffers may have been released in meantime */
1355
1356 if (r_q_size && buf) { 1310 if (r_q_size && buf) {
1357 u32 last = lesser(mod(r_q_head + r_q_size), 1311 u32 last = lesser(mod(r_q_head + r_q_size),
1358 link_last_sent(l_ptr)); 1312 link_last_sent(l_ptr));
@@ -1367,7 +1321,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1367 } 1321 }
1368 1322
1369 /* Continue retransmission now, if there is anything: */ 1323 /* Continue retransmission now, if there is anything: */
1370
1371 if (r_q_size && buf) { 1324 if (r_q_size && buf) {
1372 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1325 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1373 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1326 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
@@ -1383,14 +1336,13 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1383 } 1336 }
1384 1337
1385 /* Send deferred protocol message, if any: */ 1338 /* Send deferred protocol message, if any: */
1386
1387 buf = l_ptr->proto_msg_queue; 1339 buf = l_ptr->proto_msg_queue;
1388 if (buf) { 1340 if (buf) {
1389 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1341 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1390 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1342 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1391 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1343 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1392 l_ptr->unacked_window = 0; 1344 l_ptr->unacked_window = 0;
1393 buf_discard(buf); 1345 kfree_skb(buf);
1394 l_ptr->proto_msg_queue = NULL; 1346 l_ptr->proto_msg_queue = NULL;
1395 return 0; 1347 return 0;
1396 } else { 1348 } else {
@@ -1400,7 +1352,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1400 } 1352 }
1401 1353
1402 /* Send one deferred data message, if send window not full: */ 1354 /* Send one deferred data message, if send window not full: */
1403
1404 buf = l_ptr->next_out; 1355 buf = l_ptr->next_out;
1405 if (buf) { 1356 if (buf) {
1406 struct tipc_msg *msg = buf_msg(buf); 1357 struct tipc_msg *msg = buf_msg(buf);
@@ -1480,16 +1431,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1480 warn("Retransmission failure on link <%s>\n", l_ptr->name); 1431 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1481 1432
1482 if (l_ptr->addr) { 1433 if (l_ptr->addr) {
1483
1484 /* Handle failure on standard link */ 1434 /* Handle failure on standard link */
1485
1486 link_print(l_ptr, "Resetting link\n"); 1435 link_print(l_ptr, "Resetting link\n");
1487 tipc_link_reset(l_ptr); 1436 tipc_link_reset(l_ptr);
1488 1437
1489 } else { 1438 } else {
1490
1491 /* Handle failure on broadcast link */ 1439 /* Handle failure on broadcast link */
1492
1493 struct tipc_node *n_ptr; 1440 struct tipc_node *n_ptr;
1494 char addr_string[16]; 1441 char addr_string[16];
1495 1442
@@ -1501,13 +1448,13 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1501 tipc_node_lock(n_ptr); 1448 tipc_node_lock(n_ptr);
1502 1449
1503 tipc_addr_string_fill(addr_string, n_ptr->addr); 1450 tipc_addr_string_fill(addr_string, n_ptr->addr);
1504 info("Multicast link info for %s\n", addr_string); 1451 info("Broadcast link info for %s\n", addr_string);
1452 info("Supportable: %d, ", n_ptr->bclink.supportable);
1505 info("Supported: %d, ", n_ptr->bclink.supported); 1453 info("Supported: %d, ", n_ptr->bclink.supported);
1506 info("Acked: %u\n", n_ptr->bclink.acked); 1454 info("Acked: %u\n", n_ptr->bclink.acked);
1507 info("Last in: %u, ", n_ptr->bclink.last_in); 1455 info("Last in: %u, ", n_ptr->bclink.last_in);
1508 info("Gap after: %u, ", n_ptr->bclink.gap_after); 1456 info("Oos state: %u, ", n_ptr->bclink.oos_state);
1509 info("Gap to: %u\n", n_ptr->bclink.gap_to); 1457 info("Last sent: %u\n", n_ptr->bclink.last_sent);
1510 info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1511 1458
1512 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr); 1459 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1513 1460
@@ -1538,7 +1485,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1538 return; 1485 return;
1539 } else { 1486 } else {
1540 /* Detect repeated retransmit failures on uncongested bearer */ 1487 /* Detect repeated retransmit failures on uncongested bearer */
1541
1542 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1488 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1543 if (++l_ptr->stale_count > 100) { 1489 if (++l_ptr->stale_count > 100) {
1544 link_retransmit_failure(l_ptr, buf); 1490 link_retransmit_failure(l_ptr, buf);
@@ -1573,7 +1519,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1573/** 1519/**
1574 * link_insert_deferred_queue - insert deferred messages back into receive chain 1520 * link_insert_deferred_queue - insert deferred messages back into receive chain
1575 */ 1521 */
1576
1577static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, 1522static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1578 struct sk_buff *buf) 1523 struct sk_buff *buf)
1579{ 1524{
@@ -1604,7 +1549,6 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1604 * TIPC will ignore the excess, under the assumption that it is optional info 1549 * TIPC will ignore the excess, under the assumption that it is optional info
1605 * introduced by a later release of the protocol. 1550 * introduced by a later release of the protocol.
1606 */ 1551 */
1607
1608static int link_recv_buf_validate(struct sk_buff *buf) 1552static int link_recv_buf_validate(struct sk_buff *buf)
1609{ 1553{
1610 static u32 min_data_hdr_size[8] = { 1554 static u32 min_data_hdr_size[8] = {
@@ -1650,7 +1594,6 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1650 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1594 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1651 * structure (i.e. cannot be NULL), but bearer can be inactive. 1595 * structure (i.e. cannot be NULL), but bearer can be inactive.
1652 */ 1596 */
1653
1654void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) 1597void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1655{ 1598{
1656 read_lock_bh(&tipc_net_lock); 1599 read_lock_bh(&tipc_net_lock);
@@ -1668,22 +1611,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1668 head = head->next; 1611 head = head->next;
1669 1612
1670 /* Ensure bearer is still enabled */ 1613 /* Ensure bearer is still enabled */
1671
1672 if (unlikely(!b_ptr->active)) 1614 if (unlikely(!b_ptr->active))
1673 goto cont; 1615 goto cont;
1674 1616
1675 /* Ensure message is well-formed */ 1617 /* Ensure message is well-formed */
1676
1677 if (unlikely(!link_recv_buf_validate(buf))) 1618 if (unlikely(!link_recv_buf_validate(buf)))
1678 goto cont; 1619 goto cont;
1679 1620
1680 /* Ensure message data is a single contiguous unit */ 1621 /* Ensure message data is a single contiguous unit */
1681 1622 if (unlikely(skb_linearize(buf)))
1682 if (unlikely(buf_linearize(buf)))
1683 goto cont; 1623 goto cont;
1684 1624
1685 /* Handle arrival of a non-unicast link message */ 1625 /* Handle arrival of a non-unicast link message */
1686
1687 msg = buf_msg(buf); 1626 msg = buf_msg(buf);
1688 1627
1689 if (unlikely(msg_non_seq(msg))) { 1628 if (unlikely(msg_non_seq(msg))) {
@@ -1695,20 +1634,17 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1695 } 1634 }
1696 1635
1697 /* Discard unicast link messages destined for another node */ 1636 /* Discard unicast link messages destined for another node */
1698
1699 if (unlikely(!msg_short(msg) && 1637 if (unlikely(!msg_short(msg) &&
1700 (msg_destnode(msg) != tipc_own_addr))) 1638 (msg_destnode(msg) != tipc_own_addr)))
1701 goto cont; 1639 goto cont;
1702 1640
1703 /* Locate neighboring node that sent message */ 1641 /* Locate neighboring node that sent message */
1704
1705 n_ptr = tipc_node_find(msg_prevnode(msg)); 1642 n_ptr = tipc_node_find(msg_prevnode(msg));
1706 if (unlikely(!n_ptr)) 1643 if (unlikely(!n_ptr))
1707 goto cont; 1644 goto cont;
1708 tipc_node_lock(n_ptr); 1645 tipc_node_lock(n_ptr);
1709 1646
1710 /* Locate unicast link endpoint that should handle message */ 1647 /* Locate unicast link endpoint that should handle message */
1711
1712 l_ptr = n_ptr->links[b_ptr->identity]; 1648 l_ptr = n_ptr->links[b_ptr->identity];
1713 if (unlikely(!l_ptr)) { 1649 if (unlikely(!l_ptr)) {
1714 tipc_node_unlock(n_ptr); 1650 tipc_node_unlock(n_ptr);
@@ -1716,7 +1652,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1716 } 1652 }
1717 1653
1718 /* Verify that communication with node is currently allowed */ 1654 /* Verify that communication with node is currently allowed */
1719
1720 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1655 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1721 msg_user(msg) == LINK_PROTOCOL && 1656 msg_user(msg) == LINK_PROTOCOL &&
1722 (msg_type(msg) == RESET_MSG || 1657 (msg_type(msg) == RESET_MSG ||
@@ -1730,13 +1665,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1730 } 1665 }
1731 1666
1732 /* Validate message sequence number info */ 1667 /* Validate message sequence number info */
1733
1734 seq_no = msg_seqno(msg); 1668 seq_no = msg_seqno(msg);
1735 ackd = msg_ack(msg); 1669 ackd = msg_ack(msg);
1736 1670
1737 /* Release acked messages */ 1671 /* Release acked messages */
1738 1672 if (n_ptr->bclink.supported)
1739 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1740 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1673 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1741 1674
1742 crs = l_ptr->first_out; 1675 crs = l_ptr->first_out;
@@ -1744,7 +1677,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1744 less_eq(buf_seqno(crs), ackd)) { 1677 less_eq(buf_seqno(crs), ackd)) {
1745 struct sk_buff *next = crs->next; 1678 struct sk_buff *next = crs->next;
1746 1679
1747 buf_discard(crs); 1680 kfree_skb(crs);
1748 crs = next; 1681 crs = next;
1749 released++; 1682 released++;
1750 } 1683 }
@@ -1754,7 +1687,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1754 } 1687 }
1755 1688
1756 /* Try sending any messages link endpoint has pending */ 1689 /* Try sending any messages link endpoint has pending */
1757
1758 if (unlikely(l_ptr->next_out)) 1690 if (unlikely(l_ptr->next_out))
1759 tipc_link_push_queue(l_ptr); 1691 tipc_link_push_queue(l_ptr);
1760 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1692 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
@@ -1765,7 +1697,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1765 } 1697 }
1766 1698
1767 /* Now (finally!) process the incoming message */ 1699 /* Now (finally!) process the incoming message */
1768
1769protocol_check: 1700protocol_check:
1770 if (likely(link_working_working(l_ptr))) { 1701 if (likely(link_working_working(l_ptr))) {
1771 if (likely(seq_no == mod(l_ptr->next_in_no))) { 1702 if (likely(seq_no == mod(l_ptr->next_in_no))) {
@@ -1773,52 +1704,56 @@ protocol_check:
1773 if (unlikely(l_ptr->oldest_deferred_in)) 1704 if (unlikely(l_ptr->oldest_deferred_in))
1774 head = link_insert_deferred_queue(l_ptr, 1705 head = link_insert_deferred_queue(l_ptr,
1775 head); 1706 head);
1776 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1777deliver: 1707deliver:
1778 if (likely(msg_isdata(msg))) { 1708 if (likely(msg_isdata(msg))) {
1779 tipc_node_unlock(n_ptr); 1709 tipc_node_unlock(n_ptr);
1780 tipc_port_recv_msg(buf); 1710 tipc_port_recv_msg(buf);
1781 continue; 1711 continue;
1712 }
1713 switch (msg_user(msg)) {
1714 int ret;
1715 case MSG_BUNDLER:
1716 l_ptr->stats.recv_bundles++;
1717 l_ptr->stats.recv_bundled +=
1718 msg_msgcnt(msg);
1719 tipc_node_unlock(n_ptr);
1720 tipc_link_recv_bundle(buf);
1721 continue;
1722 case NAME_DISTRIBUTOR:
1723 tipc_node_unlock(n_ptr);
1724 tipc_named_recv(buf);
1725 continue;
1726 case CONN_MANAGER:
1727 tipc_node_unlock(n_ptr);
1728 tipc_port_recv_proto_msg(buf);
1729 continue;
1730 case MSG_FRAGMENTER:
1731 l_ptr->stats.recv_fragments++;
1732 ret = tipc_link_recv_fragment(
1733 &l_ptr->defragm_buf,
1734 &buf, &msg);
1735 if (ret == 1) {
1736 l_ptr->stats.recv_fragmented++;
1737 goto deliver;
1782 } 1738 }
1783 switch (msg_user(msg)) { 1739 if (ret == -1)
1784 case MSG_BUNDLER: 1740 l_ptr->next_in_no--;
1785 l_ptr->stats.recv_bundles++; 1741 break;
1786 l_ptr->stats.recv_bundled += 1742 case CHANGEOVER_PROTOCOL:
1787 msg_msgcnt(msg); 1743 type = msg_type(msg);
1788 tipc_node_unlock(n_ptr); 1744 if (link_recv_changeover_msg(&l_ptr,
1789 tipc_link_recv_bundle(buf); 1745 &buf)) {
1790 continue; 1746 msg = buf_msg(buf);
1791 case NAME_DISTRIBUTOR: 1747 seq_no = msg_seqno(msg);
1792 tipc_node_unlock(n_ptr); 1748 if (type == ORIGINAL_MSG)
1793 tipc_named_recv(buf);
1794 continue;
1795 case CONN_MANAGER:
1796 tipc_node_unlock(n_ptr);
1797 tipc_port_recv_proto_msg(buf);
1798 continue;
1799 case MSG_FRAGMENTER:
1800 l_ptr->stats.recv_fragments++;
1801 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1802 &buf, &msg)) {
1803 l_ptr->stats.recv_fragmented++;
1804 goto deliver; 1749 goto deliver;
1805 } 1750 goto protocol_check;
1806 break;
1807 case CHANGEOVER_PROTOCOL:
1808 type = msg_type(msg);
1809 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1810 msg = buf_msg(buf);
1811 seq_no = msg_seqno(msg);
1812 if (type == ORIGINAL_MSG)
1813 goto deliver;
1814 goto protocol_check;
1815 }
1816 break;
1817 default:
1818 buf_discard(buf);
1819 buf = NULL;
1820 break;
1821 } 1751 }
1752 break;
1753 default:
1754 kfree_skb(buf);
1755 buf = NULL;
1756 break;
1822 } 1757 }
1823 tipc_node_unlock(n_ptr); 1758 tipc_node_unlock(n_ptr);
1824 tipc_net_route_msg(buf); 1759 tipc_net_route_msg(buf);
@@ -1847,23 +1782,21 @@ deliver:
1847 } 1782 }
1848 tipc_node_unlock(n_ptr); 1783 tipc_node_unlock(n_ptr);
1849cont: 1784cont:
1850 buf_discard(buf); 1785 kfree_skb(buf);
1851 } 1786 }
1852 read_unlock_bh(&tipc_net_lock); 1787 read_unlock_bh(&tipc_net_lock);
1853} 1788}
1854 1789
1855/* 1790/*
1856 * link_defer_buf(): Sort a received out-of-sequence packet 1791 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1857 * into the deferred reception queue. 1792 *
1858 * Returns the increase of the queue length,i.e. 0 or 1 1793 * Returns increase in queue length (i.e. 0 or 1)
1859 */ 1794 */
1860 1795u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1861u32 tipc_link_defer_pkt(struct sk_buff **head,
1862 struct sk_buff **tail,
1863 struct sk_buff *buf) 1796 struct sk_buff *buf)
1864{ 1797{
1865 struct sk_buff *prev = NULL; 1798 struct sk_buff *queue_buf;
1866 struct sk_buff *crs = *head; 1799 struct sk_buff **prev;
1867 u32 seq_no = buf_seqno(buf); 1800 u32 seq_no = buf_seqno(buf);
1868 1801
1869 buf->next = NULL; 1802 buf->next = NULL;
@@ -1881,34 +1814,32 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
1881 return 1; 1814 return 1;
1882 } 1815 }
1883 1816
1884 /* Scan through queue and sort it in */ 1817 /* Locate insertion point in queue, then insert; discard if duplicate */
1885 do { 1818 prev = head;
1886 struct tipc_msg *msg = buf_msg(crs); 1819 queue_buf = *head;
1820 for (;;) {
1821 u32 curr_seqno = buf_seqno(queue_buf);
1887 1822
1888 if (less(seq_no, msg_seqno(msg))) { 1823 if (seq_no == curr_seqno) {
1889 buf->next = crs; 1824 kfree_skb(buf);
1890 if (prev) 1825 return 0;
1891 prev->next = buf;
1892 else
1893 *head = buf;
1894 return 1;
1895 } 1826 }
1896 if (seq_no == msg_seqno(msg)) 1827
1828 if (less(seq_no, curr_seqno))
1897 break; 1829 break;
1898 prev = crs;
1899 crs = crs->next;
1900 } while (crs);
1901 1830
1902 /* Message is a duplicate of an existing message */ 1831 prev = &queue_buf->next;
1832 queue_buf = queue_buf->next;
1833 }
1903 1834
1904 buf_discard(buf); 1835 buf->next = queue_buf;
1905 return 0; 1836 *prev = buf;
1837 return 1;
1906} 1838}
1907 1839
1908/** 1840/*
1909 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 1841 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1910 */ 1842 */
1911
1912static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 1843static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1913 struct sk_buff *buf) 1844 struct sk_buff *buf)
1914{ 1845{
@@ -1920,17 +1851,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1920 } 1851 }
1921 1852
1922 /* Record OOS packet arrival (force mismatch on next timeout) */ 1853 /* Record OOS packet arrival (force mismatch on next timeout) */
1923
1924 l_ptr->checkpoint--; 1854 l_ptr->checkpoint--;
1925 1855
1926 /* 1856 /*
1927 * Discard packet if a duplicate; otherwise add it to deferred queue 1857 * Discard packet if a duplicate; otherwise add it to deferred queue
1928 * and notify peer of gap as per protocol specification 1858 * and notify peer of gap as per protocol specification
1929 */ 1859 */
1930
1931 if (less(seq_no, mod(l_ptr->next_in_no))) { 1860 if (less(seq_no, mod(l_ptr->next_in_no))) {
1932 l_ptr->stats.duplicates++; 1861 l_ptr->stats.duplicates++;
1933 buf_discard(buf); 1862 kfree_skb(buf);
1934 return; 1863 return;
1935 } 1864 }
1936 1865
@@ -1956,17 +1885,23 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1956 u32 msg_size = sizeof(l_ptr->proto_msg); 1885 u32 msg_size = sizeof(l_ptr->proto_msg);
1957 int r_flag; 1886 int r_flag;
1958 1887
1888 /* Discard any previous message that was deferred due to congestion */
1889 if (l_ptr->proto_msg_queue) {
1890 kfree_skb(l_ptr->proto_msg_queue);
1891 l_ptr->proto_msg_queue = NULL;
1892 }
1893
1959 if (link_blocked(l_ptr)) 1894 if (link_blocked(l_ptr))
1960 return; 1895 return;
1961 1896
1962 /* Abort non-RESET send if communication with node is prohibited */ 1897 /* Abort non-RESET send if communication with node is prohibited */
1963
1964 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1898 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1965 return; 1899 return;
1966 1900
1901 /* Create protocol message with "out-of-sequence" sequence number */
1967 msg_set_type(msg, msg_typ); 1902 msg_set_type(msg, msg_typ);
1968 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1903 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1969 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1904 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1970 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1905 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1971 1906
1972 if (msg_typ == STATE_MSG) { 1907 if (msg_typ == STATE_MSG) {
@@ -2020,44 +1955,33 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2020 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); 1955 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
2021 msg_set_redundant_link(msg, r_flag); 1956 msg_set_redundant_link(msg, r_flag);
2022 msg_set_linkprio(msg, l_ptr->priority); 1957 msg_set_linkprio(msg, l_ptr->priority);
2023 1958 msg_set_size(msg, msg_size);
2024 /* Ensure sequence number will not fit : */
2025 1959
2026 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2))); 1960 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2027 1961
2028 /* Congestion? */
2029
2030 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2031 if (!l_ptr->proto_msg_queue) {
2032 l_ptr->proto_msg_queue =
2033 tipc_buf_acquire(sizeof(l_ptr->proto_msg));
2034 }
2035 buf = l_ptr->proto_msg_queue;
2036 if (!buf)
2037 return;
2038 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2039 return;
2040 }
2041
2042 /* Message can be sent */
2043
2044 buf = tipc_buf_acquire(msg_size); 1962 buf = tipc_buf_acquire(msg_size);
2045 if (!buf) 1963 if (!buf)
2046 return; 1964 return;
2047 1965
2048 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1966 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2049 msg_set_size(buf_msg(buf), msg_size);
2050 1967
2051 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1968 /* Defer message if bearer is already congested */
2052 l_ptr->unacked_window = 0; 1969 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2053 buf_discard(buf); 1970 l_ptr->proto_msg_queue = buf;
1971 return;
1972 }
1973
1974 /* Defer message if attempting to send results in bearer congestion */
1975 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1976 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1977 l_ptr->proto_msg_queue = buf;
1978 l_ptr->stats.bearer_congs++;
2054 return; 1979 return;
2055 } 1980 }
2056 1981
2057 /* New congestion */ 1982 /* Discard message if it was sent successfully */
2058 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); 1983 l_ptr->unacked_window = 0;
2059 l_ptr->proto_msg_queue = buf; 1984 kfree_skb(buf);
2060 l_ptr->stats.bearer_congs++;
2061} 1985}
2062 1986
2063/* 1987/*
@@ -2065,7 +1989,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2065 * Note that network plane id propagates through the network, and may 1989 * Note that network plane id propagates through the network, and may
2066 * change at any time. The node with lowest address rules 1990 * change at any time. The node with lowest address rules
2067 */ 1991 */
2068
2069static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) 1992static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2070{ 1993{
2071 u32 rec_gap = 0; 1994 u32 rec_gap = 0;
@@ -2078,7 +2001,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2078 goto exit; 2001 goto exit;
2079 2002
2080 /* record unnumbered packet arrival (force mismatch on next timeout) */ 2003 /* record unnumbered packet arrival (force mismatch on next timeout) */
2081
2082 l_ptr->checkpoint--; 2004 l_ptr->checkpoint--;
2083 2005
2084 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) 2006 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
@@ -2105,10 +2027,11 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2105 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 2027 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2106 } 2028 }
2107 2029
2030 link_state_event(l_ptr, RESET_MSG);
2031
2108 /* fall thru' */ 2032 /* fall thru' */
2109 case ACTIVATE_MSG: 2033 case ACTIVATE_MSG:
2110 /* Update link settings according other endpoint's values */ 2034 /* Update link settings according other endpoint's values */
2111
2112 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); 2035 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2113 2036
2114 msg_tol = msg_link_tolerance(msg); 2037 msg_tol = msg_link_tolerance(msg);
@@ -2127,16 +2050,21 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2127 } else { 2050 } else {
2128 l_ptr->max_pkt = l_ptr->max_pkt_target; 2051 l_ptr->max_pkt = l_ptr->max_pkt_target;
2129 } 2052 }
2130 l_ptr->owner->bclink.supported = (max_pkt_info != 0); 2053 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2131 2054
2132 link_state_event(l_ptr, msg_type(msg)); 2055 /* Synchronize broadcast link info, if not done previously */
2056 if (!tipc_node_is_up(l_ptr->owner)) {
2057 l_ptr->owner->bclink.last_sent =
2058 l_ptr->owner->bclink.last_in =
2059 msg_last_bcast(msg);
2060 l_ptr->owner->bclink.oos_state = 0;
2061 }
2133 2062
2134 l_ptr->peer_session = msg_session(msg); 2063 l_ptr->peer_session = msg_session(msg);
2135 l_ptr->peer_bearer_id = msg_bearer_id(msg); 2064 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2136 2065
2137 /* Synchronize broadcast sequence numbers */ 2066 if (msg_type(msg) == ACTIVATE_MSG)
2138 if (!tipc_node_redundant_links(l_ptr->owner)) 2067 link_state_event(l_ptr, ACTIVATE_MSG);
2139 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2140 break; 2068 break;
2141 case STATE_MSG: 2069 case STATE_MSG:
2142 2070
@@ -2176,8 +2104,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2176 } 2104 }
2177 2105
2178 /* Protocol message before retransmits, reduce loss risk */ 2106 /* Protocol message before retransmits, reduce loss risk */
2179 2107 if (l_ptr->owner->bclink.supported)
2180 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg)); 2108 tipc_bclink_update_link_state(l_ptr->owner,
2109 msg_last_bcast(msg));
2181 2110
2182 if (rec_gap || (msg_probe(msg))) { 2111 if (rec_gap || (msg_probe(msg))) {
2183 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2112 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
@@ -2191,7 +2120,7 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2191 break; 2120 break;
2192 } 2121 }
2193exit: 2122exit:
2194 buf_discard(buf); 2123 kfree_skb(buf);
2195} 2124}
2196 2125
2197 2126
@@ -2232,7 +2161,6 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
2232 * changeover(): Send whole message queue via the remaining link 2161 * changeover(): Send whole message queue via the remaining link
2233 * Owner node is locked. 2162 * Owner node is locked.
2234 */ 2163 */
2235
2236void tipc_link_changeover(struct tipc_link *l_ptr) 2164void tipc_link_changeover(struct tipc_link *l_ptr)
2237{ 2165{
2238 u32 msgcount = l_ptr->out_queue_size; 2166 u32 msgcount = l_ptr->out_queue_size;
@@ -2332,8 +2260,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2332 } 2260 }
2333} 2261}
2334 2262
2335
2336
2337/** 2263/**
2338 * buf_extract - extracts embedded TIPC message from another message 2264 * buf_extract - extracts embedded TIPC message from another message
2339 * @skb: encapsulating message buffer 2265 * @skb: encapsulating message buffer
@@ -2342,7 +2268,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2342 * Returns a new message buffer containing an embedded message. The 2268 * Returns a new message buffer containing an embedded message. The
2343 * encapsulating message itself is left unchanged. 2269 * encapsulating message itself is left unchanged.
2344 */ 2270 */
2345
2346static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2271static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2347{ 2272{
2348 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2273 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
@@ -2359,7 +2284,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2359 * link_recv_changeover_msg(): Receive tunneled packet sent 2284 * link_recv_changeover_msg(): Receive tunneled packet sent
2360 * via other link. Node is locked. Return extracted buffer. 2285 * via other link. Node is locked. Return extracted buffer.
2361 */ 2286 */
2362
2363static int link_recv_changeover_msg(struct tipc_link **l_ptr, 2287static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2364 struct sk_buff **buf) 2288 struct sk_buff **buf)
2365{ 2289{
@@ -2389,12 +2313,11 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2389 warn("Link changeover error, duplicate msg dropped\n"); 2313 warn("Link changeover error, duplicate msg dropped\n");
2390 goto exit; 2314 goto exit;
2391 } 2315 }
2392 buf_discard(tunnel_buf); 2316 kfree_skb(tunnel_buf);
2393 return 1; 2317 return 1;
2394 } 2318 }
2395 2319
2396 /* First original message ?: */ 2320 /* First original message ?: */
2397
2398 if (tipc_link_is_up(dest_link)) { 2321 if (tipc_link_is_up(dest_link)) {
2399 info("Resetting link <%s>, changeover initiated by peer\n", 2322 info("Resetting link <%s>, changeover initiated by peer\n",
2400 dest_link->name); 2323 dest_link->name);
@@ -2409,7 +2332,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2409 } 2332 }
2410 2333
2411 /* Receive original message */ 2334 /* Receive original message */
2412
2413 if (dest_link->exp_msg_count == 0) { 2335 if (dest_link->exp_msg_count == 0) {
2414 warn("Link switchover error, " 2336 warn("Link switchover error, "
2415 "got too many tunnelled messages\n"); 2337 "got too many tunnelled messages\n");
@@ -2421,7 +2343,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2421 } else { 2343 } else {
2422 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2344 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2423 if (*buf != NULL) { 2345 if (*buf != NULL) {
2424 buf_discard(tunnel_buf); 2346 kfree_skb(tunnel_buf);
2425 return 1; 2347 return 1;
2426 } else { 2348 } else {
2427 warn("Link changeover error, original msg dropped\n"); 2349 warn("Link changeover error, original msg dropped\n");
@@ -2429,7 +2351,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2429 } 2351 }
2430exit: 2352exit:
2431 *buf = NULL; 2353 *buf = NULL;
2432 buf_discard(tunnel_buf); 2354 kfree_skb(tunnel_buf);
2433 return 0; 2355 return 0;
2434} 2356}
2435 2357
@@ -2451,14 +2373,13 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2451 pos += align(msg_size(buf_msg(obuf))); 2373 pos += align(msg_size(buf_msg(obuf)));
2452 tipc_net_route_msg(obuf); 2374 tipc_net_route_msg(obuf);
2453 } 2375 }
2454 buf_discard(buf); 2376 kfree_skb(buf);
2455} 2377}
2456 2378
2457/* 2379/*
2458 * Fragmentation/defragmentation: 2380 * Fragmentation/defragmentation:
2459 */ 2381 */
2460 2382
2461
2462/* 2383/*
2463 * link_send_long_buf: Entry for buffers needing fragmentation. 2384 * link_send_long_buf: Entry for buffers needing fragmentation.
2464 * The buffer is complete, inclusive total message length. 2385 * The buffer is complete, inclusive total message length.
@@ -2485,12 +2406,10 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2485 destaddr = msg_destnode(inmsg); 2406 destaddr = msg_destnode(inmsg);
2486 2407
2487 /* Prepare reusable fragment header: */ 2408 /* Prepare reusable fragment header: */
2488
2489 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2409 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2490 INT_H_SIZE, destaddr); 2410 INT_H_SIZE, destaddr);
2491 2411
2492 /* Chop up message: */ 2412 /* Chop up message: */
2493
2494 while (rest > 0) { 2413 while (rest > 0) {
2495 struct sk_buff *fragm; 2414 struct sk_buff *fragm;
2496 2415
@@ -2500,11 +2419,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2500 } 2419 }
2501 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2420 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2502 if (fragm == NULL) { 2421 if (fragm == NULL) {
2503 buf_discard(buf); 2422 kfree_skb(buf);
2504 while (buf_chain) { 2423 while (buf_chain) {
2505 buf = buf_chain; 2424 buf = buf_chain;
2506 buf_chain = buf_chain->next; 2425 buf_chain = buf_chain->next;
2507 buf_discard(buf); 2426 kfree_skb(buf);
2508 } 2427 }
2509 return -ENOMEM; 2428 return -ENOMEM;
2510 } 2429 }
@@ -2521,10 +2440,9 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2521 crs += fragm_sz; 2440 crs += fragm_sz;
2522 msg_set_type(&fragm_hdr, FRAGMENT); 2441 msg_set_type(&fragm_hdr, FRAGMENT);
2523 } 2442 }
2524 buf_discard(buf); 2443 kfree_skb(buf);
2525 2444
2526 /* Append chain of fragments to send queue & send them */ 2445 /* Append chain of fragments to send queue & send them */
2527
2528 l_ptr->long_msg_seq_no++; 2446 l_ptr->long_msg_seq_no++;
2529 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2447 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2530 l_ptr->stats.sent_fragments += fragm_no; 2448 l_ptr->stats.sent_fragments += fragm_no;
@@ -2540,7 +2458,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2540 * help storing these values in unused, available fields in the 2458 * help storing these values in unused, available fields in the
2541 * pending message. This makes dynamic memory allocation unnecessary. 2459 * pending message. This makes dynamic memory allocation unnecessary.
2542 */ 2460 */
2543
2544static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) 2461static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2545{ 2462{
2546 msg_set_seqno(buf_msg(buf), seqno); 2463 msg_set_seqno(buf_msg(buf), seqno);
@@ -2592,7 +2509,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2592 *fb = NULL; 2509 *fb = NULL;
2593 2510
2594 /* Is there an incomplete message waiting for this fragment? */ 2511 /* Is there an incomplete message waiting for this fragment? */
2595
2596 while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) || 2512 while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2597 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { 2513 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2598 prev = pbuf; 2514 prev = pbuf;
@@ -2608,7 +2524,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2608 if (msg_type(imsg) == TIPC_MCAST_MSG) 2524 if (msg_type(imsg) == TIPC_MCAST_MSG)
2609 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; 2525 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2610 if (msg_size(imsg) > max) { 2526 if (msg_size(imsg) > max) {
2611 buf_discard(fbuf); 2527 kfree_skb(fbuf);
2612 return 0; 2528 return 0;
2613 } 2529 }
2614 pbuf = tipc_buf_acquire(msg_size(imsg)); 2530 pbuf = tipc_buf_acquire(msg_size(imsg));
@@ -2618,14 +2534,15 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2618 skb_copy_to_linear_data(pbuf, imsg, 2534 skb_copy_to_linear_data(pbuf, imsg,
2619 msg_data_sz(fragm)); 2535 msg_data_sz(fragm));
2620 /* Prepare buffer for subsequent fragments. */ 2536 /* Prepare buffer for subsequent fragments. */
2621
2622 set_long_msg_seqno(pbuf, long_msg_seq_no); 2537 set_long_msg_seqno(pbuf, long_msg_seq_no);
2623 set_fragm_size(pbuf, fragm_sz); 2538 set_fragm_size(pbuf, fragm_sz);
2624 set_expected_frags(pbuf, exp_fragm_cnt - 1); 2539 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2625 } else { 2540 } else {
2626 warn("Link unable to reassemble fragmented message\n"); 2541 dbg("Link unable to reassemble fragmented message\n");
2542 kfree_skb(fbuf);
2543 return -1;
2627 } 2544 }
2628 buf_discard(fbuf); 2545 kfree_skb(fbuf);
2629 return 0; 2546 return 0;
2630 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) { 2547 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2631 u32 dsz = msg_data_sz(fragm); 2548 u32 dsz = msg_data_sz(fragm);
@@ -2634,10 +2551,9 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2634 u32 exp_frags = get_expected_frags(pbuf) - 1; 2551 u32 exp_frags = get_expected_frags(pbuf) - 1;
2635 skb_copy_to_linear_data_offset(pbuf, crs, 2552 skb_copy_to_linear_data_offset(pbuf, crs,
2636 msg_data(fragm), dsz); 2553 msg_data(fragm), dsz);
2637 buf_discard(fbuf); 2554 kfree_skb(fbuf);
2638 2555
2639 /* Is message complete? */ 2556 /* Is message complete? */
2640
2641 if (exp_frags == 0) { 2557 if (exp_frags == 0) {
2642 if (prev) 2558 if (prev)
2643 prev->next = pbuf->next; 2559 prev->next = pbuf->next;
@@ -2651,7 +2567,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2651 set_expected_frags(pbuf, exp_frags); 2567 set_expected_frags(pbuf, exp_frags);
2652 return 0; 2568 return 0;
2653 } 2569 }
2654 buf_discard(fbuf); 2570 kfree_skb(fbuf);
2655 return 0; 2571 return 0;
2656} 2572}
2657 2573
@@ -2659,7 +2575,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2659 * link_check_defragm_bufs - flush stale incoming message fragments 2575 * link_check_defragm_bufs - flush stale incoming message fragments
2660 * @l_ptr: pointer to link 2576 * @l_ptr: pointer to link
2661 */ 2577 */
2662
2663static void link_check_defragm_bufs(struct tipc_link *l_ptr) 2578static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2664{ 2579{
2665 struct sk_buff *prev = NULL; 2580 struct sk_buff *prev = NULL;
@@ -2682,14 +2597,12 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2682 prev->next = buf->next; 2597 prev->next = buf->next;
2683 else 2598 else
2684 l_ptr->defragm_buf = buf->next; 2599 l_ptr->defragm_buf = buf->next;
2685 buf_discard(buf); 2600 kfree_skb(buf);
2686 } 2601 }
2687 buf = next; 2602 buf = next;
2688 } 2603 }
2689} 2604}
2690 2605
2691
2692
2693static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2606static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2694{ 2607{
2695 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2608 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2701,7 +2614,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2701 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2614 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2702} 2615}
2703 2616
2704
2705void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2617void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2706{ 2618{
2707 /* Data messages from this node, inclusive FIRST_FRAGM */ 2619 /* Data messages from this node, inclusive FIRST_FRAGM */
@@ -2731,7 +2643,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2731 * 2643 *
2732 * Returns pointer to link (or 0 if invalid link name). 2644 * Returns pointer to link (or 0 if invalid link name).
2733 */ 2645 */
2734
2735static struct tipc_link *link_find_link(const char *name, 2646static struct tipc_link *link_find_link(const char *name,
2736 struct tipc_node **node) 2647 struct tipc_node **node)
2737{ 2648{
@@ -2765,7 +2676,6 @@ static struct tipc_link *link_find_link(const char *name,
2765 * 2676 *
2766 * Returns 1 if value is within range, 0 if not. 2677 * Returns 1 if value is within range, 0 if not.
2767 */ 2678 */
2768
2769static int link_value_is_valid(u16 cmd, u32 new_value) 2679static int link_value_is_valid(u16 cmd, u32 new_value)
2770{ 2680{
2771 switch (cmd) { 2681 switch (cmd) {
@@ -2781,7 +2691,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2781 return 0; 2691 return 0;
2782} 2692}
2783 2693
2784
2785/** 2694/**
2786 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2787 * @name - ptr to link, bearer, or media name 2696 * @name - ptr to link, bearer, or media name
@@ -2792,7 +2701,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2792 * 2701 *
2793 * Returns 0 if value updated and negative value on error. 2702 * Returns 0 if value updated and negative value on error.
2794 */ 2703 */
2795
2796static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2704static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2797{ 2705{
2798 struct tipc_node *node; 2706 struct tipc_node *node;
@@ -2897,7 +2805,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2897 * link_reset_statistics - reset link statistics 2805 * link_reset_statistics - reset link statistics
2898 * @l_ptr: pointer to link 2806 * @l_ptr: pointer to link
2899 */ 2807 */
2900
2901static void link_reset_statistics(struct tipc_link *l_ptr) 2808static void link_reset_statistics(struct tipc_link *l_ptr)
2902{ 2809{
2903 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2810 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
@@ -2938,7 +2845,6 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2938/** 2845/**
2939 * percent - convert count to a percentage of total (rounding up or down) 2846 * percent - convert count to a percentage of total (rounding up or down)
2940 */ 2847 */
2941
2942static u32 percent(u32 count, u32 total) 2848static u32 percent(u32 count, u32 total)
2943{ 2849{
2944 return (count * 100 + (total / 2)) / total; 2850 return (count * 100 + (total / 2)) / total;
@@ -2952,7 +2858,6 @@ static u32 percent(u32 count, u32 total)
2952 * 2858 *
2953 * Returns length of print buffer data string (or 0 if error) 2859 * Returns length of print buffer data string (or 0 if error)
2954 */ 2860 */
2955
2956static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2861static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2957{ 2862{
2958 struct print_buf pb; 2863 struct print_buf pb;
@@ -3057,7 +2962,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
3057 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2962 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3058 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 2963 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3059 if (!str_len) { 2964 if (!str_len) {
3060 buf_discard(buf); 2965 kfree_skb(buf);
3061 return tipc_cfg_reply_error_string("link not found"); 2966 return tipc_cfg_reply_error_string("link not found");
3062 } 2967 }
3063 2968
@@ -3074,7 +2979,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
3074 * 2979 *
3075 * If no active link can be found, uses default maximum packet size. 2980 * If no active link can be found, uses default maximum packet size.
3076 */ 2981 */
3077
3078u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2982u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3079{ 2983{
3080 struct tipc_node *n_ptr; 2984 struct tipc_node *n_ptr;
@@ -3158,4 +3062,3 @@ print_state:
3158 tipc_printbuf_validate(buf); 3062 tipc_printbuf_validate(buf);
3159 info("%s", print_area); 3063 info("%s", print_area);
3160} 3064}
3161
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 73c18c140e1d..d6a60a963ce6 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -47,13 +47,11 @@
47/* 47/*
48 * Out-of-range value for link sequence numbers 48 * Out-of-range value for link sequence numbers
49 */ 49 */
50
51#define INVALID_LINK_SEQ 0x10000 50#define INVALID_LINK_SEQ 0x10000
52 51
53/* 52/*
54 * Link states 53 * Link states
55 */ 54 */
56
57#define WORKING_WORKING 560810u 55#define WORKING_WORKING 560810u
58#define WORKING_UNKNOWN 560811u 56#define WORKING_UNKNOWN 560811u
59#define RESET_UNKNOWN 560812u 57#define RESET_UNKNOWN 560812u
@@ -63,7 +61,6 @@
63 * Starting value for maximum packet size negotiation on unicast links 61 * Starting value for maximum packet size negotiation on unicast links
64 * (unless bearer MTU is less) 62 * (unless bearer MTU is less)
65 */ 63 */
66
67#define MAX_PKT_DEFAULT 1500 64#define MAX_PKT_DEFAULT 1500
68 65
69/** 66/**
@@ -114,7 +111,6 @@
114 * @defragm_buf: list of partially reassembled inbound message fragments 111 * @defragm_buf: list of partially reassembled inbound message fragments
115 * @stats: collects statistics regarding link activity 112 * @stats: collects statistics regarding link activity
116 */ 113 */
117
118struct tipc_link { 114struct tipc_link {
119 u32 addr; 115 u32 addr;
120 char name[TIPC_MAX_LINK_NAME]; 116 char name[TIPC_MAX_LINK_NAME];
@@ -255,7 +251,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr,
255/* 251/*
256 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 252 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
257 */ 253 */
258
259static inline u32 buf_seqno(struct sk_buff *buf) 254static inline u32 buf_seqno(struct sk_buff *buf)
260{ 255{
261 return msg_seqno(buf_msg(buf)); 256 return msg_seqno(buf_msg(buf));
@@ -294,7 +289,6 @@ static inline u32 lesser(u32 left, u32 right)
294/* 289/*
295 * Link status checking routines 290 * Link status checking routines
296 */ 291 */
297
298static inline int link_working_working(struct tipc_link *l_ptr) 292static inline int link_working_working(struct tipc_link *l_ptr)
299{ 293{
300 return l_ptr->state == WORKING_WORKING; 294 return l_ptr->state == WORKING_WORKING;
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 952c39f643e6..026733f24919 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -47,7 +47,6 @@
47 * 47 *
48 * Additional user-defined print buffers are also permitted. 48 * Additional user-defined print buffers are also permitted.
49 */ 49 */
50
51static struct print_buf null_buf = { NULL, 0, NULL, 0 }; 50static struct print_buf null_buf = { NULL, 0, NULL, 0 };
52struct print_buf *const TIPC_NULL = &null_buf; 51struct print_buf *const TIPC_NULL = &null_buf;
53 52
@@ -72,7 +71,6 @@ struct print_buf *const TIPC_LOG = &log_buf;
72 * on the caller to prevent simultaneous use of the print buffer(s) being 71 * on the caller to prevent simultaneous use of the print buffer(s) being
73 * manipulated. 72 * manipulated.
74 */ 73 */
75
76static char print_string[TIPC_PB_MAX_STR]; 74static char print_string[TIPC_PB_MAX_STR];
77static DEFINE_SPINLOCK(print_lock); 75static DEFINE_SPINLOCK(print_lock);
78 76
@@ -97,7 +95,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
97 * Note: If the character array is too small (or absent), the print buffer 95 * Note: If the character array is too small (or absent), the print buffer
98 * becomes a null device that discards anything written to it. 96 * becomes a null device that discards anything written to it.
99 */ 97 */
100
101void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) 98void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
102{ 99{
103 pb->buf = raw; 100 pb->buf = raw;
@@ -117,7 +114,6 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
117 * tipc_printbuf_reset - reinitialize print buffer to empty state 114 * tipc_printbuf_reset - reinitialize print buffer to empty state
118 * @pb: pointer to print buffer structure 115 * @pb: pointer to print buffer structure
119 */ 116 */
120
121static void tipc_printbuf_reset(struct print_buf *pb) 117static void tipc_printbuf_reset(struct print_buf *pb)
122{ 118{
123 if (pb->buf) { 119 if (pb->buf) {
@@ -133,7 +129,6 @@ static void tipc_printbuf_reset(struct print_buf *pb)
133 * 129 *
134 * Returns non-zero if print buffer is empty. 130 * Returns non-zero if print buffer is empty.
135 */ 131 */
136
137static int tipc_printbuf_empty(struct print_buf *pb) 132static int tipc_printbuf_empty(struct print_buf *pb)
138{ 133{
139 return !pb->buf || (pb->crs == pb->buf); 134 return !pb->buf || (pb->crs == pb->buf);
@@ -148,7 +143,6 @@ static int tipc_printbuf_empty(struct print_buf *pb)
148 * 143 *
149 * Returns length of print buffer data string (including trailing NUL) 144 * Returns length of print buffer data string (including trailing NUL)
150 */ 145 */
151
152int tipc_printbuf_validate(struct print_buf *pb) 146int tipc_printbuf_validate(struct print_buf *pb)
153{ 147{
154 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n"; 148 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
@@ -182,14 +176,12 @@ int tipc_printbuf_validate(struct print_buf *pb)
182 * Current contents of destination print buffer (if any) are discarded. 176 * Current contents of destination print buffer (if any) are discarded.
183 * Source print buffer becomes empty if a successful move occurs. 177 * Source print buffer becomes empty if a successful move occurs.
184 */ 178 */
185
186static void tipc_printbuf_move(struct print_buf *pb_to, 179static void tipc_printbuf_move(struct print_buf *pb_to,
187 struct print_buf *pb_from) 180 struct print_buf *pb_from)
188{ 181{
189 int len; 182 int len;
190 183
191 /* Handle the cases where contents can't be moved */ 184 /* Handle the cases where contents can't be moved */
192
193 if (!pb_to->buf) 185 if (!pb_to->buf)
194 return; 186 return;
195 187
@@ -206,7 +198,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
206 } 198 }
207 199
208 /* Copy data from char after cursor to end (if used) */ 200 /* Copy data from char after cursor to end (if used) */
209
210 len = pb_from->buf + pb_from->size - pb_from->crs - 2; 201 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
211 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) { 202 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
212 strcpy(pb_to->buf, pb_from->crs + 1); 203 strcpy(pb_to->buf, pb_from->crs + 1);
@@ -215,7 +206,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
215 pb_to->crs = pb_to->buf; 206 pb_to->crs = pb_to->buf;
216 207
217 /* Copy data from start to cursor (always) */ 208 /* Copy data from start to cursor (always) */
218
219 len = pb_from->crs - pb_from->buf; 209 len = pb_from->crs - pb_from->buf;
220 strcpy(pb_to->crs, pb_from->buf); 210 strcpy(pb_to->crs, pb_from->buf);
221 pb_to->crs += len; 211 pb_to->crs += len;
@@ -228,7 +218,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
228 * @pb: pointer to print buffer 218 * @pb: pointer to print buffer
229 * @fmt: formatted info to be printed 219 * @fmt: formatted info to be printed
230 */ 220 */
231
232void tipc_printf(struct print_buf *pb, const char *fmt, ...) 221void tipc_printf(struct print_buf *pb, const char *fmt, ...)
233{ 222{
234 int chars_to_add; 223 int chars_to_add;
@@ -270,7 +259,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
270 * tipc_log_resize - change the size of the TIPC log buffer 259 * tipc_log_resize - change the size of the TIPC log buffer
271 * @log_size: print buffer size to use 260 * @log_size: print buffer size to use
272 */ 261 */
273
274int tipc_log_resize(int log_size) 262int tipc_log_resize(int log_size)
275{ 263{
276 int res = 0; 264 int res = 0;
@@ -295,7 +283,6 @@ int tipc_log_resize(int log_size)
295/** 283/**
296 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer 284 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
297 */ 285 */
298
299struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) 286struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
300{ 287{
301 u32 value; 288 u32 value;
@@ -304,7 +291,7 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
304 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 291 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
305 292
306 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 293 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
307 if (value != delimit(value, 0, 32768)) 294 if (value > 32768)
308 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 295 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
309 " (log size must be 0-32768)"); 296 " (log size must be 0-32768)");
310 if (tipc_log_resize(value)) 297 if (tipc_log_resize(value))
@@ -316,7 +303,6 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
316/** 303/**
317 * tipc_log_dump - capture TIPC log buffer contents in configuration message 304 * tipc_log_dump - capture TIPC log buffer contents in configuration message
318 */ 305 */
319
320struct sk_buff *tipc_log_dump(void) 306struct sk_buff *tipc_log_dump(void)
321{ 307{
322 struct sk_buff *reply; 308 struct sk_buff *reply;
diff --git a/net/tipc/log.h b/net/tipc/log.h
index 2248d96238e6..d1f5eb967fd8 100644
--- a/net/tipc/log.h
+++ b/net/tipc/log.h
@@ -44,7 +44,6 @@
44 * @crs: pointer to first unused space in character array (i.e. final NUL) 44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @echo: echo output to system console if non-zero 45 * @echo: echo output to system console if non-zero
46 */ 46 */
47
48struct print_buf { 47struct print_buf {
49 char *buf; 48 char *buf;
50 u32 size; 49 u32 size;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3e4d3e29be61..deea0d232dca 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,7 +72,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
72 * 72 *
73 * Returns message data size or errno 73 * Returns message data size or errno
74 */ 74 */
75
76int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 75int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77 u32 num_sect, unsigned int total_len, 76 u32 num_sect, unsigned int total_len,
78 int max_size, int usrmem, struct sk_buff **buf) 77 int max_size, int usrmem, struct sk_buff **buf)
@@ -106,13 +105,12 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
106 if (likely(res)) 105 if (likely(res))
107 return dsz; 106 return dsz;
108 107
109 buf_discard(*buf); 108 kfree_skb(*buf);
110 *buf = NULL; 109 *buf = NULL;
111 return -EFAULT; 110 return -EFAULT;
112} 111}
113 112
114#ifdef CONFIG_TIPC_DEBUG 113#ifdef CONFIG_TIPC_DEBUG
115
116void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) 114void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
117{ 115{
118 u32 usr = msg_user(msg); 116 u32 usr = msg_user(msg);
@@ -352,5 +350,4 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
352 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) 350 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
353 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); 351 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
354} 352}
355
356#endif 353#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 7b0cda167107..ba2a72beea68 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -44,7 +44,6 @@
44 * 44 *
45 * Note: Some items are also used with TIPC internal message headers 45 * Note: Some items are also used with TIPC internal message headers
46 */ 46 */
47
48#define TIPC_VERSION 2 47#define TIPC_VERSION 2
49 48
50/* 49/*
@@ -58,7 +57,6 @@
58/* 57/*
59 * Payload message types 58 * Payload message types
60 */ 59 */
61
62#define TIPC_CONN_MSG 0 60#define TIPC_CONN_MSG 0
63#define TIPC_MCAST_MSG 1 61#define TIPC_MCAST_MSG 1
64#define TIPC_NAMED_MSG 2 62#define TIPC_NAMED_MSG 2
@@ -67,7 +65,6 @@
67/* 65/*
68 * Message header sizes 66 * Message header sizes
69 */ 67 */
70
71#define SHORT_H_SIZE 24 /* In-cluster basic payload message */ 68#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
72#define BASIC_H_SIZE 32 /* Basic payload message */ 69#define BASIC_H_SIZE 32 /* Basic payload message */
73#define NAMED_H_SIZE 40 /* Named payload message */ 70#define NAMED_H_SIZE 40 /* Named payload message */
@@ -121,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
121/* 118/*
122 * Word 0 119 * Word 0
123 */ 120 */
124
125static inline u32 msg_version(struct tipc_msg *m) 121static inline u32 msg_version(struct tipc_msg *m)
126{ 122{
127 return msg_bits(m, 0, 29, 7); 123 return msg_bits(m, 0, 29, 7);
@@ -216,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
216/* 212/*
217 * Word 1 213 * Word 1
218 */ 214 */
219
220static inline u32 msg_type(struct tipc_msg *m) 215static inline u32 msg_type(struct tipc_msg *m)
221{ 216{
222 return msg_bits(m, 1, 29, 0x7); 217 return msg_bits(m, 1, 29, 0x7);
@@ -291,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
291/* 286/*
292 * Word 2 287 * Word 2
293 */ 288 */
294
295static inline u32 msg_ack(struct tipc_msg *m) 289static inline u32 msg_ack(struct tipc_msg *m)
296{ 290{
297 return msg_bits(m, 2, 16, 0xffff); 291 return msg_bits(m, 2, 16, 0xffff);
@@ -315,8 +309,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
315/* 309/*
316 * Words 3-10 310 * Words 3-10
317 */ 311 */
318
319
320static inline u32 msg_prevnode(struct tipc_msg *m) 312static inline u32 msg_prevnode(struct tipc_msg *m)
321{ 313{
322 return msg_word(m, 3); 314 return msg_word(m, 3);
@@ -384,11 +376,6 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
384 msg_set_word(m, 7, a); 376 msg_set_word(m, 7, a);
385} 377}
386 378
387static inline int msg_is_dest(struct tipc_msg *m, u32 d)
388{
389 return msg_short(m) || (msg_destnode(m) == d);
390}
391
392static inline u32 msg_nametype(struct tipc_msg *m) 379static inline u32 msg_nametype(struct tipc_msg *m)
393{ 380{
394 return msg_word(m, 8); 381 return msg_word(m, 8);
@@ -439,7 +426,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
439 return (struct tipc_msg *)msg_data(m); 426 return (struct tipc_msg *)msg_data(m);
440} 427}
441 428
442
443/* 429/*
444 * Constants and routines used to read and write TIPC internal message headers 430 * Constants and routines used to read and write TIPC internal message headers
445 */ 431 */
@@ -447,7 +433,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
447/* 433/*
448 * Internal message users 434 * Internal message users
449 */ 435 */
450
451#define BCAST_PROTOCOL 5 436#define BCAST_PROTOCOL 5
452#define MSG_BUNDLER 6 437#define MSG_BUNDLER 6
453#define LINK_PROTOCOL 7 438#define LINK_PROTOCOL 7
@@ -461,7 +446,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
461/* 446/*
462 * Connection management protocol message types 447 * Connection management protocol message types
463 */ 448 */
464
465#define CONN_PROBE 0 449#define CONN_PROBE 0
466#define CONN_PROBE_REPLY 1 450#define CONN_PROBE_REPLY 1
467#define CONN_ACK 2 451#define CONN_ACK 2
@@ -469,14 +453,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
469/* 453/*
470 * Name distributor message types 454 * Name distributor message types
471 */ 455 */
472
473#define PUBLICATION 0 456#define PUBLICATION 0
474#define WITHDRAWAL 1 457#define WITHDRAWAL 1
475 458
476/* 459/*
477 * Segmentation message types 460 * Segmentation message types
478 */ 461 */
479
480#define FIRST_FRAGMENT 0 462#define FIRST_FRAGMENT 0
481#define FRAGMENT 1 463#define FRAGMENT 1
482#define LAST_FRAGMENT 2 464#define LAST_FRAGMENT 2
@@ -484,7 +466,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
484/* 466/*
485 * Link management protocol message types 467 * Link management protocol message types
486 */ 468 */
487
488#define STATE_MSG 0 469#define STATE_MSG 0
489#define RESET_MSG 1 470#define RESET_MSG 1
490#define ACTIVATE_MSG 2 471#define ACTIVATE_MSG 2
@@ -498,7 +479,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
498/* 479/*
499 * Config protocol message types 480 * Config protocol message types
500 */ 481 */
501
502#define DSC_REQ_MSG 0 482#define DSC_REQ_MSG 0
503#define DSC_RESP_MSG 1 483#define DSC_RESP_MSG 1
504 484
@@ -506,7 +486,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
506/* 486/*
507 * Word 1 487 * Word 1
508 */ 488 */
509
510static inline u32 msg_seq_gap(struct tipc_msg *m) 489static inline u32 msg_seq_gap(struct tipc_msg *m)
511{ 490{
512 return msg_bits(m, 1, 16, 0x1fff); 491 return msg_bits(m, 1, 16, 0x1fff);
@@ -517,11 +496,20 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
517 msg_set_bits(m, 1, 16, 0x1fff, n); 496 msg_set_bits(m, 1, 16, 0x1fff, n);
518} 497}
519 498
499static inline u32 msg_node_sig(struct tipc_msg *m)
500{
501 return msg_bits(m, 1, 0, 0xffff);
502}
503
504static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
505{
506 msg_set_bits(m, 1, 0, 0xffff, n);
507}
508
520 509
521/* 510/*
522 * Word 2 511 * Word 2
523 */ 512 */
524
525static inline u32 msg_dest_domain(struct tipc_msg *m) 513static inline u32 msg_dest_domain(struct tipc_msg *m)
526{ 514{
527 return msg_word(m, 2); 515 return msg_word(m, 2);
@@ -556,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
556/* 544/*
557 * Word 4 545 * Word 4
558 */ 546 */
559
560static inline u32 msg_last_bcast(struct tipc_msg *m) 547static inline u32 msg_last_bcast(struct tipc_msg *m)
561{ 548{
562 return msg_bits(m, 4, 16, 0xffff); 549 return msg_bits(m, 4, 16, 0xffff);
@@ -623,7 +610,6 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
623/* 610/*
624 * Word 5 611 * Word 5
625 */ 612 */
626
627static inline u32 msg_session(struct tipc_msg *m) 613static inline u32 msg_session(struct tipc_msg *m)
628{ 614{
629 return msg_bits(m, 5, 16, 0xffff); 615 return msg_bits(m, 5, 16, 0xffff);
@@ -692,7 +678,6 @@ static inline char *msg_media_addr(struct tipc_msg *m)
692/* 678/*
693 * Word 9 679 * Word 9
694 */ 680 */
695
696static inline u32 msg_msgcnt(struct tipc_msg *m) 681static inline u32 msg_msgcnt(struct tipc_msg *m)
697{ 682{
698 return msg_bits(m, 9, 16, 0xffff); 683 return msg_bits(m, 9, 16, 0xffff);
@@ -739,5 +724,4 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
739int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 724int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
740 u32 num_sect, unsigned int total_len, 725 u32 num_sect, unsigned int total_len,
741 int max_size, int usrmem, struct sk_buff **buf); 726 int max_size, int usrmem, struct sk_buff **buf);
742
743#endif 727#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 98ebb37f1808..158318e67b08 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -58,7 +58,6 @@
58 * Note: There is no field that identifies the publishing node because it is 58 * Note: There is no field that identifies the publishing node because it is
59 * the same for all items contained within a publication message. 59 * the same for all items contained within a publication message.
60 */ 60 */
61
62struct distr_item { 61struct distr_item {
63 __be32 type; 62 __be32 type;
64 __be32 lower; 63 __be32 lower;
@@ -68,17 +67,41 @@ struct distr_item {
68}; 67};
69 68
70/** 69/**
71 * List of externally visible publications by this node -- 70 * struct publ_list - list of publications made by this node
72 * that is, all publications having scope > TIPC_NODE_SCOPE. 71 * @list: circular list of publications
72 * @list_size: number of entries in list
73 */ 73 */
74struct publ_list {
75 struct list_head list;
76 u32 size;
77};
78
79static struct publ_list publ_zone = {
80 .list = LIST_HEAD_INIT(publ_zone.list),
81 .size = 0,
82};
83
84static struct publ_list publ_cluster = {
85 .list = LIST_HEAD_INIT(publ_cluster.list),
86 .size = 0,
87};
88
89static struct publ_list publ_node = {
90 .list = LIST_HEAD_INIT(publ_node.list),
91 .size = 0,
92};
93
94static struct publ_list *publ_lists[] = {
95 NULL,
96 &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
97 &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
98 &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
99};
74 100
75static LIST_HEAD(publ_root);
76static u32 publ_cnt;
77 101
78/** 102/**
79 * publ_to_item - add publication info to a publication message 103 * publ_to_item - add publication info to a publication message
80 */ 104 */
81
82static void publ_to_item(struct distr_item *i, struct publication *p) 105static void publ_to_item(struct distr_item *i, struct publication *p)
83{ 106{
84 i->type = htonl(p->type); 107 i->type = htonl(p->type);
@@ -91,7 +114,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
91/** 114/**
92 * named_prepare_buf - allocate & initialize a publication message 115 * named_prepare_buf - allocate & initialize a publication message
93 */ 116 */
94
95static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) 117static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
96{ 118{
97 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); 119 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
@@ -120,20 +142,22 @@ static void named_cluster_distribute(struct sk_buff *buf)
120 } 142 }
121 } 143 }
122 144
123 buf_discard(buf); 145 kfree_skb(buf);
124} 146}
125 147
126/** 148/**
127 * tipc_named_publish - tell other nodes about a new publication by this node 149 * tipc_named_publish - tell other nodes about a new publication by this node
128 */ 150 */
129
130void tipc_named_publish(struct publication *publ) 151void tipc_named_publish(struct publication *publ)
131{ 152{
132 struct sk_buff *buf; 153 struct sk_buff *buf;
133 struct distr_item *item; 154 struct distr_item *item;
134 155
135 list_add_tail(&publ->local_list, &publ_root); 156 list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
136 publ_cnt++; 157 publ_lists[publ->scope]->size++;
158
159 if (publ->scope == TIPC_NODE_SCOPE)
160 return;
137 161
138 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 162 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
139 if (!buf) { 163 if (!buf) {
@@ -149,14 +173,16 @@ void tipc_named_publish(struct publication *publ)
149/** 173/**
150 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 174 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
151 */ 175 */
152
153void tipc_named_withdraw(struct publication *publ) 176void tipc_named_withdraw(struct publication *publ)
154{ 177{
155 struct sk_buff *buf; 178 struct sk_buff *buf;
156 struct distr_item *item; 179 struct distr_item *item;
157 180
158 list_del(&publ->local_list); 181 list_del(&publ->local_list);
159 publ_cnt--; 182 publ_lists[publ->scope]->size--;
183
184 if (publ->scope == TIPC_NODE_SCOPE)
185 return;
160 186
161 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 187 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
162 if (!buf) { 188 if (!buf) {
@@ -169,25 +195,51 @@ void tipc_named_withdraw(struct publication *publ)
169 named_cluster_distribute(buf); 195 named_cluster_distribute(buf);
170} 196}
171 197
198/*
199 * named_distribute - prepare name info for bulk distribution to another node
200 */
201static void named_distribute(struct list_head *message_list, u32 node,
202 struct publ_list *pls, u32 max_item_buf)
203{
204 struct publication *publ;
205 struct sk_buff *buf = NULL;
206 struct distr_item *item = NULL;
207 u32 left = 0;
208 u32 rest = pls->size * ITEM_SIZE;
209
210 list_for_each_entry(publ, &pls->list, local_list) {
211 if (!buf) {
212 left = (rest <= max_item_buf) ? rest : max_item_buf;
213 rest -= left;
214 buf = named_prepare_buf(PUBLICATION, left, node);
215 if (!buf) {
216 warn("Bulk publication failure\n");
217 return;
218 }
219 item = (struct distr_item *)msg_data(buf_msg(buf));
220 }
221 publ_to_item(item, publ);
222 item++;
223 left -= ITEM_SIZE;
224 if (!left) {
225 list_add_tail((struct list_head *)buf, message_list);
226 buf = NULL;
227 }
228 }
229}
230
172/** 231/**
173 * tipc_named_node_up - tell specified node about all publications by this node 232 * tipc_named_node_up - tell specified node about all publications by this node
174 */ 233 */
175
176void tipc_named_node_up(unsigned long nodearg) 234void tipc_named_node_up(unsigned long nodearg)
177{ 235{
178 struct tipc_node *n_ptr; 236 struct tipc_node *n_ptr;
179 struct tipc_link *l_ptr; 237 struct tipc_link *l_ptr;
180 struct publication *publ;
181 struct distr_item *item = NULL;
182 struct sk_buff *buf = NULL;
183 struct list_head message_list; 238 struct list_head message_list;
184 u32 node = (u32)nodearg; 239 u32 node = (u32)nodearg;
185 u32 left = 0;
186 u32 rest;
187 u32 max_item_buf = 0; 240 u32 max_item_buf = 0;
188 241
189 /* compute maximum amount of publication data to send per message */ 242 /* compute maximum amount of publication data to send per message */
190
191 read_lock_bh(&tipc_net_lock); 243 read_lock_bh(&tipc_net_lock);
192 n_ptr = tipc_node_find(node); 244 n_ptr = tipc_node_find(node);
193 if (n_ptr) { 245 if (n_ptr) {
@@ -203,32 +255,11 @@ void tipc_named_node_up(unsigned long nodearg)
203 return; 255 return;
204 256
205 /* create list of publication messages, then send them as a unit */ 257 /* create list of publication messages, then send them as a unit */
206
207 INIT_LIST_HEAD(&message_list); 258 INIT_LIST_HEAD(&message_list);
208 259
209 read_lock_bh(&tipc_nametbl_lock); 260 read_lock_bh(&tipc_nametbl_lock);
210 rest = publ_cnt * ITEM_SIZE; 261 named_distribute(&message_list, node, &publ_cluster, max_item_buf);
211 262 named_distribute(&message_list, node, &publ_zone, max_item_buf);
212 list_for_each_entry(publ, &publ_root, local_list) {
213 if (!buf) {
214 left = (rest <= max_item_buf) ? rest : max_item_buf;
215 rest -= left;
216 buf = named_prepare_buf(PUBLICATION, left, node);
217 if (!buf) {
218 warn("Bulk publication distribution failure\n");
219 goto exit;
220 }
221 item = (struct distr_item *)msg_data(buf_msg(buf));
222 }
223 publ_to_item(item, publ);
224 item++;
225 left -= ITEM_SIZE;
226 if (!left) {
227 list_add_tail((struct list_head *)buf, &message_list);
228 buf = NULL;
229 }
230 }
231exit:
232 read_unlock_bh(&tipc_nametbl_lock); 263 read_unlock_bh(&tipc_nametbl_lock);
233 264
234 tipc_link_send_names(&message_list, (u32)node); 265 tipc_link_send_names(&message_list, (u32)node);
@@ -239,17 +270,12 @@ exit:
239 * 270 *
240 * Invoked for each publication issued by a newly failed node. 271 * Invoked for each publication issued by a newly failed node.
241 * Removes publication structure from name table & deletes it. 272 * Removes publication structure from name table & deletes it.
242 * In rare cases the link may have come back up again when this
243 * function is called, and we have two items representing the same
244 * publication. Nudge this item's key to distinguish it from the other.
245 */ 273 */
246
247static void named_purge_publ(struct publication *publ) 274static void named_purge_publ(struct publication *publ)
248{ 275{
249 struct publication *p; 276 struct publication *p;
250 277
251 write_lock_bh(&tipc_nametbl_lock); 278 write_lock_bh(&tipc_nametbl_lock);
252 publ->key += 1222345;
253 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 279 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
254 publ->node, publ->ref, publ->key); 280 publ->node, publ->ref, publ->key);
255 if (p) 281 if (p)
@@ -268,7 +294,6 @@ static void named_purge_publ(struct publication *publ)
268/** 294/**
269 * tipc_named_recv - process name table update message sent by another node 295 * tipc_named_recv - process name table update message sent by another node
270 */ 296 */
271
272void tipc_named_recv(struct sk_buff *buf) 297void tipc_named_recv(struct sk_buff *buf)
273{ 298{
274 struct publication *publ; 299 struct publication *publ;
@@ -316,25 +341,26 @@ void tipc_named_recv(struct sk_buff *buf)
316 item++; 341 item++;
317 } 342 }
318 write_unlock_bh(&tipc_nametbl_lock); 343 write_unlock_bh(&tipc_nametbl_lock);
319 buf_discard(buf); 344 kfree_skb(buf);
320} 345}
321 346
322/** 347/**
323 * tipc_named_reinit - re-initialize local publication list 348 * tipc_named_reinit - re-initialize local publications
324 * 349 *
325 * This routine is called whenever TIPC networking is enabled. 350 * This routine is called whenever TIPC networking is enabled.
326 * All existing publications by this node that have "cluster" or "zone" scope 351 * All name table entries published by this node are updated to reflect
327 * are updated to reflect the node's new network address. 352 * the node's new network address.
328 */ 353 */
329
330void tipc_named_reinit(void) 354void tipc_named_reinit(void)
331{ 355{
332 struct publication *publ; 356 struct publication *publ;
357 int scope;
333 358
334 write_lock_bh(&tipc_nametbl_lock); 359 write_lock_bh(&tipc_nametbl_lock);
335 360
336 list_for_each_entry(publ, &publ_root, local_list) 361 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
337 publ->node = tipc_own_addr; 362 list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
363 publ->node = tipc_own_addr;
338 364
339 write_unlock_bh(&tipc_nametbl_lock); 365 write_unlock_bh(&tipc_nametbl_lock);
340} 366}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 89eb5621ebba..010f24a59da2 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -56,7 +56,6 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
56 * publications of the associated name sequence belong to it. 56 * publications of the associated name sequence belong to it.
57 * (The cluster and node lists may be empty.) 57 * (The cluster and node lists may be empty.)
58 */ 58 */
59
60struct name_info { 59struct name_info {
61 struct list_head node_list; 60 struct list_head node_list;
62 struct list_head cluster_list; 61 struct list_head cluster_list;
@@ -72,7 +71,6 @@ struct name_info {
72 * @upper: name sequence upper bound 71 * @upper: name sequence upper bound
73 * @info: pointer to name sequence publication info 72 * @info: pointer to name sequence publication info
74 */ 73 */
75
76struct sub_seq { 74struct sub_seq {
77 u32 lower; 75 u32 lower;
78 u32 upper; 76 u32 upper;
@@ -90,7 +88,6 @@ struct sub_seq {
90 * @subscriptions: list of subscriptions for this 'type' 88 * @subscriptions: list of subscriptions for this 'type'
91 * @lock: spinlock controlling access to publication lists of all sub-sequences 89 * @lock: spinlock controlling access to publication lists of all sub-sequences
92 */ 90 */
93
94struct name_seq { 91struct name_seq {
95 u32 type; 92 u32 type;
96 struct sub_seq *sseqs; 93 struct sub_seq *sseqs;
@@ -107,17 +104,14 @@ struct name_seq {
107 * accessed via hashing on 'type'; name sequence lists are *not* sorted 104 * accessed via hashing on 'type'; name sequence lists are *not* sorted
108 * @local_publ_count: number of publications issued by this node 105 * @local_publ_count: number of publications issued by this node
109 */ 106 */
110
111struct name_table { 107struct name_table {
112 struct hlist_head *types; 108 struct hlist_head *types;
113 u32 local_publ_count; 109 u32 local_publ_count;
114}; 110};
115 111
116static struct name_table table; 112static struct name_table table;
117static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
118DEFINE_RWLOCK(tipc_nametbl_lock); 113DEFINE_RWLOCK(tipc_nametbl_lock);
119 114
120
121static int hash(int x) 115static int hash(int x)
122{ 116{
123 return x & (tipc_nametbl_size - 1); 117 return x & (tipc_nametbl_size - 1);
@@ -126,7 +120,6 @@ static int hash(int x)
126/** 120/**
127 * publ_create - create a publication structure 121 * publ_create - create a publication structure
128 */ 122 */
129
130static struct publication *publ_create(u32 type, u32 lower, u32 upper, 123static struct publication *publ_create(u32 type, u32 lower, u32 upper,
131 u32 scope, u32 node, u32 port_ref, 124 u32 scope, u32 node, u32 port_ref,
132 u32 key) 125 u32 key)
@@ -153,7 +146,6 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
153/** 146/**
154 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures 147 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
155 */ 148 */
156
157static struct sub_seq *tipc_subseq_alloc(u32 cnt) 149static struct sub_seq *tipc_subseq_alloc(u32 cnt)
158{ 150{
159 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); 151 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
@@ -165,7 +157,6 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
165 * 157 *
166 * Allocates a single sub-sequence structure and sets it to all 0's. 158 * Allocates a single sub-sequence structure and sets it to all 0's.
167 */ 159 */
168
169static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 160static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
170{ 161{
171 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); 162 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
@@ -188,12 +179,23 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
188 return nseq; 179 return nseq;
189} 180}
190 181
191/** 182/*
183 * nameseq_delete_empty - deletes a name sequence structure if now unused
184 */
185static void nameseq_delete_empty(struct name_seq *seq)
186{
187 if (!seq->first_free && list_empty(&seq->subscriptions)) {
188 hlist_del_init(&seq->ns_list);
189 kfree(seq->sseqs);
190 kfree(seq);
191 }
192}
193
194/*
192 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
193 * 196 *
194 * Very time-critical, so binary searches through sub-sequence array. 197 * Very time-critical, so binary searches through sub-sequence array.
195 */ 198 */
196
197static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, 199static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
198 u32 instance) 200 u32 instance)
199{ 201{
@@ -223,7 +225,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
223 * 225 *
224 * Note: Similar to binary search code for locating a sub-sequence. 226 * Note: Similar to binary search code for locating a sub-sequence.
225 */ 227 */
226
227static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) 228static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
228{ 229{
229 struct sub_seq *sseqs = nseq->sseqs; 230 struct sub_seq *sseqs = nseq->sseqs;
@@ -244,9 +245,8 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
244} 245}
245 246
246/** 247/**
247 * tipc_nameseq_insert_publ - 248 * tipc_nameseq_insert_publ
248 */ 249 */
249
250static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, 250static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
251 u32 type, u32 lower, u32 upper, 251 u32 type, u32 lower, u32 upper,
252 u32 scope, u32 node, u32 port, u32 key) 252 u32 scope, u32 node, u32 port, u32 key)
@@ -262,7 +262,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
262 if (sseq) { 262 if (sseq) {
263 263
264 /* Lower end overlaps existing entry => need an exact match */ 264 /* Lower end overlaps existing entry => need an exact match */
265
266 if ((sseq->lower != lower) || (sseq->upper != upper)) { 265 if ((sseq->lower != lower) || (sseq->upper != upper)) {
267 warn("Cannot publish {%u,%u,%u}, overlap error\n", 266 warn("Cannot publish {%u,%u,%u}, overlap error\n",
268 type, lower, upper); 267 type, lower, upper);
@@ -270,16 +269,21 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
270 } 269 }
271 270
272 info = sseq->info; 271 info = sseq->info;
272
273 /* Check if an identical publication already exists */
274 list_for_each_entry(publ, &info->zone_list, zone_list) {
275 if ((publ->ref == port) && (publ->key == key) &&
276 (!publ->node || (publ->node == node)))
277 return NULL;
278 }
273 } else { 279 } else {
274 u32 inspos; 280 u32 inspos;
275 struct sub_seq *freesseq; 281 struct sub_seq *freesseq;
276 282
277 /* Find where lower end should be inserted */ 283 /* Find where lower end should be inserted */
278
279 inspos = nameseq_locate_subseq(nseq, lower); 284 inspos = nameseq_locate_subseq(nseq, lower);
280 285
281 /* Fail if upper end overlaps into an existing entry */ 286 /* Fail if upper end overlaps into an existing entry */
282
283 if ((inspos < nseq->first_free) && 287 if ((inspos < nseq->first_free) &&
284 (upper >= nseq->sseqs[inspos].lower)) { 288 (upper >= nseq->sseqs[inspos].lower)) {
285 warn("Cannot publish {%u,%u,%u}, overlap error\n", 289 warn("Cannot publish {%u,%u,%u}, overlap error\n",
@@ -288,7 +292,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
288 } 292 }
289 293
290 /* Ensure there is space for new sub-sequence */ 294 /* Ensure there is space for new sub-sequence */
291
292 if (nseq->first_free == nseq->alloc) { 295 if (nseq->first_free == nseq->alloc) {
293 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); 296 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
294 297
@@ -316,7 +319,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
316 INIT_LIST_HEAD(&info->zone_list); 319 INIT_LIST_HEAD(&info->zone_list);
317 320
318 /* Insert new sub-sequence */ 321 /* Insert new sub-sequence */
319
320 sseq = &nseq->sseqs[inspos]; 322 sseq = &nseq->sseqs[inspos];
321 freesseq = &nseq->sseqs[nseq->first_free]; 323 freesseq = &nseq->sseqs[nseq->first_free];
322 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); 324 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
@@ -328,8 +330,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
328 created_subseq = 1; 330 created_subseq = 1;
329 } 331 }
330 332
331 /* Insert a publication: */ 333 /* Insert a publication */
332
333 publ = publ_create(type, lower, upper, scope, node, port, key); 334 publ = publ_create(type, lower, upper, scope, node, port, key);
334 if (!publ) 335 if (!publ)
335 return NULL; 336 return NULL;
@@ -342,14 +343,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
342 info->cluster_list_size++; 343 info->cluster_list_size++;
343 } 344 }
344 345
345 if (node == tipc_own_addr) { 346 if (in_own_node(node)) {
346 list_add(&publ->node_list, &info->node_list); 347 list_add(&publ->node_list, &info->node_list);
347 info->node_list_size++; 348 info->node_list_size++;
348 } 349 }
349 350
350 /* 351 /* Any subscriptions waiting for notification? */
351 * Any subscriptions waiting for notification?
352 */
353 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 352 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
354 tipc_subscr_report_overlap(s, 353 tipc_subscr_report_overlap(s,
355 publ->lower, 354 publ->lower,
@@ -363,7 +362,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
363} 362}
364 363
365/** 364/**
366 * tipc_nameseq_remove_publ - 365 * tipc_nameseq_remove_publ
367 * 366 *
368 * NOTE: There may be cases where TIPC is asked to remove a publication 367 * NOTE: There may be cases where TIPC is asked to remove a publication
369 * that is not in the name table. For example, if another node issues a 368 * that is not in the name table. For example, if another node issues a
@@ -373,7 +372,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
373 * A failed withdraw request simply returns a failure indication and lets the 372 * A failed withdraw request simply returns a failure indication and lets the
374 * caller issue any error or warning messages associated with such a problem. 373 * caller issue any error or warning messages associated with such a problem.
375 */ 374 */
376
377static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, 375static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
378 u32 node, u32 ref, u32 key) 376 u32 node, u32 ref, u32 key)
379{ 377{
@@ -390,7 +388,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
390 info = sseq->info; 388 info = sseq->info;
391 389
392 /* Locate publication, if it exists */ 390 /* Locate publication, if it exists */
393
394 list_for_each_entry(publ, &info->zone_list, zone_list) { 391 list_for_each_entry(publ, &info->zone_list, zone_list) {
395 if ((publ->key == key) && (publ->ref == ref) && 392 if ((publ->key == key) && (publ->ref == ref) &&
396 (!publ->node || (publ->node == node))) 393 (!publ->node || (publ->node == node)))
@@ -400,26 +397,22 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
400 397
401found: 398found:
402 /* Remove publication from zone scope list */ 399 /* Remove publication from zone scope list */
403
404 list_del(&publ->zone_list); 400 list_del(&publ->zone_list);
405 info->zone_list_size--; 401 info->zone_list_size--;
406 402
407 /* Remove publication from cluster scope list, if present */ 403 /* Remove publication from cluster scope list, if present */
408
409 if (in_own_cluster(node)) { 404 if (in_own_cluster(node)) {
410 list_del(&publ->cluster_list); 405 list_del(&publ->cluster_list);
411 info->cluster_list_size--; 406 info->cluster_list_size--;
412 } 407 }
413 408
414 /* Remove publication from node scope list, if present */ 409 /* Remove publication from node scope list, if present */
415 410 if (in_own_node(node)) {
416 if (node == tipc_own_addr) {
417 list_del(&publ->node_list); 411 list_del(&publ->node_list);
418 info->node_list_size--; 412 info->node_list_size--;
419 } 413 }
420 414
421 /* Contract subseq list if no more publications for that subseq */ 415 /* Contract subseq list if no more publications for that subseq */
422
423 if (list_empty(&info->zone_list)) { 416 if (list_empty(&info->zone_list)) {
424 kfree(info); 417 kfree(info);
425 free = &nseq->sseqs[nseq->first_free--]; 418 free = &nseq->sseqs[nseq->first_free--];
@@ -428,7 +421,6 @@ found:
428 } 421 }
429 422
430 /* Notify any waiting subscriptions */ 423 /* Notify any waiting subscriptions */
431
432 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 424 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
433 tipc_subscr_report_overlap(s, 425 tipc_subscr_report_overlap(s,
434 publ->lower, 426 publ->lower,
@@ -447,7 +439,6 @@ found:
447 * the prescribed number of events if there is any sub- 439 * the prescribed number of events if there is any sub-
448 * sequence overlapping with the requested sequence 440 * sequence overlapping with the requested sequence
449 */ 441 */
450
451static void tipc_nameseq_subscribe(struct name_seq *nseq, 442static void tipc_nameseq_subscribe(struct name_seq *nseq,
452 struct tipc_subscription *s) 443 struct tipc_subscription *s)
453{ 444{
@@ -499,9 +490,10 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
499{ 490{
500 struct name_seq *seq = nametbl_find_seq(type); 491 struct name_seq *seq = nametbl_find_seq(type);
501 492
502 if (lower > upper) { 493 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
503 warn("Failed to publish illegal {%u,%u,%u}\n", 494 (lower > upper)) {
504 type, lower, upper); 495 dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
496 type, lower, upper, scope);
505 return NULL; 497 return NULL;
506 } 498 }
507 499
@@ -524,22 +516,23 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
524 return NULL; 516 return NULL;
525 517
526 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 518 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
527 519 nameseq_delete_empty(seq);
528 if (!seq->first_free && list_empty(&seq->subscriptions)) {
529 hlist_del_init(&seq->ns_list);
530 kfree(seq->sseqs);
531 kfree(seq);
532 }
533 return publ; 520 return publ;
534} 521}
535 522
536/* 523/*
537 * tipc_nametbl_translate - translate name to port id 524 * tipc_nametbl_translate - perform name translation
525 *
526 * On entry, 'destnode' is the search domain used during translation.
538 * 527 *
539 * Note: on entry 'destnode' is the search domain used during translation; 528 * On exit:
540 * on exit it passes back the node address of the matching port (if any) 529 * - if name translation is deferred to another node/cluster/zone,
530 * leaves 'destnode' unchanged (will be non-zero) and returns 0
531 * - if name translation is attempted and succeeds, sets 'destnode'
532 * to publishing node and returns port reference (will be non-zero)
533 * - if name translation is attempted and fails, sets 'destnode' to 0
534 * and returns 0
541 */ 535 */
542
543u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) 536u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
544{ 537{
545 struct sub_seq *sseq; 538 struct sub_seq *sseq;
@@ -547,6 +540,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
547 struct publication *publ; 540 struct publication *publ;
548 struct name_seq *seq; 541 struct name_seq *seq;
549 u32 ref = 0; 542 u32 ref = 0;
543 u32 node = 0;
550 544
551 if (!tipc_in_scope(*destnode, tipc_own_addr)) 545 if (!tipc_in_scope(*destnode, tipc_own_addr))
552 return 0; 546 return 0;
@@ -561,7 +555,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
561 spin_lock_bh(&seq->lock); 555 spin_lock_bh(&seq->lock);
562 info = sseq->info; 556 info = sseq->info;
563 557
564 /* Closest-First Algorithm: */ 558 /* Closest-First Algorithm */
565 if (likely(!*destnode)) { 559 if (likely(!*destnode)) {
566 if (!list_empty(&info->node_list)) { 560 if (!list_empty(&info->node_list)) {
567 publ = list_first_entry(&info->node_list, 561 publ = list_first_entry(&info->node_list,
@@ -584,14 +578,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
584 } 578 }
585 } 579 }
586 580
587 /* Round-Robin Algorithm: */ 581 /* Round-Robin Algorithm */
588 else if (*destnode == tipc_own_addr) { 582 else if (*destnode == tipc_own_addr) {
589 if (list_empty(&info->node_list)) 583 if (list_empty(&info->node_list))
590 goto no_match; 584 goto no_match;
591 publ = list_first_entry(&info->node_list, struct publication, 585 publ = list_first_entry(&info->node_list, struct publication,
592 node_list); 586 node_list);
593 list_move_tail(&publ->node_list, &info->node_list); 587 list_move_tail(&publ->node_list, &info->node_list);
594 } else if (in_own_cluster(*destnode)) { 588 } else if (in_own_cluster_exact(*destnode)) {
595 if (list_empty(&info->cluster_list)) 589 if (list_empty(&info->cluster_list))
596 goto no_match; 590 goto no_match;
597 publ = list_first_entry(&info->cluster_list, struct publication, 591 publ = list_first_entry(&info->cluster_list, struct publication,
@@ -604,11 +598,12 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
604 } 598 }
605 599
606 ref = publ->ref; 600 ref = publ->ref;
607 *destnode = publ->node; 601 node = publ->node;
608no_match: 602no_match:
609 spin_unlock_bh(&seq->lock); 603 spin_unlock_bh(&seq->lock);
610not_found: 604not_found:
611 read_unlock_bh(&tipc_nametbl_lock); 605 read_unlock_bh(&tipc_nametbl_lock);
606 *destnode = node;
612 return ref; 607 return ref;
613} 608}
614 609
@@ -624,7 +619,6 @@ not_found:
624 * 619 *
625 * Returns non-zero if any off-node ports overlap 620 * Returns non-zero if any off-node ports overlap
626 */ 621 */
627
628int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 622int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
629 struct tipc_port_list *dports) 623 struct tipc_port_list *dports)
630{ 624{
@@ -665,25 +659,9 @@ exit:
665 return res; 659 return res;
666} 660}
667 661
668/** 662/*
669 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
670 */
671
672int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
673 struct tipc_name_seq const *seq)
674{
675 int res;
676
677 atomic_inc(&rsv_publ_ok);
678 res = tipc_publish(ref, scope, seq);
679 atomic_dec(&rsv_publ_ok);
680 return res;
681}
682
683/**
684 * tipc_nametbl_publish - add name publication to network name tables 663 * tipc_nametbl_publish - add name publication to network name tables
685 */ 664 */
686
687struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 665struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
688 u32 scope, u32 port_ref, u32 key) 666 u32 scope, u32 port_ref, u32 key)
689{ 667{
@@ -694,18 +672,14 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
694 tipc_max_publications); 672 tipc_max_publications);
695 return NULL; 673 return NULL;
696 } 674 }
697 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
698 warn("Publication failed, reserved name {%u,%u,%u}\n",
699 type, lower, upper);
700 return NULL;
701 }
702 675
703 write_lock_bh(&tipc_nametbl_lock); 676 write_lock_bh(&tipc_nametbl_lock);
704 table.local_publ_count++;
705 publ = tipc_nametbl_insert_publ(type, lower, upper, scope, 677 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
706 tipc_own_addr, port_ref, key); 678 tipc_own_addr, port_ref, key);
707 if (publ && (scope != TIPC_NODE_SCOPE)) 679 if (likely(publ)) {
680 table.local_publ_count++;
708 tipc_named_publish(publ); 681 tipc_named_publish(publ);
682 }
709 write_unlock_bh(&tipc_nametbl_lock); 683 write_unlock_bh(&tipc_nametbl_lock);
710 return publ; 684 return publ;
711} 685}
@@ -713,7 +687,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
713/** 687/**
714 * tipc_nametbl_withdraw - withdraw name publication from network name tables 688 * tipc_nametbl_withdraw - withdraw name publication from network name tables
715 */ 689 */
716
717int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 690int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
718{ 691{
719 struct publication *publ; 692 struct publication *publ;
@@ -722,8 +695,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
722 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 695 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
723 if (likely(publ)) { 696 if (likely(publ)) {
724 table.local_publ_count--; 697 table.local_publ_count--;
725 if (publ->scope != TIPC_NODE_SCOPE) 698 tipc_named_withdraw(publ);
726 tipc_named_withdraw(publ);
727 write_unlock_bh(&tipc_nametbl_lock); 699 write_unlock_bh(&tipc_nametbl_lock);
728 list_del_init(&publ->pport_list); 700 list_del_init(&publ->pport_list);
729 kfree(publ); 701 kfree(publ);
@@ -739,7 +711,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
739/** 711/**
740 * tipc_nametbl_subscribe - add a subscription object to the name table 712 * tipc_nametbl_subscribe - add a subscription object to the name table
741 */ 713 */
742
743void tipc_nametbl_subscribe(struct tipc_subscription *s) 714void tipc_nametbl_subscribe(struct tipc_subscription *s)
744{ 715{
745 u32 type = s->seq.type; 716 u32 type = s->seq.type;
@@ -763,7 +734,6 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
763/** 734/**
764 * tipc_nametbl_unsubscribe - remove a subscription object from name table 735 * tipc_nametbl_unsubscribe - remove a subscription object from name table
765 */ 736 */
766
767void tipc_nametbl_unsubscribe(struct tipc_subscription *s) 737void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
768{ 738{
769 struct name_seq *seq; 739 struct name_seq *seq;
@@ -774,11 +744,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
774 spin_lock_bh(&seq->lock); 744 spin_lock_bh(&seq->lock);
775 list_del_init(&s->nameseq_list); 745 list_del_init(&s->nameseq_list);
776 spin_unlock_bh(&seq->lock); 746 spin_unlock_bh(&seq->lock);
777 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { 747 nameseq_delete_empty(seq);
778 hlist_del_init(&seq->ns_list);
779 kfree(seq->sseqs);
780 kfree(seq);
781 }
782 } 748 }
783 write_unlock_bh(&tipc_nametbl_lock); 749 write_unlock_bh(&tipc_nametbl_lock);
784} 750}
@@ -787,7 +753,6 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
787/** 753/**
788 * subseq_list: print specified sub-sequence contents into the given buffer 754 * subseq_list: print specified sub-sequence contents into the given buffer
789 */ 755 */
790
791static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, 756static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
792 u32 index) 757 u32 index)
793{ 758{
@@ -824,7 +789,6 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
824/** 789/**
825 * nameseq_list: print specified name sequence contents into the given buffer 790 * nameseq_list: print specified name sequence contents into the given buffer
826 */ 791 */
827
828static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, 792static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
829 u32 type, u32 lowbound, u32 upbound, u32 index) 793 u32 type, u32 lowbound, u32 upbound, u32 index)
830{ 794{
@@ -855,7 +819,6 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
855/** 819/**
856 * nametbl_header - print name table header into the given buffer 820 * nametbl_header - print name table header into the given buffer
857 */ 821 */
858
859static void nametbl_header(struct print_buf *buf, u32 depth) 822static void nametbl_header(struct print_buf *buf, u32 depth)
860{ 823{
861 const char *header[] = { 824 const char *header[] = {
@@ -877,7 +840,6 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
877/** 840/**
878 * nametbl_list - print specified name table contents into the given buffer 841 * nametbl_list - print specified name table contents into the given buffer
879 */ 842 */
880
881static void nametbl_list(struct print_buf *buf, u32 depth_info, 843static void nametbl_list(struct print_buf *buf, u32 depth_info,
882 u32 type, u32 lowbound, u32 upbound) 844 u32 type, u32 lowbound, u32 upbound)
883{ 845{
@@ -976,7 +938,6 @@ void tipc_nametbl_stop(void)
976 return; 938 return;
977 939
978 /* Verify name table is empty, then release it */ 940 /* Verify name table is empty, then release it */
979
980 write_lock_bh(&tipc_nametbl_lock); 941 write_lock_bh(&tipc_nametbl_lock);
981 for (i = 0; i < tipc_nametbl_size; i++) { 942 for (i = 0; i < tipc_nametbl_size; i++) {
982 if (!hlist_empty(&table.types[i])) 943 if (!hlist_empty(&table.types[i]))
@@ -986,4 +947,3 @@ void tipc_nametbl_stop(void)
986 table.types = NULL; 947 table.types = NULL;
987 write_unlock_bh(&tipc_nametbl_lock); 948 write_unlock_bh(&tipc_nametbl_lock);
988} 949}
989
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 8086b42f92ad..71cb4dc712df 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -45,10 +45,8 @@ struct tipc_port_list;
45/* 45/*
46 * TIPC name types reserved for internal TIPC use (both current and planned) 46 * TIPC name types reserved for internal TIPC use (both current and planned)
47 */ 47 */
48
49#define TIPC_ZM_SRV 3 /* zone master service name type */ 48#define TIPC_ZM_SRV 3 /* zone master service name type */
50 49
51
52/** 50/**
53 * struct publication - info about a published (name or) name sequence 51 * struct publication - info about a published (name or) name sequence
54 * @type: name sequence type 52 * @type: name sequence type
@@ -67,7 +65,6 @@ struct tipc_port_list;
67 * 65 *
68 * Note that the node list, cluster list, and zone list are circular lists. 66 * Note that the node list, cluster list, and zone list are circular lists.
69 */ 67 */
70
71struct publication { 68struct publication {
72 u32 type; 69 u32 type;
73 u32 lower; 70 u32 lower;
@@ -91,8 +88,6 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 88u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 89int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct tipc_port_list *dports); 90 struct tipc_port_list *dports);
94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq);
96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 91struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key); 92 u32 scope, u32 port_ref, u32 key);
98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 93int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 61afee7e8291..7c236c89cf5e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -117,7 +117,7 @@ static void net_route_named_msg(struct sk_buff *buf)
117 u32 dport; 117 u32 dport;
118 118
119 if (!msg_named(msg)) { 119 if (!msg_named(msg)) {
120 buf_discard(buf); 120 kfree_skb(buf);
121 return; 121 return;
122 } 122 }
123 123
@@ -161,7 +161,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
161 tipc_port_recv_proto_msg(buf); 161 tipc_port_recv_proto_msg(buf);
162 break; 162 break;
163 default: 163 default:
164 buf_discard(buf); 164 kfree_skb(buf);
165 } 165 }
166 return; 166 return;
167 } 167 }
@@ -175,21 +175,14 @@ int tipc_net_start(u32 addr)
175{ 175{
176 char addr_string[16]; 176 char addr_string[16];
177 177
178 if (tipc_mode != TIPC_NODE_MODE) 178 write_lock_bh(&tipc_net_lock);
179 return -ENOPROTOOPT;
180
181 tipc_subscr_stop();
182 tipc_cfg_stop();
183
184 tipc_own_addr = addr; 179 tipc_own_addr = addr;
185 tipc_mode = TIPC_NET_MODE;
186 tipc_named_reinit(); 180 tipc_named_reinit();
187 tipc_port_reinit(); 181 tipc_port_reinit();
188
189 tipc_bclink_init(); 182 tipc_bclink_init();
183 write_unlock_bh(&tipc_net_lock);
190 184
191 tipc_k_signal((Handler)tipc_subscr_start, 0); 185 tipc_cfg_reinit();
192 tipc_k_signal((Handler)tipc_cfg_init, 0);
193 186
194 info("Started in network mode\n"); 187 info("Started in network mode\n");
195 info("Own node address %s, network identity %u\n", 188 info("Own node address %s, network identity %u\n",
@@ -201,10 +194,9 @@ void tipc_net_stop(void)
201{ 194{
202 struct tipc_node *node, *t_node; 195 struct tipc_node *node, *t_node;
203 196
204 if (tipc_mode != TIPC_NET_MODE) 197 if (!tipc_own_addr)
205 return; 198 return;
206 write_lock_bh(&tipc_net_lock); 199 write_lock_bh(&tipc_net_lock);
207 tipc_mode = TIPC_NODE_MODE;
208 tipc_bearer_stop(); 200 tipc_bearer_stop();
209 tipc_bclink_stop(); 201 tipc_bclink_stop();
210 list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 202 list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6b226faad89f..d4fd341e6e0d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -39,6 +39,8 @@
39#include "node.h" 39#include "node.h"
40#include "name_distr.h" 40#include "name_distr.h"
41 41
42#define NODE_HTABLE_SIZE 512
43
42static void node_lost_contact(struct tipc_node *n_ptr); 44static void node_lost_contact(struct tipc_node *n_ptr);
43static void node_established_contact(struct tipc_node *n_ptr); 45static void node_established_contact(struct tipc_node *n_ptr);
44 46
@@ -49,18 +51,27 @@ LIST_HEAD(tipc_node_list);
49static u32 tipc_num_nodes; 51static u32 tipc_num_nodes;
50 52
51static atomic_t tipc_num_links = ATOMIC_INIT(0); 53static atomic_t tipc_num_links = ATOMIC_INIT(0);
52u32 tipc_own_tag;
53 54
54/** 55/*
55 * tipc_node_find - locate specified node object, if it exists 56 * A trivial power-of-two bitmask technique is used for speed, since this
57 * operation is done for every incoming TIPC packet. The number of hash table
58 * entries has been chosen so that no hash chain exceeds 8 nodes and will
59 * usually be much smaller (typically only a single node).
56 */ 60 */
61static unsigned int tipc_hashfn(u32 addr)
62{
63 return addr & (NODE_HTABLE_SIZE - 1);
64}
57 65
66/*
67 * tipc_node_find - locate specified node object, if it exists
68 */
58struct tipc_node *tipc_node_find(u32 addr) 69struct tipc_node *tipc_node_find(u32 addr)
59{ 70{
60 struct tipc_node *node; 71 struct tipc_node *node;
61 struct hlist_node *pos; 72 struct hlist_node *pos;
62 73
63 if (unlikely(!in_own_cluster(addr))) 74 if (unlikely(!in_own_cluster_exact(addr)))
64 return NULL; 75 return NULL;
65 76
66 hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { 77 hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
@@ -79,7 +90,6 @@ struct tipc_node *tipc_node_find(u32 addr)
79 * time. (It would be preferable to switch to holding net_lock in write mode, 90 * time. (It would be preferable to switch to holding net_lock in write mode,
80 * but this is a non-trivial change.) 91 * but this is a non-trivial change.)
81 */ 92 */
82
83struct tipc_node *tipc_node_create(u32 addr) 93struct tipc_node *tipc_node_create(u32 addr)
84{ 94{
85 struct tipc_node *n_ptr, *temp_node; 95 struct tipc_node *n_ptr, *temp_node;
@@ -113,6 +123,7 @@ struct tipc_node *tipc_node_create(u32 addr)
113 } 123 }
114 list_add_tail(&n_ptr->list, &temp_node->list); 124 list_add_tail(&n_ptr->list, &temp_node->list);
115 n_ptr->block_setup = WAIT_PEER_DOWN; 125 n_ptr->block_setup = WAIT_PEER_DOWN;
126 n_ptr->signature = INVALID_NODE_SIG;
116 127
117 tipc_num_nodes++; 128 tipc_num_nodes++;
118 129
@@ -129,13 +140,11 @@ void tipc_node_delete(struct tipc_node *n_ptr)
129 tipc_num_nodes--; 140 tipc_num_nodes--;
130} 141}
131 142
132
133/** 143/**
134 * tipc_node_link_up - handle addition of link 144 * tipc_node_link_up - handle addition of link
135 * 145 *
136 * Link becomes active (alone or shared) or standby, depending on its priority. 146 * Link becomes active (alone or shared) or standby, depending on its priority.
137 */ 147 */
138
139void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 148void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
140{ 149{
141 struct tipc_link **active = &n_ptr->active_links[0]; 150 struct tipc_link **active = &n_ptr->active_links[0];
@@ -168,7 +177,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
168/** 177/**
169 * node_select_active_links - select active link 178 * node_select_active_links - select active link
170 */ 179 */
171
172static void node_select_active_links(struct tipc_node *n_ptr) 180static void node_select_active_links(struct tipc_node *n_ptr)
173{ 181{
174 struct tipc_link **active = &n_ptr->active_links[0]; 182 struct tipc_link **active = &n_ptr->active_links[0];
@@ -196,7 +204,6 @@ static void node_select_active_links(struct tipc_node *n_ptr)
196/** 204/**
197 * tipc_node_link_down - handle loss of link 205 * tipc_node_link_down - handle loss of link
198 */ 206 */
199
200void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 207void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
201{ 208{
202 struct tipc_link **active; 209 struct tipc_link **active;
@@ -253,63 +260,14 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
253 n_ptr->link_cnt--; 260 n_ptr->link_cnt--;
254} 261}
255 262
256/*
257 * Routing table management - five cases to handle:
258 *
259 * 1: A link towards a zone/cluster external node comes up.
260 * => Send a multicast message updating routing tables of all
261 * system nodes within own cluster that the new destination
262 * can be reached via this node.
263 * (node.establishedContact()=>cluster.multicastNewRoute())
264 *
265 * 2: A link towards a slave node comes up.
266 * => Send a multicast message updating routing tables of all
267 * system nodes within own cluster that the new destination
268 * can be reached via this node.
269 * (node.establishedContact()=>cluster.multicastNewRoute())
270 * => Send a message to the slave node about existence
271 * of all system nodes within cluster:
272 * (node.establishedContact()=>cluster.sendLocalRoutes())
273 *
274 * 3: A new cluster local system node becomes available.
275 * => Send message(s) to this particular node containing
276 * information about all cluster external and slave
277 * nodes which can be reached via this node.
278 * (node.establishedContact()==>network.sendExternalRoutes())
279 * (node.establishedContact()==>network.sendSlaveRoutes())
280 * => Send messages to all directly connected slave nodes
281 * containing information about the existence of the new node
282 * (node.establishedContact()=>cluster.multicastNewRoute())
283 *
284 * 4: The link towards a zone/cluster external node or slave
285 * node goes down.
286 * => Send a multcast message updating routing tables of all
287 * nodes within cluster that the new destination can not any
288 * longer be reached via this node.
289 * (node.lostAllLinks()=>cluster.bcastLostRoute())
290 *
291 * 5: A cluster local system node becomes unavailable.
292 * => Remove all references to this node from the local
293 * routing tables. Note: This is a completely node
294 * local operation.
295 * (node.lostAllLinks()=>network.removeAsRouter())
296 * => Send messages to all directly connected slave nodes
297 * containing information about loss of the node
298 * (node.establishedContact()=>cluster.multicastLostRoute())
299 *
300 */
301
302static void node_established_contact(struct tipc_node *n_ptr) 263static void node_established_contact(struct tipc_node *n_ptr)
303{ 264{
304 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 265 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
305 266
306 /* Syncronize broadcast acks */ 267 if (n_ptr->bclink.supportable) {
307 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 268 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
308
309 if (n_ptr->bclink.supported) {
310 tipc_bclink_add_node(n_ptr->addr); 269 tipc_bclink_add_node(n_ptr->addr);
311 if (n_ptr->addr < tipc_own_addr) 270 n_ptr->bclink.supported = 1;
312 tipc_own_tag++;
313 } 271 }
314} 272}
315 273
@@ -336,24 +294,21 @@ static void node_lost_contact(struct tipc_node *n_ptr)
336 tipc_addr_string_fill(addr_string, n_ptr->addr)); 294 tipc_addr_string_fill(addr_string, n_ptr->addr));
337 295
338 /* Flush broadcast link info associated with lost node */ 296 /* Flush broadcast link info associated with lost node */
339
340 if (n_ptr->bclink.supported) { 297 if (n_ptr->bclink.supported) {
341 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
342 while (n_ptr->bclink.deferred_head) { 298 while (n_ptr->bclink.deferred_head) {
343 struct sk_buff *buf = n_ptr->bclink.deferred_head; 299 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 n_ptr->bclink.deferred_head = buf->next; 300 n_ptr->bclink.deferred_head = buf->next;
345 buf_discard(buf); 301 kfree_skb(buf);
346 } 302 }
303 n_ptr->bclink.deferred_size = 0;
347 304
348 if (n_ptr->bclink.defragm) { 305 if (n_ptr->bclink.defragm) {
349 buf_discard(n_ptr->bclink.defragm); 306 kfree_skb(n_ptr->bclink.defragm);
350 n_ptr->bclink.defragm = NULL; 307 n_ptr->bclink.defragm = NULL;
351 } 308 }
352 309
353 tipc_bclink_remove_node(n_ptr->addr); 310 tipc_bclink_remove_node(n_ptr->addr);
354 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); 311 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
355 if (n_ptr->addr < tipc_own_addr)
356 tipc_own_tag--;
357 312
358 n_ptr->bclink.supported = 0; 313 n_ptr->bclink.supported = 0;
359 } 314 }
@@ -372,7 +327,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
372 tipc_nodesub_notify(n_ptr); 327 tipc_nodesub_notify(n_ptr);
373 328
374 /* Prevent re-contact with node until cleanup is done */ 329 /* Prevent re-contact with node until cleanup is done */
375
376 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; 330 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
377 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); 331 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
378} 332}
@@ -400,7 +354,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
400 } 354 }
401 355
402 /* For now, get space for all other nodes */ 356 /* For now, get space for all other nodes */
403
404 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; 357 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
405 if (payload_size > 32768u) { 358 if (payload_size > 32768u) {
406 read_unlock_bh(&tipc_net_lock); 359 read_unlock_bh(&tipc_net_lock);
@@ -414,7 +367,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
414 } 367 }
415 368
416 /* Add TLVs for all nodes in scope */ 369 /* Add TLVs for all nodes in scope */
417
418 list_for_each_entry(n_ptr, &tipc_node_list, list) { 370 list_for_each_entry(n_ptr, &tipc_node_list, list) {
419 if (!tipc_in_scope(domain, n_ptr->addr)) 371 if (!tipc_in_scope(domain, n_ptr->addr))
420 continue; 372 continue;
@@ -444,13 +396,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
444 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 396 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
445 " (network address)"); 397 " (network address)");
446 398
447 if (tipc_mode != TIPC_NET_MODE) 399 if (!tipc_own_addr)
448 return tipc_cfg_reply_none(); 400 return tipc_cfg_reply_none();
449 401
450 read_lock_bh(&tipc_net_lock); 402 read_lock_bh(&tipc_net_lock);
451 403
452 /* Get space for all unicast links + multicast link */ 404 /* Get space for all unicast links + broadcast link */
453
454 payload_size = TLV_SPACE(sizeof(link_info)) * 405 payload_size = TLV_SPACE(sizeof(link_info)) *
455 (atomic_read(&tipc_num_links) + 1); 406 (atomic_read(&tipc_num_links) + 1);
456 if (payload_size > 32768u) { 407 if (payload_size > 32768u) {
@@ -465,14 +416,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
465 } 416 }
466 417
467 /* Add TLV for broadcast link */ 418 /* Add TLV for broadcast link */
468
469 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); 419 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
470 link_info.up = htonl(1); 420 link_info.up = htonl(1);
471 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); 421 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
472 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 422 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
473 423
474 /* Add TLVs for any other links in scope */ 424 /* Add TLVs for any other links in scope */
475
476 list_for_each_entry(n_ptr, &tipc_node_list, list) { 425 list_for_each_entry(n_ptr, &tipc_node_list, list) {
477 u32 i; 426 u32 i;
478 427
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 0b1c5f8b6996..cfcaf4d6e480 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,8 +42,12 @@
42#include "net.h" 42#include "net.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* Flags used to block (re)establishment of contact with a neighboring node */ 45/*
46 * Out-of-range value for node signature
47 */
48#define INVALID_NODE_SIG 0x10000
46 49
50/* Flags used to block (re)establishment of contact with a neighboring node */
47#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 51#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
48#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ 52#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
49#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ 53#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
@@ -61,18 +65,19 @@
61 * @block_setup: bit mask of conditions preventing link establishment to node 65 * @block_setup: bit mask of conditions preventing link establishment to node
62 * @link_cnt: number of links to node 66 * @link_cnt: number of links to node
63 * @permit_changeover: non-zero if node has redundant links to this system 67 * @permit_changeover: non-zero if node has redundant links to this system
68 * @signature: node instance identifier
64 * @bclink: broadcast-related info 69 * @bclink: broadcast-related info
70 * @supportable: non-zero if node supports TIPC b'cast link capability
65 * @supported: non-zero if node supports TIPC b'cast capability 71 * @supported: non-zero if node supports TIPC b'cast capability
66 * @acked: sequence # of last outbound b'cast message acknowledged by node 72 * @acked: sequence # of last outbound b'cast message acknowledged by node
67 * @last_in: sequence # of last in-sequence b'cast message received from node 73 * @last_in: sequence # of last in-sequence b'cast message received from node
68 * @gap_after: sequence # of last message not requiring a NAK request 74 * @last_sent: sequence # of last b'cast message sent by node
69 * @gap_to: sequence # of last message requiring a NAK request 75 * @oos_state: state tracker for handling OOS b'cast messages
70 * @nack_sync: counter that determines when NAK requests should be sent 76 * @deferred_size: number of OOS b'cast messages in deferred queue
71 * @deferred_head: oldest OOS b'cast message received from node 77 * @deferred_head: oldest OOS b'cast message received from node
72 * @deferred_tail: newest OOS b'cast message received from node 78 * @deferred_tail: newest OOS b'cast message received from node
73 * @defragm: list of partially reassembled b'cast message fragments from node 79 * @defragm: list of partially reassembled b'cast message fragments from node
74 */ 80 */
75
76struct tipc_node { 81struct tipc_node {
77 u32 addr; 82 u32 addr;
78 spinlock_t lock; 83 spinlock_t lock;
@@ -85,35 +90,23 @@ struct tipc_node {
85 int working_links; 90 int working_links;
86 int block_setup; 91 int block_setup;
87 int permit_changeover; 92 int permit_changeover;
93 u32 signature;
88 struct { 94 struct {
89 int supported; 95 u8 supportable;
96 u8 supported;
90 u32 acked; 97 u32 acked;
91 u32 last_in; 98 u32 last_in;
92 u32 gap_after; 99 u32 last_sent;
93 u32 gap_to; 100 u32 oos_state;
94 u32 nack_sync; 101 u32 deferred_size;
95 struct sk_buff *deferred_head; 102 struct sk_buff *deferred_head;
96 struct sk_buff *deferred_tail; 103 struct sk_buff *deferred_tail;
97 struct sk_buff *defragm; 104 struct sk_buff *defragm;
98 } bclink; 105 } bclink;
99}; 106};
100 107
101#define NODE_HTABLE_SIZE 512
102extern struct list_head tipc_node_list; 108extern struct list_head tipc_node_list;
103 109
104/*
105 * A trivial power-of-two bitmask technique is used for speed, since this
106 * operation is done for every incoming TIPC packet. The number of hash table
107 * entries has been chosen so that no hash chain exceeds 8 nodes and will
108 * usually be much smaller (typically only a single node).
109 */
110static inline unsigned int tipc_hashfn(u32 addr)
111{
112 return addr & (NODE_HTABLE_SIZE - 1);
113}
114
115extern u32 tipc_own_tag;
116
117struct tipc_node *tipc_node_find(u32 addr); 110struct tipc_node *tipc_node_find(u32 addr);
118struct tipc_node *tipc_node_create(u32 addr); 111struct tipc_node *tipc_node_create(u32 addr);
119void tipc_node_delete(struct tipc_node *n_ptr); 112void tipc_node_delete(struct tipc_node *n_ptr);
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index c3c2815ae630..7a27344108fe 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,11 +41,10 @@
41/** 41/**
42 * tipc_nodesub_subscribe - create "node down" subscription for specified node 42 * tipc_nodesub_subscribe - create "node down" subscription for specified node
43 */ 43 */
44
45void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, 44void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
46 void *usr_handle, net_ev_handler handle_down) 45 void *usr_handle, net_ev_handler handle_down)
47{ 46{
48 if (addr == tipc_own_addr) { 47 if (in_own_node(addr)) {
49 node_sub->node = NULL; 48 node_sub->node = NULL;
50 return; 49 return;
51 } 50 }
@@ -66,7 +65,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
66/** 65/**
67 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) 66 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
68 */ 67 */
69
70void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) 68void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
71{ 69{
72 if (!node_sub->node) 70 if (!node_sub->node)
@@ -82,7 +80,6 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
82 * 80 *
83 * Note: node is locked by caller 81 * Note: node is locked by caller
84 */ 82 */
85
86void tipc_nodesub_notify(struct tipc_node *node) 83void tipc_nodesub_notify(struct tipc_node *node)
87{ 84{
88 struct tipc_node_subscr *ns; 85 struct tipc_node_subscr *ns;
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 4bc2ca0867a1..c95d20727ded 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle);
48 * @usr_handle: argument to pass to routine when node fails 48 * @usr_handle: argument to pass to routine when node fails
49 * @nodesub_list: adjacent entries in list of subscriptions for the node 49 * @nodesub_list: adjacent entries in list of subscriptions for the node
50 */ 50 */
51
52struct tipc_node_subscr { 51struct tipc_node_subscr {
53 struct tipc_node *node; 52 struct tipc_node *node;
54 net_ev_handler handle_node_down; 53 net_ev_handler handle_node_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index d91efc69e6f9..2ad37a4db376 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,10 +69,30 @@ static u32 port_peerport(struct tipc_port *p_ptr)
69 return msg_destport(&p_ptr->phdr); 69 return msg_destport(&p_ptr->phdr);
70} 70}
71 71
72/*
73 * tipc_port_peer_msg - verify message was sent by connected port's peer
74 *
75 * Handles cases where the node's network address has changed from
76 * the default of <0.0.0> to its configured setting.
77 */
78int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
79{
80 u32 peernode;
81 u32 orignode;
82
83 if (msg_origport(msg) != port_peerport(p_ptr))
84 return 0;
85
86 orignode = msg_orignode(msg);
87 peernode = port_peernode(p_ptr);
88 return (orignode == peernode) ||
89 (!orignode && (peernode == tipc_own_addr)) ||
90 (!peernode && (orignode == tipc_own_addr));
91}
92
72/** 93/**
73 * tipc_multicast - send a multicast message to local and remote destinations 94 * tipc_multicast - send a multicast message to local and remote destinations
74 */ 95 */
75
76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, 96int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
77 u32 num_sect, struct iovec const *msg_sect, 97 u32 num_sect, struct iovec const *msg_sect,
78 unsigned int total_len) 98 unsigned int total_len)
@@ -89,7 +109,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
89 return -EINVAL; 109 return -EINVAL;
90 110
91 /* Create multicast message */ 111 /* Create multicast message */
92
93 hdr = &oport->phdr; 112 hdr = &oport->phdr;
94 msg_set_type(hdr, TIPC_MCAST_MSG); 113 msg_set_type(hdr, TIPC_MCAST_MSG);
95 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 114 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
@@ -105,24 +124,22 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
105 return res; 124 return res;
106 125
107 /* Figure out where to send multicast message */ 126 /* Figure out where to send multicast message */
108
109 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, 127 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
110 TIPC_NODE_SCOPE, &dports); 128 TIPC_NODE_SCOPE, &dports);
111 129
112 /* Send message to destinations (duplicate it only if necessary) */ 130 /* Send message to destinations (duplicate it only if necessary) */
113
114 if (ext_targets) { 131 if (ext_targets) {
115 if (dports.count != 0) { 132 if (dports.count != 0) {
116 ibuf = skb_copy(buf, GFP_ATOMIC); 133 ibuf = skb_copy(buf, GFP_ATOMIC);
117 if (ibuf == NULL) { 134 if (ibuf == NULL) {
118 tipc_port_list_free(&dports); 135 tipc_port_list_free(&dports);
119 buf_discard(buf); 136 kfree_skb(buf);
120 return -ENOMEM; 137 return -ENOMEM;
121 } 138 }
122 } 139 }
123 res = tipc_bclink_send_msg(buf); 140 res = tipc_bclink_send_msg(buf);
124 if ((res < 0) && (dports.count != 0)) 141 if ((res < 0) && (dports.count != 0))
125 buf_discard(ibuf); 142 kfree_skb(ibuf);
126 } else { 143 } else {
127 ibuf = buf; 144 ibuf = buf;
128 } 145 }
@@ -141,7 +158,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
141 * 158 *
142 * If there is no port list, perform a lookup to create one 159 * If there is no port list, perform a lookup to create one
143 */ 160 */
144
145void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) 161void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
146{ 162{
147 struct tipc_msg *msg; 163 struct tipc_msg *msg;
@@ -152,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
152 msg = buf_msg(buf); 168 msg = buf_msg(buf);
153 169
154 /* Create destination port list, if one wasn't supplied */ 170 /* Create destination port list, if one wasn't supplied */
155
156 if (dp == NULL) { 171 if (dp == NULL) {
157 tipc_nametbl_mc_translate(msg_nametype(msg), 172 tipc_nametbl_mc_translate(msg_nametype(msg),
158 msg_namelower(msg), 173 msg_namelower(msg),
@@ -163,7 +178,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
163 } 178 }
164 179
165 /* Deliver a copy of message to each destination port */ 180 /* Deliver a copy of message to each destination port */
166
167 if (dp->count != 0) { 181 if (dp->count != 0) {
168 msg_set_destnode(msg, tipc_own_addr); 182 msg_set_destnode(msg, tipc_own_addr);
169 if (dp->count == 1) { 183 if (dp->count == 1) {
@@ -187,7 +201,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
187 } 201 }
188 } 202 }
189exit: 203exit:
190 buf_discard(buf); 204 kfree_skb(buf);
191 tipc_port_list_free(dp); 205 tipc_port_list_free(dp);
192} 206}
193 207
@@ -196,7 +210,6 @@ exit:
196 * 210 *
197 * Returns pointer to (locked) TIPC port, or NULL if unable to create it 211 * Returns pointer to (locked) TIPC port, or NULL if unable to create it
198 */ 212 */
199
200struct tipc_port *tipc_createport_raw(void *usr_handle, 213struct tipc_port *tipc_createport_raw(void *usr_handle,
201 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 214 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
202 void (*wakeup)(struct tipc_port *), 215 void (*wakeup)(struct tipc_port *),
@@ -221,18 +234,24 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
221 p_ptr->usr_handle = usr_handle; 234 p_ptr->usr_handle = usr_handle;
222 p_ptr->max_pkt = MAX_PKT_DEFAULT; 235 p_ptr->max_pkt = MAX_PKT_DEFAULT;
223 p_ptr->ref = ref; 236 p_ptr->ref = ref;
224 msg = &p_ptr->phdr;
225 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
226 msg_set_origport(msg, ref);
227 INIT_LIST_HEAD(&p_ptr->wait_list); 237 INIT_LIST_HEAD(&p_ptr->wait_list);
228 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); 238 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
229 p_ptr->dispatcher = dispatcher; 239 p_ptr->dispatcher = dispatcher;
230 p_ptr->wakeup = wakeup; 240 p_ptr->wakeup = wakeup;
231 p_ptr->user_port = NULL; 241 p_ptr->user_port = NULL;
232 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); 242 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
233 spin_lock_bh(&tipc_port_list_lock);
234 INIT_LIST_HEAD(&p_ptr->publications); 243 INIT_LIST_HEAD(&p_ptr->publications);
235 INIT_LIST_HEAD(&p_ptr->port_list); 244 INIT_LIST_HEAD(&p_ptr->port_list);
245
246 /*
247 * Must hold port list lock while initializing message header template
248 * to ensure a change to node's own network address doesn't result
249 * in template containing out-dated network address information
250 */
251 spin_lock_bh(&tipc_port_list_lock);
252 msg = &p_ptr->phdr;
253 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
254 msg_set_origport(msg, ref);
236 list_add_tail(&p_ptr->port_list, &ports); 255 list_add_tail(&p_ptr->port_list, &ports);
237 spin_unlock_bh(&tipc_port_list_lock); 256 spin_unlock_bh(&tipc_port_list_lock);
238 return p_ptr; 257 return p_ptr;
@@ -361,7 +380,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
361 u32 rmsg_sz; 380 u32 rmsg_sz;
362 381
363 /* discard rejected message if it shouldn't be returned to sender */ 382 /* discard rejected message if it shouldn't be returned to sender */
364
365 if (WARN(!msg_isdata(msg), 383 if (WARN(!msg_isdata(msg),
366 "attempt to reject message with user=%u", msg_user(msg))) { 384 "attempt to reject message with user=%u", msg_user(msg))) {
367 dump_stack(); 385 dump_stack();
@@ -374,7 +392,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
374 * construct returned message by copying rejected message header and 392 * construct returned message by copying rejected message header and
375 * data (or subset), then updating header fields that need adjusting 393 * data (or subset), then updating header fields that need adjusting
376 */ 394 */
377
378 hdr_sz = msg_hdr_sz(msg); 395 hdr_sz = msg_hdr_sz(msg);
379 rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE); 396 rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
380 397
@@ -400,26 +417,26 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
400 417
401 /* send self-abort message when rejecting on a connected port */ 418 /* send self-abort message when rejecting on a connected port */
402 if (msg_connected(msg)) { 419 if (msg_connected(msg)) {
403 struct sk_buff *abuf = NULL;
404 struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg)); 420 struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
405 421
406 if (p_ptr) { 422 if (p_ptr) {
423 struct sk_buff *abuf = NULL;
424
407 if (p_ptr->connected) 425 if (p_ptr->connected)
408 abuf = port_build_self_abort_msg(p_ptr, err); 426 abuf = port_build_self_abort_msg(p_ptr, err);
409 tipc_port_unlock(p_ptr); 427 tipc_port_unlock(p_ptr);
428 tipc_net_route_msg(abuf);
410 } 429 }
411 tipc_net_route_msg(abuf);
412 } 430 }
413 431
414 /* send returned message & dispose of rejected message */ 432 /* send returned message & dispose of rejected message */
415
416 src_node = msg_prevnode(msg); 433 src_node = msg_prevnode(msg);
417 if (src_node == tipc_own_addr) 434 if (in_own_node(src_node))
418 tipc_port_recv_msg(rbuf); 435 tipc_port_recv_msg(rbuf);
419 else 436 else
420 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); 437 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
421exit: 438exit:
422 buf_discard(buf); 439 kfree_skb(buf);
423 return data_sz; 440 return data_sz;
424} 441}
425 442
@@ -518,25 +535,20 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
518 struct tipc_msg *msg = buf_msg(buf); 535 struct tipc_msg *msg = buf_msg(buf);
519 struct tipc_port *p_ptr; 536 struct tipc_port *p_ptr;
520 struct sk_buff *r_buf = NULL; 537 struct sk_buff *r_buf = NULL;
521 u32 orignode = msg_orignode(msg);
522 u32 origport = msg_origport(msg);
523 u32 destport = msg_destport(msg); 538 u32 destport = msg_destport(msg);
524 int wakeable; 539 int wakeable;
525 540
526 /* Validate connection */ 541 /* Validate connection */
527
528 p_ptr = tipc_port_lock(destport); 542 p_ptr = tipc_port_lock(destport);
529 if (!p_ptr || !p_ptr->connected || 543 if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
530 (port_peernode(p_ptr) != orignode) ||
531 (port_peerport(p_ptr) != origport)) {
532 r_buf = tipc_buf_acquire(BASIC_H_SIZE); 544 r_buf = tipc_buf_acquire(BASIC_H_SIZE);
533 if (r_buf) { 545 if (r_buf) {
534 msg = buf_msg(r_buf); 546 msg = buf_msg(r_buf);
535 tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG, 547 tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
536 BASIC_H_SIZE, orignode); 548 BASIC_H_SIZE, msg_orignode(msg));
537 msg_set_errcode(msg, TIPC_ERR_NO_PORT); 549 msg_set_errcode(msg, TIPC_ERR_NO_PORT);
538 msg_set_origport(msg, destport); 550 msg_set_origport(msg, destport);
539 msg_set_destport(msg, origport); 551 msg_set_destport(msg, msg_origport(msg));
540 } 552 }
541 if (p_ptr) 553 if (p_ptr)
542 tipc_port_unlock(p_ptr); 554 tipc_port_unlock(p_ptr);
@@ -544,7 +556,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
544 } 556 }
545 557
546 /* Process protocol message sent by peer */ 558 /* Process protocol message sent by peer */
547
548 switch (msg_type(msg)) { 559 switch (msg_type(msg)) {
549 case CONN_ACK: 560 case CONN_ACK:
550 wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && 561 wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
@@ -567,7 +578,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
567 tipc_port_unlock(p_ptr); 578 tipc_port_unlock(p_ptr);
568exit: 579exit:
569 tipc_net_route_msg(r_buf); 580 tipc_net_route_msg(r_buf);
570 buf_discard(buf); 581 kfree_skb(buf);
571} 582}
572 583
573static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id) 584static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
@@ -645,8 +656,6 @@ void tipc_port_reinit(void)
645 spin_lock_bh(&tipc_port_list_lock); 656 spin_lock_bh(&tipc_port_list_lock);
646 list_for_each_entry(p_ptr, &ports, port_list) { 657 list_for_each_entry(p_ptr, &ports, port_list) {
647 msg = &p_ptr->phdr; 658 msg = &p_ptr->phdr;
648 if (msg_orignode(msg) == tipc_own_addr)
649 break;
650 msg_set_prevnode(msg, tipc_own_addr); 659 msg_set_prevnode(msg, tipc_own_addr);
651 msg_set_orignode(msg, tipc_own_addr); 660 msg_set_orignode(msg, tipc_own_addr);
652 } 661 }
@@ -658,7 +667,6 @@ void tipc_port_reinit(void)
658 * port_dispatcher_sigh(): Signal handler for messages destinated 667 * port_dispatcher_sigh(): Signal handler for messages destinated
659 * to the tipc_port interface. 668 * to the tipc_port interface.
660 */ 669 */
661
662static void port_dispatcher_sigh(void *dummy) 670static void port_dispatcher_sigh(void *dummy)
663{ 671{
664 struct sk_buff *buf; 672 struct sk_buff *buf;
@@ -675,6 +683,7 @@ static void port_dispatcher_sigh(void *dummy)
675 struct tipc_name_seq dseq; 683 struct tipc_name_seq dseq;
676 void *usr_handle; 684 void *usr_handle;
677 int connected; 685 int connected;
686 int peer_invalid;
678 int published; 687 int published;
679 u32 message_type; 688 u32 message_type;
680 689
@@ -695,6 +704,7 @@ static void port_dispatcher_sigh(void *dummy)
695 up_ptr = p_ptr->user_port; 704 up_ptr = p_ptr->user_port;
696 usr_handle = up_ptr->usr_handle; 705 usr_handle = up_ptr->usr_handle;
697 connected = p_ptr->connected; 706 connected = p_ptr->connected;
707 peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
698 published = p_ptr->published; 708 published = p_ptr->published;
699 709
700 if (unlikely(msg_errcode(msg))) 710 if (unlikely(msg_errcode(msg)))
@@ -704,8 +714,6 @@ static void port_dispatcher_sigh(void *dummy)
704 714
705 case TIPC_CONN_MSG:{ 715 case TIPC_CONN_MSG:{
706 tipc_conn_msg_event cb = up_ptr->conn_msg_cb; 716 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
707 u32 peer_port = port_peerport(p_ptr);
708 u32 peer_node = port_peernode(p_ptr);
709 u32 dsz; 717 u32 dsz;
710 718
711 tipc_port_unlock(p_ptr); 719 tipc_port_unlock(p_ptr);
@@ -714,8 +722,7 @@ static void port_dispatcher_sigh(void *dummy)
714 if (unlikely(!connected)) { 722 if (unlikely(!connected)) {
715 if (tipc_connect2port(dref, &orig)) 723 if (tipc_connect2port(dref, &orig))
716 goto reject; 724 goto reject;
717 } else if ((msg_origport(msg) != peer_port) || 725 } else if (peer_invalid)
718 (msg_orignode(msg) != peer_node))
719 goto reject; 726 goto reject;
720 dsz = msg_data_sz(msg); 727 dsz = msg_data_sz(msg);
721 if (unlikely(dsz && 728 if (unlikely(dsz &&
@@ -758,7 +765,7 @@ static void port_dispatcher_sigh(void *dummy)
758 } 765 }
759 } 766 }
760 if (buf) 767 if (buf)
761 buf_discard(buf); 768 kfree_skb(buf);
762 buf = next; 769 buf = next;
763 continue; 770 continue;
764err: 771err:
@@ -767,14 +774,9 @@ err:
767 case TIPC_CONN_MSG:{ 774 case TIPC_CONN_MSG:{
768 tipc_conn_shutdown_event cb = 775 tipc_conn_shutdown_event cb =
769 up_ptr->conn_err_cb; 776 up_ptr->conn_err_cb;
770 u32 peer_port = port_peerport(p_ptr);
771 u32 peer_node = port_peernode(p_ptr);
772 777
773 tipc_port_unlock(p_ptr); 778 tipc_port_unlock(p_ptr);
774 if (!cb || !connected) 779 if (!cb || !connected || peer_invalid)
775 break;
776 if ((msg_origport(msg) != peer_port) ||
777 (msg_orignode(msg) != peer_node))
778 break; 780 break;
779 tipc_disconnect(dref); 781 tipc_disconnect(dref);
780 skb_pull(buf, msg_hdr_sz(msg)); 782 skb_pull(buf, msg_hdr_sz(msg));
@@ -812,7 +814,7 @@ err:
812 } 814 }
813 } 815 }
814 if (buf) 816 if (buf)
815 buf_discard(buf); 817 kfree_skb(buf);
816 buf = next; 818 buf = next;
817 continue; 819 continue;
818reject: 820reject:
@@ -825,7 +827,6 @@ reject:
825 * port_dispatcher(): Dispatcher for messages destinated 827 * port_dispatcher(): Dispatcher for messages destinated
826 * to the tipc_port interface. Called with port locked. 828 * to the tipc_port interface. Called with port locked.
827 */ 829 */
828
829static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) 830static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
830{ 831{
831 buf->next = NULL; 832 buf->next = NULL;
@@ -842,10 +843,8 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
842} 843}
843 844
844/* 845/*
845 * Wake up port after congestion: Called with port locked, 846 * Wake up port after congestion: Called with port locked
846 *
847 */ 847 */
848
849static void port_wakeup_sh(unsigned long ref) 848static void port_wakeup_sh(unsigned long ref)
850{ 849{
851 struct tipc_port *p_ptr; 850 struct tipc_port *p_ptr;
@@ -891,7 +890,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
891/* 890/*
892 * tipc_createport(): user level call. 891 * tipc_createport(): user level call.
893 */ 892 */
894
895int tipc_createport(void *usr_handle, 893int tipc_createport(void *usr_handle,
896 unsigned int importance, 894 unsigned int importance,
897 tipc_msg_err_event error_cb, 895 tipc_msg_err_event error_cb,
@@ -900,7 +898,7 @@ int tipc_createport(void *usr_handle,
900 tipc_msg_event msg_cb, 898 tipc_msg_event msg_cb,
901 tipc_named_msg_event named_msg_cb, 899 tipc_named_msg_event named_msg_cb,
902 tipc_conn_msg_event conn_msg_cb, 900 tipc_conn_msg_event conn_msg_cb,
903 tipc_continue_event continue_event_cb,/* May be zero */ 901 tipc_continue_event continue_event_cb, /* May be zero */
904 u32 *portref) 902 u32 *portref)
905{ 903{
906 struct user_port *up_ptr; 904 struct user_port *up_ptr;
@@ -974,10 +972,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
974 972
975 if (p_ptr->connected) 973 if (p_ptr->connected)
976 goto exit; 974 goto exit;
977 if (seq->lower > seq->upper)
978 goto exit;
979 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
980 goto exit;
981 key = ref + p_ptr->pub_count + 1; 975 key = ref + p_ptr->pub_count + 1;
982 if (key == ref) { 976 if (key == ref) {
983 res = -EADDRINUSE; 977 res = -EADDRINUSE;
@@ -1053,8 +1047,6 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1053 msg = &p_ptr->phdr; 1047 msg = &p_ptr->phdr;
1054 msg_set_destnode(msg, peer->node); 1048 msg_set_destnode(msg, peer->node);
1055 msg_set_destport(msg, peer->ref); 1049 msg_set_destport(msg, peer->ref);
1056 msg_set_orignode(msg, tipc_own_addr);
1057 msg_set_origport(msg, p_ptr->ref);
1058 msg_set_type(msg, TIPC_CONN_MSG); 1050 msg_set_type(msg, TIPC_CONN_MSG);
1059 msg_set_lookup_scope(msg, 0); 1051 msg_set_lookup_scope(msg, 0);
1060 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1052 msg_set_hdr_sz(msg, SHORT_H_SIZE);
@@ -1079,7 +1071,6 @@ exit:
1079 * 1071 *
1080 * Port must be locked. 1072 * Port must be locked.
1081 */ 1073 */
1082
1083int tipc_disconnect_port(struct tipc_port *tp_ptr) 1074int tipc_disconnect_port(struct tipc_port *tp_ptr)
1084{ 1075{
1085 int res; 1076 int res;
@@ -1100,7 +1091,6 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
1100 * tipc_disconnect(): Disconnect port form peer. 1091 * tipc_disconnect(): Disconnect port form peer.
1101 * This is a node local operation. 1092 * This is a node local operation.
1102 */ 1093 */
1103
1104int tipc_disconnect(u32 ref) 1094int tipc_disconnect(u32 ref)
1105{ 1095{
1106 struct tipc_port *p_ptr; 1096 struct tipc_port *p_ptr;
@@ -1132,11 +1122,41 @@ int tipc_shutdown(u32 ref)
1132 return tipc_disconnect(ref); 1122 return tipc_disconnect(ref);
1133} 1123}
1134 1124
1125/**
1126 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
1127 */
1128int tipc_port_recv_msg(struct sk_buff *buf)
1129{
1130 struct tipc_port *p_ptr;
1131 struct tipc_msg *msg = buf_msg(buf);
1132 u32 destport = msg_destport(msg);
1133 u32 dsz = msg_data_sz(msg);
1134 u32 err;
1135
1136 /* forward unresolved named message */
1137 if (unlikely(!destport)) {
1138 tipc_net_route_msg(buf);
1139 return dsz;
1140 }
1141
1142 /* validate destination & pass to port, otherwise reject message */
1143 p_ptr = tipc_port_lock(destport);
1144 if (likely(p_ptr)) {
1145 err = p_ptr->dispatcher(p_ptr, buf);
1146 tipc_port_unlock(p_ptr);
1147 if (likely(!err))
1148 return dsz;
1149 } else {
1150 err = TIPC_ERR_NO_PORT;
1151 }
1152
1153 return tipc_reject_msg(buf, err);
1154}
1155
1135/* 1156/*
1136 * tipc_port_recv_sections(): Concatenate and deliver sectioned 1157 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1137 * message for this node. 1158 * message for this node.
1138 */ 1159 */
1139
1140static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, 1160static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
1141 struct iovec const *msg_sect, 1161 struct iovec const *msg_sect,
1142 unsigned int total_len) 1162 unsigned int total_len)
@@ -1154,7 +1174,6 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
1154/** 1174/**
1155 * tipc_send - send message sections on connection 1175 * tipc_send - send message sections on connection
1156 */ 1176 */
1157
1158int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, 1177int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1159 unsigned int total_len) 1178 unsigned int total_len)
1160{ 1179{
@@ -1169,7 +1188,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1169 p_ptr->congested = 1; 1188 p_ptr->congested = 1;
1170 if (!tipc_port_congested(p_ptr)) { 1189 if (!tipc_port_congested(p_ptr)) {
1171 destnode = port_peernode(p_ptr); 1190 destnode = port_peernode(p_ptr);
1172 if (likely(destnode != tipc_own_addr)) 1191 if (likely(!in_own_node(destnode)))
1173 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1192 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1174 total_len, destnode); 1193 total_len, destnode);
1175 else 1194 else
@@ -1193,7 +1212,6 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1193/** 1212/**
1194 * tipc_send2name - send message sections to port name 1213 * tipc_send2name - send message sections to port name
1195 */ 1214 */
1196
1197int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, 1215int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1198 unsigned int num_sect, struct iovec const *msg_sect, 1216 unsigned int num_sect, struct iovec const *msg_sect,
1199 unsigned int total_len) 1217 unsigned int total_len)
@@ -1210,8 +1228,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1210 1228
1211 msg = &p_ptr->phdr; 1229 msg = &p_ptr->phdr;
1212 msg_set_type(msg, TIPC_NAMED_MSG); 1230 msg_set_type(msg, TIPC_NAMED_MSG);
1213 msg_set_orignode(msg, tipc_own_addr);
1214 msg_set_origport(msg, ref);
1215 msg_set_hdr_sz(msg, NAMED_H_SIZE); 1231 msg_set_hdr_sz(msg, NAMED_H_SIZE);
1216 msg_set_nametype(msg, name->type); 1232 msg_set_nametype(msg, name->type);
1217 msg_set_nameinst(msg, name->instance); 1233 msg_set_nameinst(msg, name->instance);
@@ -1220,14 +1236,18 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1220 msg_set_destnode(msg, destnode); 1236 msg_set_destnode(msg, destnode);
1221 msg_set_destport(msg, destport); 1237 msg_set_destport(msg, destport);
1222 1238
1223 if (likely(destport)) { 1239 if (likely(destport || destnode)) {
1224 if (likely(destnode == tipc_own_addr)) 1240 if (likely(in_own_node(destnode)))
1225 res = tipc_port_recv_sections(p_ptr, num_sect, 1241 res = tipc_port_recv_sections(p_ptr, num_sect,
1226 msg_sect, total_len); 1242 msg_sect, total_len);
1227 else 1243 else if (tipc_own_addr)
1228 res = tipc_link_send_sections_fast(p_ptr, msg_sect, 1244 res = tipc_link_send_sections_fast(p_ptr, msg_sect,
1229 num_sect, total_len, 1245 num_sect, total_len,
1230 destnode); 1246 destnode);
1247 else
1248 res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
1249 num_sect, total_len,
1250 TIPC_ERR_NO_NODE);
1231 if (likely(res != -ELINKCONG)) { 1251 if (likely(res != -ELINKCONG)) {
1232 if (res > 0) 1252 if (res > 0)
1233 p_ptr->sent++; 1253 p_ptr->sent++;
@@ -1245,7 +1265,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1245/** 1265/**
1246 * tipc_send2port - send message sections to port identity 1266 * tipc_send2port - send message sections to port identity
1247 */ 1267 */
1248
1249int tipc_send2port(u32 ref, struct tipc_portid const *dest, 1268int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1250 unsigned int num_sect, struct iovec const *msg_sect, 1269 unsigned int num_sect, struct iovec const *msg_sect,
1251 unsigned int total_len) 1270 unsigned int total_len)
@@ -1261,18 +1280,19 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1261 msg = &p_ptr->phdr; 1280 msg = &p_ptr->phdr;
1262 msg_set_type(msg, TIPC_DIRECT_MSG); 1281 msg_set_type(msg, TIPC_DIRECT_MSG);
1263 msg_set_lookup_scope(msg, 0); 1282 msg_set_lookup_scope(msg, 0);
1264 msg_set_orignode(msg, tipc_own_addr);
1265 msg_set_origport(msg, ref);
1266 msg_set_destnode(msg, dest->node); 1283 msg_set_destnode(msg, dest->node);
1267 msg_set_destport(msg, dest->ref); 1284 msg_set_destport(msg, dest->ref);
1268 msg_set_hdr_sz(msg, BASIC_H_SIZE); 1285 msg_set_hdr_sz(msg, BASIC_H_SIZE);
1269 1286
1270 if (dest->node == tipc_own_addr) 1287 if (in_own_node(dest->node))
1271 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, 1288 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1272 total_len); 1289 total_len);
1273 else 1290 else if (tipc_own_addr)
1274 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1291 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1275 total_len, dest->node); 1292 total_len, dest->node);
1293 else
1294 res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1295 total_len, TIPC_ERR_NO_NODE);
1276 if (likely(res != -ELINKCONG)) { 1296 if (likely(res != -ELINKCONG)) {
1277 if (res > 0) 1297 if (res > 0)
1278 p_ptr->sent++; 1298 p_ptr->sent++;
@@ -1287,7 +1307,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1287/** 1307/**
1288 * tipc_send_buf2port - send message buffer to port identity 1308 * tipc_send_buf2port - send message buffer to port identity
1289 */ 1309 */
1290
1291int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, 1310int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1292 struct sk_buff *buf, unsigned int dsz) 1311 struct sk_buff *buf, unsigned int dsz)
1293{ 1312{
@@ -1301,8 +1320,6 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1301 1320
1302 msg = &p_ptr->phdr; 1321 msg = &p_ptr->phdr;
1303 msg_set_type(msg, TIPC_DIRECT_MSG); 1322 msg_set_type(msg, TIPC_DIRECT_MSG);
1304 msg_set_orignode(msg, tipc_own_addr);
1305 msg_set_origport(msg, ref);
1306 msg_set_destnode(msg, dest->node); 1323 msg_set_destnode(msg, dest->node);
1307 msg_set_destport(msg, dest->ref); 1324 msg_set_destport(msg, dest->ref);
1308 msg_set_hdr_sz(msg, BASIC_H_SIZE); 1325 msg_set_hdr_sz(msg, BASIC_H_SIZE);
@@ -1313,7 +1330,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1313 skb_push(buf, BASIC_H_SIZE); 1330 skb_push(buf, BASIC_H_SIZE);
1314 skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE); 1331 skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
1315 1332
1316 if (dest->node == tipc_own_addr) 1333 if (in_own_node(dest->node))
1317 res = tipc_port_recv_msg(buf); 1334 res = tipc_port_recv_msg(buf);
1318 else 1335 else
1319 res = tipc_send_buf_fast(buf, dest->node); 1336 res = tipc_send_buf_fast(buf, dest->node);
@@ -1326,4 +1343,3 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1326 return dsz; 1343 return dsz;
1327 return -ELINKCONG; 1344 return -ELINKCONG;
1328} 1345}
1329
diff --git a/net/tipc/port.h b/net/tipc/port.h
index f751807e2a91..98cbec9c4532 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -81,7 +81,6 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
81 * @ref: object reference to associated TIPC port 81 * @ref: object reference to associated TIPC port
82 * <various callback routines> 82 * <various callback routines>
83 */ 83 */
84
85struct user_port { 84struct user_port {
86 void *usr_handle; 85 void *usr_handle;
87 u32 ref; 86 u32 ref;
@@ -201,10 +200,12 @@ int tipc_shutdown(u32 ref);
201 * The following routines require that the port be locked on entry 200 * The following routines require that the port be locked on entry
202 */ 201 */
203int tipc_disconnect_port(struct tipc_port *tp_ptr); 202int tipc_disconnect_port(struct tipc_port *tp_ptr);
203int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
204 204
205/* 205/*
206 * TIPC messaging routines 206 * TIPC messaging routines
207 */ 207 */
208int tipc_port_recv_msg(struct sk_buff *buf);
208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect, 209int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
209 unsigned int total_len); 210 unsigned int total_len);
210 211
@@ -234,7 +235,6 @@ void tipc_port_reinit(void);
234/** 235/**
235 * tipc_port_lock - lock port instance referred to and return its pointer 236 * tipc_port_lock - lock port instance referred to and return its pointer
236 */ 237 */
237
238static inline struct tipc_port *tipc_port_lock(u32 ref) 238static inline struct tipc_port *tipc_port_lock(u32 ref)
239{ 239{
240 return (struct tipc_port *)tipc_ref_lock(ref); 240 return (struct tipc_port *)tipc_ref_lock(ref);
@@ -245,7 +245,6 @@ static inline struct tipc_port *tipc_port_lock(u32 ref)
245 * 245 *
246 * Can use pointer instead of tipc_ref_unlock() since port is already locked. 246 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
247 */ 247 */
248
249static inline void tipc_port_unlock(struct tipc_port *p_ptr) 248static inline void tipc_port_unlock(struct tipc_port *p_ptr)
250{ 249{
251 spin_unlock_bh(p_ptr->lock); 250 spin_unlock_bh(p_ptr->lock);
@@ -256,60 +255,9 @@ static inline struct tipc_port *tipc_port_deref(u32 ref)
256 return (struct tipc_port *)tipc_ref_deref(ref); 255 return (struct tipc_port *)tipc_ref_deref(ref);
257} 256}
258 257
259static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
260{
261 return msg_destport(&p_ptr->phdr);
262}
263
264static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
265{
266 return msg_destnode(&p_ptr->phdr);
267}
268
269static inline int tipc_port_congested(struct tipc_port *p_ptr) 258static inline int tipc_port_congested(struct tipc_port *p_ptr)
270{ 259{
271 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 260 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
272} 261}
273 262
274/**
275 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
276 */
277
278static inline int tipc_port_recv_msg(struct sk_buff *buf)
279{
280 struct tipc_port *p_ptr;
281 struct tipc_msg *msg = buf_msg(buf);
282 u32 destport = msg_destport(msg);
283 u32 dsz = msg_data_sz(msg);
284 u32 err;
285
286 /* forward unresolved named message */
287 if (unlikely(!destport)) {
288 tipc_net_route_msg(buf);
289 return dsz;
290 }
291
292 /* validate destination & pass to port, otherwise reject message */
293 p_ptr = tipc_port_lock(destport);
294 if (likely(p_ptr)) {
295 if (likely(p_ptr->connected)) {
296 if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
297 (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
298 (unlikely(!msg_connected(msg)))) {
299 err = TIPC_ERR_NO_PORT;
300 tipc_port_unlock(p_ptr);
301 goto reject;
302 }
303 }
304 err = p_ptr->dispatcher(p_ptr, buf);
305 tipc_port_unlock(p_ptr);
306 if (likely(!err))
307 return dsz;
308 } else {
309 err = TIPC_ERR_NO_PORT;
310 }
311reject:
312 return tipc_reject_msg(buf, err);
313}
314
315#endif 263#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 9e37b7812c3c..5cada0e38e03 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
43 * @lock: spinlock controlling access to object 43 * @lock: spinlock controlling access to object
44 * @ref: reference value for object (combines instance & array index info) 44 * @ref: reference value for object (combines instance & array index info)
45 */ 45 */
46
47struct reference { 46struct reference {
48 void *object; 47 void *object;
49 spinlock_t lock; 48 spinlock_t lock;
@@ -60,7 +59,6 @@ struct reference {
60 * @index_mask: bitmask for array index portion of reference values 59 * @index_mask: bitmask for array index portion of reference values
61 * @start_mask: initial value for instance value portion of reference values 60 * @start_mask: initial value for instance value portion of reference values
62 */ 61 */
63
64struct ref_table { 62struct ref_table {
65 struct reference *entries; 63 struct reference *entries;
66 u32 capacity; 64 u32 capacity;
@@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock);
96/** 94/**
97 * tipc_ref_table_init - create reference table for objects 95 * tipc_ref_table_init - create reference table for objects
98 */ 96 */
99
100int tipc_ref_table_init(u32 requested_size, u32 start) 97int tipc_ref_table_init(u32 requested_size, u32 start)
101{ 98{
102 struct reference *table; 99 struct reference *table;
@@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
109 /* do nothing */ ; 106 /* do nothing */ ;
110 107
111 /* allocate table & mark all entries as uninitialized */ 108 /* allocate table & mark all entries as uninitialized */
112
113 table = vzalloc(actual_size * sizeof(struct reference)); 109 table = vzalloc(actual_size * sizeof(struct reference));
114 if (table == NULL) 110 if (table == NULL)
115 return -ENOMEM; 111 return -ENOMEM;
@@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
128/** 124/**
129 * tipc_ref_table_stop - destroy reference table for objects 125 * tipc_ref_table_stop - destroy reference table for objects
130 */ 126 */
131
132void tipc_ref_table_stop(void) 127void tipc_ref_table_stop(void)
133{ 128{
134 if (!tipc_ref_table.entries) 129 if (!tipc_ref_table.entries)
@@ -149,7 +144,6 @@ void tipc_ref_table_stop(void)
149 * register a partially initialized object, without running the risk that 144 * register a partially initialized object, without running the risk that
150 * the object will be accessed before initialization is complete. 145 * the object will be accessed before initialization is complete.
151 */ 146 */
152
153u32 tipc_ref_acquire(void *object, spinlock_t **lock) 147u32 tipc_ref_acquire(void *object, spinlock_t **lock)
154{ 148{
155 u32 index; 149 u32 index;
@@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
168 } 162 }
169 163
170 /* take a free entry, if available; otherwise initialize a new entry */ 164 /* take a free entry, if available; otherwise initialize a new entry */
171
172 write_lock_bh(&ref_table_lock); 165 write_lock_bh(&ref_table_lock);
173 if (tipc_ref_table.first_free) { 166 if (tipc_ref_table.first_free) {
174 index = tipc_ref_table.first_free; 167 index = tipc_ref_table.first_free;
@@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
211 * Disallow future references to an object and free up the entry for re-use. 204 * Disallow future references to an object and free up the entry for re-use.
212 * Note: The entry's spin_lock may still be busy after discard 205 * Note: The entry's spin_lock may still be busy after discard
213 */ 206 */
214
215void tipc_ref_discard(u32 ref) 207void tipc_ref_discard(u32 ref)
216{ 208{
217 struct reference *entry; 209 struct reference *entry;
@@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref)
242 * mark entry as unused; increment instance part of entry's reference 234 * mark entry as unused; increment instance part of entry's reference
243 * to invalidate any subsequent references 235 * to invalidate any subsequent references
244 */ 236 */
245
246 entry->object = NULL; 237 entry->object = NULL;
247 entry->ref = (ref & ~index_mask) + (index_mask + 1); 238 entry->ref = (ref & ~index_mask) + (index_mask + 1);
248 239
249 /* append entry to free entry list */ 240 /* append entry to free entry list */
250
251 if (tipc_ref_table.first_free == 0) 241 if (tipc_ref_table.first_free == 0)
252 tipc_ref_table.first_free = index; 242 tipc_ref_table.first_free = index;
253 else 243 else
@@ -261,7 +251,6 @@ exit:
261/** 251/**
262 * tipc_ref_lock - lock referenced object and return pointer to it 252 * tipc_ref_lock - lock referenced object and return pointer to it
263 */ 253 */
264
265void *tipc_ref_lock(u32 ref) 254void *tipc_ref_lock(u32 ref)
266{ 255{
267 if (likely(tipc_ref_table.entries)) { 256 if (likely(tipc_ref_table.entries)) {
@@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref)
283/** 272/**
284 * tipc_ref_deref - return pointer referenced object (without locking it) 273 * tipc_ref_deref - return pointer referenced object (without locking it)
285 */ 274 */
286
287void *tipc_ref_deref(u32 ref) 275void *tipc_ref_deref(u32 ref)
288{ 276{
289 if (likely(tipc_ref_table.entries)) { 277 if (likely(tipc_ref_table.entries)) {
@@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref)
296 } 284 }
297 return NULL; 285 return NULL;
298} 286}
299
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e2f7c5d370ba..5577a447f531 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -123,10 +123,9 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
123 * 123 *
124 * Caller must hold socket lock 124 * Caller must hold socket lock
125 */ 125 */
126
127static void advance_rx_queue(struct sock *sk) 126static void advance_rx_queue(struct sock *sk)
128{ 127{
129 buf_discard(__skb_dequeue(&sk->sk_receive_queue)); 128 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
130 atomic_dec(&tipc_queue_size); 129 atomic_dec(&tipc_queue_size);
131} 130}
132 131
@@ -135,14 +134,13 @@ static void advance_rx_queue(struct sock *sk)
135 * 134 *
136 * Caller must hold socket lock 135 * Caller must hold socket lock
137 */ 136 */
138
139static void discard_rx_queue(struct sock *sk) 137static void discard_rx_queue(struct sock *sk)
140{ 138{
141 struct sk_buff *buf; 139 struct sk_buff *buf;
142 140
143 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 141 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
144 atomic_dec(&tipc_queue_size); 142 atomic_dec(&tipc_queue_size);
145 buf_discard(buf); 143 kfree_skb(buf);
146 } 144 }
147} 145}
148 146
@@ -151,7 +149,6 @@ static void discard_rx_queue(struct sock *sk)
151 * 149 *
152 * Caller must hold socket lock 150 * Caller must hold socket lock
153 */ 151 */
154
155static void reject_rx_queue(struct sock *sk) 152static void reject_rx_queue(struct sock *sk)
156{ 153{
157 struct sk_buff *buf; 154 struct sk_buff *buf;
@@ -174,7 +171,6 @@ static void reject_rx_queue(struct sock *sk)
174 * 171 *
175 * Returns 0 on success, errno otherwise 172 * Returns 0 on success, errno otherwise
176 */ 173 */
177
178static int tipc_create(struct net *net, struct socket *sock, int protocol, 174static int tipc_create(struct net *net, struct socket *sock, int protocol,
179 int kern) 175 int kern)
180{ 176{
@@ -184,7 +180,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
184 struct tipc_port *tp_ptr; 180 struct tipc_port *tp_ptr;
185 181
186 /* Validate arguments */ 182 /* Validate arguments */
187
188 if (unlikely(protocol != 0)) 183 if (unlikely(protocol != 0))
189 return -EPROTONOSUPPORT; 184 return -EPROTONOSUPPORT;
190 185
@@ -207,13 +202,11 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
207 } 202 }
208 203
209 /* Allocate socket's protocol area */ 204 /* Allocate socket's protocol area */
210
211 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); 205 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
212 if (sk == NULL) 206 if (sk == NULL)
213 return -ENOMEM; 207 return -ENOMEM;
214 208
215 /* Allocate TIPC port for socket to use */ 209 /* Allocate TIPC port for socket to use */
216
217 tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, 210 tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
218 TIPC_LOW_IMPORTANCE); 211 TIPC_LOW_IMPORTANCE);
219 if (unlikely(!tp_ptr)) { 212 if (unlikely(!tp_ptr)) {
@@ -222,7 +215,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
222 } 215 }
223 216
224 /* Finish initializing socket data structures */ 217 /* Finish initializing socket data structures */
225
226 sock->ops = ops; 218 sock->ops = ops;
227 sock->state = state; 219 sock->state = state;
228 220
@@ -258,7 +250,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
258 * 250 *
259 * Returns 0 on success, errno otherwise 251 * Returns 0 on success, errno otherwise
260 */ 252 */
261
262static int release(struct socket *sock) 253static int release(struct socket *sock)
263{ 254{
264 struct sock *sk = sock->sk; 255 struct sock *sk = sock->sk;
@@ -270,7 +261,6 @@ static int release(struct socket *sock)
270 * Exit if socket isn't fully initialized (occurs when a failed accept() 261 * Exit if socket isn't fully initialized (occurs when a failed accept()
271 * releases a pre-allocated child socket that was never used) 262 * releases a pre-allocated child socket that was never used)
272 */ 263 */
273
274 if (sk == NULL) 264 if (sk == NULL)
275 return 0; 265 return 0;
276 266
@@ -281,14 +271,13 @@ static int release(struct socket *sock)
281 * Reject all unreceived messages, except on an active connection 271 * Reject all unreceived messages, except on an active connection
282 * (which disconnects locally & sends a 'FIN+' to peer) 272 * (which disconnects locally & sends a 'FIN+' to peer)
283 */ 273 */
284
285 while (sock->state != SS_DISCONNECTING) { 274 while (sock->state != SS_DISCONNECTING) {
286 buf = __skb_dequeue(&sk->sk_receive_queue); 275 buf = __skb_dequeue(&sk->sk_receive_queue);
287 if (buf == NULL) 276 if (buf == NULL)
288 break; 277 break;
289 atomic_dec(&tipc_queue_size); 278 atomic_dec(&tipc_queue_size);
290 if (TIPC_SKB_CB(buf)->handle != 0) 279 if (TIPC_SKB_CB(buf)->handle != 0)
291 buf_discard(buf); 280 kfree_skb(buf);
292 else { 281 else {
293 if ((sock->state == SS_CONNECTING) || 282 if ((sock->state == SS_CONNECTING) ||
294 (sock->state == SS_CONNECTED)) { 283 (sock->state == SS_CONNECTED)) {
@@ -303,15 +292,12 @@ static int release(struct socket *sock)
303 * Delete TIPC port; this ensures no more messages are queued 292 * Delete TIPC port; this ensures no more messages are queued
304 * (also disconnects an active connection & sends a 'FIN-' to peer) 293 * (also disconnects an active connection & sends a 'FIN-' to peer)
305 */ 294 */
306
307 res = tipc_deleteport(tport->ref); 295 res = tipc_deleteport(tport->ref);
308 296
309 /* Discard any remaining (connection-based) messages in receive queue */ 297 /* Discard any remaining (connection-based) messages in receive queue */
310
311 discard_rx_queue(sk); 298 discard_rx_queue(sk);
312 299
313 /* Reject any messages that accumulated in backlog queue */ 300 /* Reject any messages that accumulated in backlog queue */
314
315 sock->state = SS_DISCONNECTING; 301 sock->state = SS_DISCONNECTING;
316 release_sock(sk); 302 release_sock(sk);
317 303
@@ -336,7 +322,6 @@ static int release(struct socket *sock)
336 * NOTE: This routine doesn't need to take the socket lock since it doesn't 322 * NOTE: This routine doesn't need to take the socket lock since it doesn't
337 * access any non-constant socket information. 323 * access any non-constant socket information.
338 */ 324 */
339
340static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) 325static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
341{ 326{
342 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 327 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -355,6 +340,9 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
355 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) 340 else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
356 return -EAFNOSUPPORT; 341 return -EAFNOSUPPORT;
357 342
343 if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
344 return -EACCES;
345
358 return (addr->scope > 0) ? 346 return (addr->scope > 0) ?
359 tipc_publish(portref, addr->scope, &addr->addr.nameseq) : 347 tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
360 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); 348 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
@@ -373,7 +361,6 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
373 * accesses socket information that is unchanging (or which changes in 361 * accesses socket information that is unchanging (or which changes in
374 * a completely predictable manner). 362 * a completely predictable manner).
375 */ 363 */
376
377static int get_name(struct socket *sock, struct sockaddr *uaddr, 364static int get_name(struct socket *sock, struct sockaddr *uaddr,
378 int *uaddr_len, int peer) 365 int *uaddr_len, int peer)
379{ 366{
@@ -441,7 +428,6 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
441 * imply that the operation will succeed, merely that it should be performed 428 * imply that the operation will succeed, merely that it should be performed
442 * and will not block. 429 * and will not block.
443 */ 430 */
444
445static unsigned int poll(struct file *file, struct socket *sock, 431static unsigned int poll(struct file *file, struct socket *sock,
446 poll_table *wait) 432 poll_table *wait)
447{ 433{
@@ -479,7 +465,6 @@ static unsigned int poll(struct file *file, struct socket *sock,
479 * 465 *
480 * Returns 0 if permission is granted, otherwise errno 466 * Returns 0 if permission is granted, otherwise errno
481 */ 467 */
482
483static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) 468static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
484{ 469{
485 struct tipc_cfg_msg_hdr hdr; 470 struct tipc_cfg_msg_hdr hdr;
@@ -515,7 +500,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
515 * 500 *
516 * Returns the number of bytes sent on success, or errno otherwise 501 * Returns the number of bytes sent on success, or errno otherwise
517 */ 502 */
518
519static int send_msg(struct kiocb *iocb, struct socket *sock, 503static int send_msg(struct kiocb *iocb, struct socket *sock,
520 struct msghdr *m, size_t total_len) 504 struct msghdr *m, size_t total_len)
521{ 505{
@@ -532,7 +516,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
532 (dest->family != AF_TIPC))) 516 (dest->family != AF_TIPC)))
533 return -EINVAL; 517 return -EINVAL;
534 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 518 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
535 (m->msg_iovlen > (unsigned)INT_MAX)) 519 (m->msg_iovlen > (unsigned int)INT_MAX))
536 return -EMSGSIZE; 520 return -EMSGSIZE;
537 521
538 if (iocb) 522 if (iocb)
@@ -559,7 +543,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
559 } 543 }
560 544
561 /* Abort any pending connection attempts (very unlikely) */ 545 /* Abort any pending connection attempts (very unlikely) */
562
563 reject_rx_queue(sk); 546 reject_rx_queue(sk);
564 } 547 }
565 548
@@ -628,7 +611,6 @@ exit:
628 * 611 *
629 * Returns the number of bytes sent on success, or errno otherwise 612 * Returns the number of bytes sent on success, or errno otherwise
630 */ 613 */
631
632static int send_packet(struct kiocb *iocb, struct socket *sock, 614static int send_packet(struct kiocb *iocb, struct socket *sock,
633 struct msghdr *m, size_t total_len) 615 struct msghdr *m, size_t total_len)
634{ 616{
@@ -639,12 +621,11 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
639 int res; 621 int res;
640 622
641 /* Handle implied connection establishment */ 623 /* Handle implied connection establishment */
642
643 if (unlikely(dest)) 624 if (unlikely(dest))
644 return send_msg(iocb, sock, m, total_len); 625 return send_msg(iocb, sock, m, total_len);
645 626
646 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 627 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
647 (m->msg_iovlen > (unsigned)INT_MAX)) 628 (m->msg_iovlen > (unsigned int)INT_MAX))
648 return -EMSGSIZE; 629 return -EMSGSIZE;
649 630
650 if (iocb) 631 if (iocb)
@@ -692,7 +673,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
692 * Returns the number of bytes sent on success (or partial success), 673 * Returns the number of bytes sent on success (or partial success),
693 * or errno if no data sent 674 * or errno if no data sent
694 */ 675 */
695
696static int send_stream(struct kiocb *iocb, struct socket *sock, 676static int send_stream(struct kiocb *iocb, struct socket *sock,
697 struct msghdr *m, size_t total_len) 677 struct msghdr *m, size_t total_len)
698{ 678{
@@ -712,7 +692,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
712 lock_sock(sk); 692 lock_sock(sk);
713 693
714 /* Handle special cases where there is no connection */ 694 /* Handle special cases where there is no connection */
715
716 if (unlikely(sock->state != SS_CONNECTED)) { 695 if (unlikely(sock->state != SS_CONNECTED)) {
717 if (sock->state == SS_UNCONNECTED) { 696 if (sock->state == SS_UNCONNECTED) {
718 res = send_packet(NULL, sock, m, total_len); 697 res = send_packet(NULL, sock, m, total_len);
@@ -731,8 +710,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
731 goto exit; 710 goto exit;
732 } 711 }
733 712
734 if ((total_len > (unsigned)INT_MAX) || 713 if ((total_len > (unsigned int)INT_MAX) ||
735 (m->msg_iovlen > (unsigned)INT_MAX)) { 714 (m->msg_iovlen > (unsigned int)INT_MAX)) {
736 res = -EMSGSIZE; 715 res = -EMSGSIZE;
737 goto exit; 716 goto exit;
738 } 717 }
@@ -744,7 +723,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
744 * (i.e. one large iovec entry), but could be improved to pass sets 723 * (i.e. one large iovec entry), but could be improved to pass sets
745 * of small iovec entries into send_packet(). 724 * of small iovec entries into send_packet().
746 */ 725 */
747
748 curr_iov = m->msg_iov; 726 curr_iov = m->msg_iov;
749 curr_iovlen = m->msg_iovlen; 727 curr_iovlen = m->msg_iovlen;
750 my_msg.msg_iov = &my_iov; 728 my_msg.msg_iov = &my_iov;
@@ -793,7 +771,6 @@ exit:
793 * 771 *
794 * Returns 0 on success, errno otherwise 772 * Returns 0 on success, errno otherwise
795 */ 773 */
796
797static int auto_connect(struct socket *sock, struct tipc_msg *msg) 774static int auto_connect(struct socket *sock, struct tipc_msg *msg)
798{ 775{
799 struct tipc_sock *tsock = tipc_sk(sock->sk); 776 struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -818,7 +795,6 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
818 * 795 *
819 * Note: Address is not captured if not requested by receiver. 796 * Note: Address is not captured if not requested by receiver.
820 */ 797 */
821
822static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) 798static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
823{ 799{
824 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; 800 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
@@ -844,7 +820,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
844 * 820 *
845 * Returns 0 if successful, otherwise errno 821 * Returns 0 if successful, otherwise errno
846 */ 822 */
847
848static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 823static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
849 struct tipc_port *tport) 824 struct tipc_port *tport)
850{ 825{
@@ -858,7 +833,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
858 return 0; 833 return 0;
859 834
860 /* Optionally capture errored message object(s) */ 835 /* Optionally capture errored message object(s) */
861
862 err = msg ? msg_errcode(msg) : 0; 836 err = msg ? msg_errcode(msg) : 0;
863 if (unlikely(err)) { 837 if (unlikely(err)) {
864 anc_data[0] = err; 838 anc_data[0] = err;
@@ -875,7 +849,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
875 } 849 }
876 850
877 /* Optionally capture message destination object */ 851 /* Optionally capture message destination object */
878
879 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 852 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
880 switch (dest_type) { 853 switch (dest_type) {
881 case TIPC_NAMED_MSG: 854 case TIPC_NAMED_MSG:
@@ -920,7 +893,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
920 * 893 *
921 * Returns size of returned message data, errno otherwise 894 * Returns size of returned message data, errno otherwise
922 */ 895 */
923
924static int recv_msg(struct kiocb *iocb, struct socket *sock, 896static int recv_msg(struct kiocb *iocb, struct socket *sock,
925 struct msghdr *m, size_t buf_len, int flags) 897 struct msghdr *m, size_t buf_len, int flags)
926{ 898{
@@ -934,7 +906,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
934 int res; 906 int res;
935 907
936 /* Catch invalid receive requests */ 908 /* Catch invalid receive requests */
937
938 if (unlikely(!buf_len)) 909 if (unlikely(!buf_len))
939 return -EINVAL; 910 return -EINVAL;
940 911
@@ -949,7 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
949restart: 920restart:
950 921
951 /* Look for a message in receive queue; wait if necessary */ 922 /* Look for a message in receive queue; wait if necessary */
952
953 while (skb_queue_empty(&sk->sk_receive_queue)) { 923 while (skb_queue_empty(&sk->sk_receive_queue)) {
954 if (sock->state == SS_DISCONNECTING) { 924 if (sock->state == SS_DISCONNECTING) {
955 res = -ENOTCONN; 925 res = -ENOTCONN;
@@ -967,14 +937,12 @@ restart:
967 } 937 }
968 938
969 /* Look at first message in receive queue */ 939 /* Look at first message in receive queue */
970
971 buf = skb_peek(&sk->sk_receive_queue); 940 buf = skb_peek(&sk->sk_receive_queue);
972 msg = buf_msg(buf); 941 msg = buf_msg(buf);
973 sz = msg_data_sz(msg); 942 sz = msg_data_sz(msg);
974 err = msg_errcode(msg); 943 err = msg_errcode(msg);
975 944
976 /* Complete connection setup for an implied connect */ 945 /* Complete connection setup for an implied connect */
977
978 if (unlikely(sock->state == SS_CONNECTING)) { 946 if (unlikely(sock->state == SS_CONNECTING)) {
979 res = auto_connect(sock, msg); 947 res = auto_connect(sock, msg);
980 if (res) 948 if (res)
@@ -982,24 +950,20 @@ restart:
982 } 950 }
983 951
984 /* Discard an empty non-errored message & try again */ 952 /* Discard an empty non-errored message & try again */
985
986 if ((!sz) && (!err)) { 953 if ((!sz) && (!err)) {
987 advance_rx_queue(sk); 954 advance_rx_queue(sk);
988 goto restart; 955 goto restart;
989 } 956 }
990 957
991 /* Capture sender's address (optional) */ 958 /* Capture sender's address (optional) */
992
993 set_orig_addr(m, msg); 959 set_orig_addr(m, msg);
994 960
995 /* Capture ancillary data (optional) */ 961 /* Capture ancillary data (optional) */
996
997 res = anc_data_recv(m, msg, tport); 962 res = anc_data_recv(m, msg, tport);
998 if (res) 963 if (res)
999 goto exit; 964 goto exit;
1000 965
1001 /* Capture message data (if valid) & compute return value (always) */ 966 /* Capture message data (if valid) & compute return value (always) */
1002
1003 if (!err) { 967 if (!err) {
1004 if (unlikely(buf_len < sz)) { 968 if (unlikely(buf_len < sz)) {
1005 sz = buf_len; 969 sz = buf_len;
@@ -1019,7 +983,6 @@ restart:
1019 } 983 }
1020 984
1021 /* Consume received message (optional) */ 985 /* Consume received message (optional) */
1022
1023 if (likely(!(flags & MSG_PEEK))) { 986 if (likely(!(flags & MSG_PEEK))) {
1024 if ((sock->state != SS_READY) && 987 if ((sock->state != SS_READY) &&
1025 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 988 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
@@ -1043,7 +1006,6 @@ exit:
1043 * 1006 *
1044 * Returns size of returned message data, errno otherwise 1007 * Returns size of returned message data, errno otherwise
1045 */ 1008 */
1046
1047static int recv_stream(struct kiocb *iocb, struct socket *sock, 1009static int recv_stream(struct kiocb *iocb, struct socket *sock,
1048 struct msghdr *m, size_t buf_len, int flags) 1010 struct msghdr *m, size_t buf_len, int flags)
1049{ 1011{
@@ -1059,7 +1021,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1059 int res = 0; 1021 int res = 0;
1060 1022
1061 /* Catch invalid receive attempts */ 1023 /* Catch invalid receive attempts */
1062
1063 if (unlikely(!buf_len)) 1024 if (unlikely(!buf_len))
1064 return -EINVAL; 1025 return -EINVAL;
1065 1026
@@ -1073,10 +1034,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1073 1034
1074 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1035 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1075 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1036 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1076restart:
1077 1037
1038restart:
1078 /* Look for a message in receive queue; wait if necessary */ 1039 /* Look for a message in receive queue; wait if necessary */
1079
1080 while (skb_queue_empty(&sk->sk_receive_queue)) { 1040 while (skb_queue_empty(&sk->sk_receive_queue)) {
1081 if (sock->state == SS_DISCONNECTING) { 1041 if (sock->state == SS_DISCONNECTING) {
1082 res = -ENOTCONN; 1042 res = -ENOTCONN;
@@ -1094,21 +1054,18 @@ restart:
1094 } 1054 }
1095 1055
1096 /* Look at first message in receive queue */ 1056 /* Look at first message in receive queue */
1097
1098 buf = skb_peek(&sk->sk_receive_queue); 1057 buf = skb_peek(&sk->sk_receive_queue);
1099 msg = buf_msg(buf); 1058 msg = buf_msg(buf);
1100 sz = msg_data_sz(msg); 1059 sz = msg_data_sz(msg);
1101 err = msg_errcode(msg); 1060 err = msg_errcode(msg);
1102 1061
1103 /* Discard an empty non-errored message & try again */ 1062 /* Discard an empty non-errored message & try again */
1104
1105 if ((!sz) && (!err)) { 1063 if ((!sz) && (!err)) {
1106 advance_rx_queue(sk); 1064 advance_rx_queue(sk);
1107 goto restart; 1065 goto restart;
1108 } 1066 }
1109 1067
1110 /* Optionally capture sender's address & ancillary data of first msg */ 1068 /* Optionally capture sender's address & ancillary data of first msg */
1111
1112 if (sz_copied == 0) { 1069 if (sz_copied == 0) {
1113 set_orig_addr(m, msg); 1070 set_orig_addr(m, msg);
1114 res = anc_data_recv(m, msg, tport); 1071 res = anc_data_recv(m, msg, tport);
@@ -1117,7 +1074,6 @@ restart:
1117 } 1074 }
1118 1075
1119 /* Capture message data (if valid) & compute return value (always) */ 1076 /* Capture message data (if valid) & compute return value (always) */
1120
1121 if (!err) { 1077 if (!err) {
1122 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); 1078 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1123 1079
@@ -1149,7 +1105,6 @@ restart:
1149 } 1105 }
1150 1106
1151 /* Consume received message (optional) */ 1107 /* Consume received message (optional) */
1152
1153 if (likely(!(flags & MSG_PEEK))) { 1108 if (likely(!(flags & MSG_PEEK))) {
1154 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1109 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1155 tipc_acknowledge(tport->ref, tport->conn_unacked); 1110 tipc_acknowledge(tport->ref, tport->conn_unacked);
@@ -1157,7 +1112,6 @@ restart:
1157 } 1112 }
1158 1113
1159 /* Loop around if more data is required */ 1114 /* Loop around if more data is required */
1160
1161 if ((sz_copied < buf_len) && /* didn't get all requested data */ 1115 if ((sz_copied < buf_len) && /* didn't get all requested data */
1162 (!skb_queue_empty(&sk->sk_receive_queue) || 1116 (!skb_queue_empty(&sk->sk_receive_queue) ||
1163 (sz_copied < target)) && /* and more is ready or required */ 1117 (sz_copied < target)) && /* and more is ready or required */
@@ -1178,7 +1132,6 @@ exit:
1178 * 1132 *
1179 * Returns 1 if queue is unable to accept message, 0 otherwise 1133 * Returns 1 if queue is unable to accept message, 0 otherwise
1180 */ 1134 */
1181
1182static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) 1135static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1183{ 1136{
1184 u32 threshold; 1137 u32 threshold;
@@ -1211,7 +1164,6 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1211 * 1164 *
1212 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1165 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1213 */ 1166 */
1214
1215static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) 1167static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1216{ 1168{
1217 struct socket *sock = sk->sk_socket; 1169 struct socket *sock = sk->sk_socket;
@@ -1219,12 +1171,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1219 u32 recv_q_len; 1171 u32 recv_q_len;
1220 1172
1221 /* Reject message if it is wrong sort of message for socket */ 1173 /* Reject message if it is wrong sort of message for socket */
1222 1174 if (msg_type(msg) > TIPC_DIRECT_MSG)
1223 /* 1175 return TIPC_ERR_NO_PORT;
1224 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
1225 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1226 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1227 */
1228 1176
1229 if (sock->state == SS_READY) { 1177 if (sock->state == SS_READY) {
1230 if (msg_connected(msg)) 1178 if (msg_connected(msg))
@@ -1233,7 +1181,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1233 if (msg_mcast(msg)) 1181 if (msg_mcast(msg))
1234 return TIPC_ERR_NO_PORT; 1182 return TIPC_ERR_NO_PORT;
1235 if (sock->state == SS_CONNECTED) { 1183 if (sock->state == SS_CONNECTED) {
1236 if (!msg_connected(msg)) 1184 if (!msg_connected(msg) ||
1185 !tipc_port_peer_msg(tipc_sk_port(sk), msg))
1237 return TIPC_ERR_NO_PORT; 1186 return TIPC_ERR_NO_PORT;
1238 } else if (sock->state == SS_CONNECTING) { 1187 } else if (sock->state == SS_CONNECTING) {
1239 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) 1188 if (!msg_connected(msg) && (msg_errcode(msg) == 0))
@@ -1250,7 +1199,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1250 } 1199 }
1251 1200
1252 /* Reject message if there isn't room to queue it */ 1201 /* Reject message if there isn't room to queue it */
1253
1254 recv_q_len = (u32)atomic_read(&tipc_queue_size); 1202 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1255 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { 1203 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1256 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) 1204 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
@@ -1263,13 +1211,11 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1263 } 1211 }
1264 1212
1265 /* Enqueue message (finally!) */ 1213 /* Enqueue message (finally!) */
1266
1267 TIPC_SKB_CB(buf)->handle = 0; 1214 TIPC_SKB_CB(buf)->handle = 0;
1268 atomic_inc(&tipc_queue_size); 1215 atomic_inc(&tipc_queue_size);
1269 __skb_queue_tail(&sk->sk_receive_queue, buf); 1216 __skb_queue_tail(&sk->sk_receive_queue, buf);
1270 1217
1271 /* Initiate connection termination for an incoming 'FIN' */ 1218 /* Initiate connection termination for an incoming 'FIN' */
1272
1273 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1219 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1274 sock->state = SS_DISCONNECTING; 1220 sock->state = SS_DISCONNECTING;
1275 tipc_disconnect_port(tipc_sk_port(sk)); 1221 tipc_disconnect_port(tipc_sk_port(sk));
@@ -1289,7 +1235,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1289 * 1235 *
1290 * Returns 0 1236 * Returns 0
1291 */ 1237 */
1292
1293static int backlog_rcv(struct sock *sk, struct sk_buff *buf) 1238static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1294{ 1239{
1295 u32 res; 1240 u32 res;
@@ -1309,7 +1254,6 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1309 * 1254 *
1310 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1255 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1311 */ 1256 */
1312
1313static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) 1257static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1314{ 1258{
1315 struct sock *sk = (struct sock *)tport->usr_handle; 1259 struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1321,12 +1265,11 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1321 * This code is based on sk_receive_skb(), but must be distinct from it 1265 * This code is based on sk_receive_skb(), but must be distinct from it
1322 * since a TIPC-specific filter/reject mechanism is utilized 1266 * since a TIPC-specific filter/reject mechanism is utilized
1323 */ 1267 */
1324
1325 bh_lock_sock(sk); 1268 bh_lock_sock(sk);
1326 if (!sock_owned_by_user(sk)) { 1269 if (!sock_owned_by_user(sk)) {
1327 res = filter_rcv(sk, buf); 1270 res = filter_rcv(sk, buf);
1328 } else { 1271 } else {
1329 if (sk_add_backlog(sk, buf)) 1272 if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
1330 res = TIPC_ERR_OVERLOAD; 1273 res = TIPC_ERR_OVERLOAD;
1331 else 1274 else
1332 res = TIPC_OK; 1275 res = TIPC_OK;
@@ -1342,7 +1285,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1342 * 1285 *
1343 * Called with port lock already taken. 1286 * Called with port lock already taken.
1344 */ 1287 */
1345
1346static void wakeupdispatch(struct tipc_port *tport) 1288static void wakeupdispatch(struct tipc_port *tport)
1347{ 1289{
1348 struct sock *sk = (struct sock *)tport->usr_handle; 1290 struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1360,7 +1302,6 @@ static void wakeupdispatch(struct tipc_port *tport)
1360 * 1302 *
1361 * Returns 0 on success, errno otherwise 1303 * Returns 0 on success, errno otherwise
1362 */ 1304 */
1363
1364static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 1305static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1365 int flags) 1306 int flags)
1366{ 1307{
@@ -1375,21 +1316,18 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1375 lock_sock(sk); 1316 lock_sock(sk);
1376 1317
1377 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ 1318 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1378
1379 if (sock->state == SS_READY) { 1319 if (sock->state == SS_READY) {
1380 res = -EOPNOTSUPP; 1320 res = -EOPNOTSUPP;
1381 goto exit; 1321 goto exit;
1382 } 1322 }
1383 1323
1384 /* For now, TIPC does not support the non-blocking form of connect() */ 1324 /* For now, TIPC does not support the non-blocking form of connect() */
1385
1386 if (flags & O_NONBLOCK) { 1325 if (flags & O_NONBLOCK) {
1387 res = -EOPNOTSUPP; 1326 res = -EOPNOTSUPP;
1388 goto exit; 1327 goto exit;
1389 } 1328 }
1390 1329
1391 /* Issue Posix-compliant error code if socket is in the wrong state */ 1330 /* Issue Posix-compliant error code if socket is in the wrong state */
1392
1393 if (sock->state == SS_LISTENING) { 1331 if (sock->state == SS_LISTENING) {
1394 res = -EOPNOTSUPP; 1332 res = -EOPNOTSUPP;
1395 goto exit; 1333 goto exit;
@@ -1409,18 +1347,15 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1409 * Note: send_msg() validates the rest of the address fields, 1347 * Note: send_msg() validates the rest of the address fields,
1410 * so there's no need to do it here 1348 * so there's no need to do it here
1411 */ 1349 */
1412
1413 if (dst->addrtype == TIPC_ADDR_MCAST) { 1350 if (dst->addrtype == TIPC_ADDR_MCAST) {
1414 res = -EINVAL; 1351 res = -EINVAL;
1415 goto exit; 1352 goto exit;
1416 } 1353 }
1417 1354
1418 /* Reject any messages already in receive queue (very unlikely) */ 1355 /* Reject any messages already in receive queue (very unlikely) */
1419
1420 reject_rx_queue(sk); 1356 reject_rx_queue(sk);
1421 1357
1422 /* Send a 'SYN-' to destination */ 1358 /* Send a 'SYN-' to destination */
1423
1424 m.msg_name = dest; 1359 m.msg_name = dest;
1425 m.msg_namelen = destlen; 1360 m.msg_namelen = destlen;
1426 res = send_msg(NULL, sock, &m, 0); 1361 res = send_msg(NULL, sock, &m, 0);
@@ -1428,7 +1363,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1428 goto exit; 1363 goto exit;
1429 1364
1430 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1365 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1431
1432 timeout = tipc_sk(sk)->conn_timeout; 1366 timeout = tipc_sk(sk)->conn_timeout;
1433 release_sock(sk); 1367 release_sock(sk);
1434 res = wait_event_interruptible_timeout(*sk_sleep(sk), 1368 res = wait_event_interruptible_timeout(*sk_sleep(sk),
@@ -1473,7 +1407,6 @@ exit:
1473 * 1407 *
1474 * Returns 0 on success, errno otherwise 1408 * Returns 0 on success, errno otherwise
1475 */ 1409 */
1476
1477static int listen(struct socket *sock, int len) 1410static int listen(struct socket *sock, int len)
1478{ 1411{
1479 struct sock *sk = sock->sk; 1412 struct sock *sk = sock->sk;
@@ -1500,7 +1433,6 @@ static int listen(struct socket *sock, int len)
1500 * 1433 *
1501 * Returns 0 on success, errno otherwise 1434 * Returns 0 on success, errno otherwise
1502 */ 1435 */
1503
1504static int accept(struct socket *sock, struct socket *new_sock, int flags) 1436static int accept(struct socket *sock, struct socket *new_sock, int flags)
1505{ 1437{
1506 struct sock *sk = sock->sk; 1438 struct sock *sk = sock->sk;
@@ -1543,11 +1475,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1543 * Reject any stray messages received by new socket 1475 * Reject any stray messages received by new socket
1544 * before the socket lock was taken (very, very unlikely) 1476 * before the socket lock was taken (very, very unlikely)
1545 */ 1477 */
1546
1547 reject_rx_queue(new_sk); 1478 reject_rx_queue(new_sk);
1548 1479
1549 /* Connect new socket to it's peer */ 1480 /* Connect new socket to it's peer */
1550
1551 new_tsock->peer_name.ref = msg_origport(msg); 1481 new_tsock->peer_name.ref = msg_origport(msg);
1552 new_tsock->peer_name.node = msg_orignode(msg); 1482 new_tsock->peer_name.node = msg_orignode(msg);
1553 tipc_connect2port(new_ref, &new_tsock->peer_name); 1483 tipc_connect2port(new_ref, &new_tsock->peer_name);
@@ -1563,7 +1493,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1563 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1493 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1564 * Respond to 'SYN+' by queuing it on new socket. 1494 * Respond to 'SYN+' by queuing it on new socket.
1565 */ 1495 */
1566
1567 if (!msg_data_sz(msg)) { 1496 if (!msg_data_sz(msg)) {
1568 struct msghdr m = {NULL,}; 1497 struct msghdr m = {NULL,};
1569 1498
@@ -1589,7 +1518,6 @@ exit:
1589 * 1518 *
1590 * Returns 0 on success, errno otherwise 1519 * Returns 0 on success, errno otherwise
1591 */ 1520 */
1592
1593static int shutdown(struct socket *sock, int how) 1521static int shutdown(struct socket *sock, int how)
1594{ 1522{
1595 struct sock *sk = sock->sk; 1523 struct sock *sk = sock->sk;
@@ -1606,13 +1534,13 @@ static int shutdown(struct socket *sock, int how)
1606 case SS_CONNECTING: 1534 case SS_CONNECTING:
1607 case SS_CONNECTED: 1535 case SS_CONNECTED:
1608 1536
1609 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1610restart: 1537restart:
1538 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1611 buf = __skb_dequeue(&sk->sk_receive_queue); 1539 buf = __skb_dequeue(&sk->sk_receive_queue);
1612 if (buf) { 1540 if (buf) {
1613 atomic_dec(&tipc_queue_size); 1541 atomic_dec(&tipc_queue_size);
1614 if (TIPC_SKB_CB(buf)->handle != 0) { 1542 if (TIPC_SKB_CB(buf)->handle != 0) {
1615 buf_discard(buf); 1543 kfree_skb(buf);
1616 goto restart; 1544 goto restart;
1617 } 1545 }
1618 tipc_disconnect(tport->ref); 1546 tipc_disconnect(tport->ref);
@@ -1628,7 +1556,6 @@ restart:
1628 case SS_DISCONNECTING: 1556 case SS_DISCONNECTING:
1629 1557
1630 /* Discard any unreceived messages; wake up sleeping tasks */ 1558 /* Discard any unreceived messages; wake up sleeping tasks */
1631
1632 discard_rx_queue(sk); 1559 discard_rx_queue(sk);
1633 if (waitqueue_active(sk_sleep(sk))) 1560 if (waitqueue_active(sk_sleep(sk)))
1634 wake_up_interruptible(sk_sleep(sk)); 1561 wake_up_interruptible(sk_sleep(sk));
@@ -1656,7 +1583,6 @@ restart:
1656 * 1583 *
1657 * Returns 0 on success, errno otherwise 1584 * Returns 0 on success, errno otherwise
1658 */ 1585 */
1659
1660static int setsockopt(struct socket *sock, 1586static int setsockopt(struct socket *sock,
1661 int lvl, int opt, char __user *ov, unsigned int ol) 1587 int lvl, int opt, char __user *ov, unsigned int ol)
1662{ 1588{
@@ -1716,7 +1642,6 @@ static int setsockopt(struct socket *sock,
1716 * 1642 *
1717 * Returns 0 on success, errno otherwise 1643 * Returns 0 on success, errno otherwise
1718 */ 1644 */
1719
1720static int getsockopt(struct socket *sock, 1645static int getsockopt(struct socket *sock,
1721 int lvl, int opt, char __user *ov, int __user *ol) 1646 int lvl, int opt, char __user *ov, int __user *ol)
1722{ 1647{
@@ -1777,7 +1702,6 @@ static int getsockopt(struct socket *sock,
1777/** 1702/**
1778 * Protocol switches for the various types of TIPC sockets 1703 * Protocol switches for the various types of TIPC sockets
1779 */ 1704 */
1780
1781static const struct proto_ops msg_ops = { 1705static const struct proto_ops msg_ops = {
1782 .owner = THIS_MODULE, 1706 .owner = THIS_MODULE,
1783 .family = AF_TIPC, 1707 .family = AF_TIPC,
@@ -1883,7 +1807,6 @@ int tipc_socket_init(void)
1883/** 1807/**
1884 * tipc_socket_stop - stop TIPC socket interface 1808 * tipc_socket_stop - stop TIPC socket interface
1885 */ 1809 */
1886
1887void tipc_socket_stop(void) 1810void tipc_socket_stop(void)
1888{ 1811{
1889 if (!sockets_enabled) 1812 if (!sockets_enabled)
@@ -1893,4 +1816,3 @@ void tipc_socket_stop(void)
1893 sock_unregister(tipc_family_ops.family); 1816 sock_unregister(tipc_family_ops.family);
1894 proto_unregister(&tipc_proto); 1817 proto_unregister(&tipc_proto);
1895} 1818}
1896
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8c49566da8f3..f976e9cd6a72 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -46,7 +46,6 @@
46 * @subscriber_list: adjacent subscribers in top. server's list of subscribers 46 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
47 * @subscription_list: list of subscription objects for this subscriber 47 * @subscription_list: list of subscription objects for this subscriber
48 */ 48 */
49
50struct tipc_subscriber { 49struct tipc_subscriber {
51 u32 port_ref; 50 u32 port_ref;
52 spinlock_t *lock; 51 spinlock_t *lock;
@@ -56,13 +55,11 @@ struct tipc_subscriber {
56 55
57/** 56/**
58 * struct top_srv - TIPC network topology subscription service 57 * struct top_srv - TIPC network topology subscription service
59 * @user_ref: TIPC userid of subscription service
60 * @setup_port: reference to TIPC port that handles subscription requests 58 * @setup_port: reference to TIPC port that handles subscription requests
61 * @subscription_count: number of active subscriptions (not subscribers!) 59 * @subscription_count: number of active subscriptions (not subscribers!)
62 * @subscriber_list: list of ports subscribing to service 60 * @subscriber_list: list of ports subscribing to service
63 * @lock: spinlock govering access to subscriber list 61 * @lock: spinlock govering access to subscriber list
64 */ 62 */
65
66struct top_srv { 63struct top_srv {
67 u32 setup_port; 64 u32 setup_port;
68 atomic_t subscription_count; 65 atomic_t subscription_count;
@@ -79,7 +76,6 @@ static struct top_srv topsrv;
79 * 76 *
80 * Returns converted value 77 * Returns converted value
81 */ 78 */
82
83static u32 htohl(u32 in, int swap) 79static u32 htohl(u32 in, int swap)
84{ 80{
85 return swap ? swab32(in) : in; 81 return swap ? swab32(in) : in;
@@ -91,7 +87,6 @@ static u32 htohl(u32 in, int swap)
91 * Note: Must not hold subscriber's server port lock, since tipc_send() will 87 * Note: Must not hold subscriber's server port lock, since tipc_send() will
92 * try to take the lock if the message is rejected and returned! 88 * try to take the lock if the message is rejected and returned!
93 */ 89 */
94
95static void subscr_send_event(struct tipc_subscription *sub, 90static void subscr_send_event(struct tipc_subscription *sub,
96 u32 found_lower, 91 u32 found_lower,
97 u32 found_upper, 92 u32 found_upper,
@@ -117,7 +112,6 @@ static void subscr_send_event(struct tipc_subscription *sub,
117 * 112 *
118 * Returns 1 if there is overlap, otherwise 0. 113 * Returns 1 if there is overlap, otherwise 0.
119 */ 114 */
120
121int tipc_subscr_overlap(struct tipc_subscription *sub, 115int tipc_subscr_overlap(struct tipc_subscription *sub,
122 u32 found_lower, 116 u32 found_lower,
123 u32 found_upper) 117 u32 found_upper)
@@ -137,7 +131,6 @@ int tipc_subscr_overlap(struct tipc_subscription *sub,
137 * 131 *
138 * Protected by nameseq.lock in name_table.c 132 * Protected by nameseq.lock in name_table.c
139 */ 133 */
140
141void tipc_subscr_report_overlap(struct tipc_subscription *sub, 134void tipc_subscr_report_overlap(struct tipc_subscription *sub,
142 u32 found_lower, 135 u32 found_lower,
143 u32 found_upper, 136 u32 found_upper,
@@ -157,43 +150,35 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub,
157/** 150/**
158 * subscr_timeout - subscription timeout has occurred 151 * subscr_timeout - subscription timeout has occurred
159 */ 152 */
160
161static void subscr_timeout(struct tipc_subscription *sub) 153static void subscr_timeout(struct tipc_subscription *sub)
162{ 154{
163 struct tipc_port *server_port; 155 struct tipc_port *server_port;
164 156
165 /* Validate server port reference (in case subscriber is terminating) */ 157 /* Validate server port reference (in case subscriber is terminating) */
166
167 server_port = tipc_port_lock(sub->server_ref); 158 server_port = tipc_port_lock(sub->server_ref);
168 if (server_port == NULL) 159 if (server_port == NULL)
169 return; 160 return;
170 161
171 /* Validate timeout (in case subscription is being cancelled) */ 162 /* Validate timeout (in case subscription is being cancelled) */
172
173 if (sub->timeout == TIPC_WAIT_FOREVER) { 163 if (sub->timeout == TIPC_WAIT_FOREVER) {
174 tipc_port_unlock(server_port); 164 tipc_port_unlock(server_port);
175 return; 165 return;
176 } 166 }
177 167
178 /* Unlink subscription from name table */ 168 /* Unlink subscription from name table */
179
180 tipc_nametbl_unsubscribe(sub); 169 tipc_nametbl_unsubscribe(sub);
181 170
182 /* Unlink subscription from subscriber */ 171 /* Unlink subscription from subscriber */
183
184 list_del(&sub->subscription_list); 172 list_del(&sub->subscription_list);
185 173
186 /* Release subscriber's server port */ 174 /* Release subscriber's server port */
187
188 tipc_port_unlock(server_port); 175 tipc_port_unlock(server_port);
189 176
190 /* Notify subscriber of timeout */ 177 /* Notify subscriber of timeout */
191
192 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 178 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
193 TIPC_SUBSCR_TIMEOUT, 0, 0); 179 TIPC_SUBSCR_TIMEOUT, 0, 0);
194 180
195 /* Now destroy subscription */ 181 /* Now destroy subscription */
196
197 k_term_timer(&sub->timer); 182 k_term_timer(&sub->timer);
198 kfree(sub); 183 kfree(sub);
199 atomic_dec(&topsrv.subscription_count); 184 atomic_dec(&topsrv.subscription_count);
@@ -204,7 +189,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
204 * 189 *
205 * Called with subscriber port locked. 190 * Called with subscriber port locked.
206 */ 191 */
207
208static void subscr_del(struct tipc_subscription *sub) 192static void subscr_del(struct tipc_subscription *sub)
209{ 193{
210 tipc_nametbl_unsubscribe(sub); 194 tipc_nametbl_unsubscribe(sub);
@@ -223,7 +207,6 @@ static void subscr_del(struct tipc_subscription *sub)
223 * a new object reference in the interim that uses this lock; this routine will 207 * a new object reference in the interim that uses this lock; this routine will
224 * simply wait for it to be released, then claim it.) 208 * simply wait for it to be released, then claim it.)
225 */ 209 */
226
227static void subscr_terminate(struct tipc_subscriber *subscriber) 210static void subscr_terminate(struct tipc_subscriber *subscriber)
228{ 211{
229 u32 port_ref; 212 u32 port_ref;
@@ -231,18 +214,15 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
231 struct tipc_subscription *sub_temp; 214 struct tipc_subscription *sub_temp;
232 215
233 /* Invalidate subscriber reference */ 216 /* Invalidate subscriber reference */
234
235 port_ref = subscriber->port_ref; 217 port_ref = subscriber->port_ref;
236 subscriber->port_ref = 0; 218 subscriber->port_ref = 0;
237 spin_unlock_bh(subscriber->lock); 219 spin_unlock_bh(subscriber->lock);
238 220
239 /* Sever connection to subscriber */ 221 /* Sever connection to subscriber */
240
241 tipc_shutdown(port_ref); 222 tipc_shutdown(port_ref);
242 tipc_deleteport(port_ref); 223 tipc_deleteport(port_ref);
243 224
244 /* Destroy any existing subscriptions for subscriber */ 225 /* Destroy any existing subscriptions for subscriber */
245
246 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 226 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
247 subscription_list) { 227 subscription_list) {
248 if (sub->timeout != TIPC_WAIT_FOREVER) { 228 if (sub->timeout != TIPC_WAIT_FOREVER) {
@@ -253,17 +233,14 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
253 } 233 }
254 234
255 /* Remove subscriber from topology server's subscriber list */ 235 /* Remove subscriber from topology server's subscriber list */
256
257 spin_lock_bh(&topsrv.lock); 236 spin_lock_bh(&topsrv.lock);
258 list_del(&subscriber->subscriber_list); 237 list_del(&subscriber->subscriber_list);
259 spin_unlock_bh(&topsrv.lock); 238 spin_unlock_bh(&topsrv.lock);
260 239
261 /* Reclaim subscriber lock */ 240 /* Reclaim subscriber lock */
262
263 spin_lock_bh(subscriber->lock); 241 spin_lock_bh(subscriber->lock);
264 242
265 /* Now destroy subscriber */ 243 /* Now destroy subscriber */
266
267 kfree(subscriber); 244 kfree(subscriber);
268} 245}
269 246
@@ -276,7 +253,6 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
276 * 253 *
277 * Note that fields of 's' use subscriber's endianness! 254 * Note that fields of 's' use subscriber's endianness!
278 */ 255 */
279
280static void subscr_cancel(struct tipc_subscr *s, 256static void subscr_cancel(struct tipc_subscr *s,
281 struct tipc_subscriber *subscriber) 257 struct tipc_subscriber *subscriber)
282{ 258{
@@ -285,7 +261,6 @@ static void subscr_cancel(struct tipc_subscr *s,
285 int found = 0; 261 int found = 0;
286 262
287 /* Find first matching subscription, exit if not found */ 263 /* Find first matching subscription, exit if not found */
288
289 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 264 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
290 subscription_list) { 265 subscription_list) {
291 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { 266 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
@@ -297,7 +272,6 @@ static void subscr_cancel(struct tipc_subscr *s,
297 return; 272 return;
298 273
299 /* Cancel subscription timer (if used), then delete subscription */ 274 /* Cancel subscription timer (if used), then delete subscription */
300
301 if (sub->timeout != TIPC_WAIT_FOREVER) { 275 if (sub->timeout != TIPC_WAIT_FOREVER) {
302 sub->timeout = TIPC_WAIT_FOREVER; 276 sub->timeout = TIPC_WAIT_FOREVER;
303 spin_unlock_bh(subscriber->lock); 277 spin_unlock_bh(subscriber->lock);
@@ -313,7 +287,6 @@ static void subscr_cancel(struct tipc_subscr *s,
313 * 287 *
314 * Called with subscriber port locked. 288 * Called with subscriber port locked.
315 */ 289 */
316
317static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, 290static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
318 struct tipc_subscriber *subscriber) 291 struct tipc_subscriber *subscriber)
319{ 292{
@@ -321,11 +294,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
321 int swap; 294 int swap;
322 295
323 /* Determine subscriber's endianness */ 296 /* Determine subscriber's endianness */
324
325 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); 297 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
326 298
327 /* Detect & process a subscription cancellation request */ 299 /* Detect & process a subscription cancellation request */
328
329 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { 300 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
330 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); 301 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
331 subscr_cancel(s, subscriber); 302 subscr_cancel(s, subscriber);
@@ -333,7 +304,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
333 } 304 }
334 305
335 /* Refuse subscription if global limit exceeded */ 306 /* Refuse subscription if global limit exceeded */
336
337 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 307 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
338 warn("Subscription rejected, subscription limit reached (%u)\n", 308 warn("Subscription rejected, subscription limit reached (%u)\n",
339 tipc_max_subscriptions); 309 tipc_max_subscriptions);
@@ -342,7 +312,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
342 } 312 }
343 313
344 /* Allocate subscription object */ 314 /* Allocate subscription object */
345
346 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 315 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
347 if (!sub) { 316 if (!sub) {
348 warn("Subscription rejected, no memory\n"); 317 warn("Subscription rejected, no memory\n");
@@ -351,7 +320,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
351 } 320 }
352 321
353 /* Initialize subscription object */ 322 /* Initialize subscription object */
354
355 sub->seq.type = htohl(s->seq.type, swap); 323 sub->seq.type = htohl(s->seq.type, swap);
356 sub->seq.lower = htohl(s->seq.lower, swap); 324 sub->seq.lower = htohl(s->seq.lower, swap);
357 sub->seq.upper = htohl(s->seq.upper, swap); 325 sub->seq.upper = htohl(s->seq.upper, swap);
@@ -385,7 +353,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
385 * 353 *
386 * Called with subscriber's server port unlocked. 354 * Called with subscriber's server port unlocked.
387 */ 355 */
388
389static void subscr_conn_shutdown_event(void *usr_handle, 356static void subscr_conn_shutdown_event(void *usr_handle,
390 u32 port_ref, 357 u32 port_ref,
391 struct sk_buff **buf, 358 struct sk_buff **buf,
@@ -409,7 +376,6 @@ static void subscr_conn_shutdown_event(void *usr_handle,
409 * 376 *
410 * Called with subscriber's server port unlocked. 377 * Called with subscriber's server port unlocked.
411 */ 378 */
412
413static void subscr_conn_msg_event(void *usr_handle, 379static void subscr_conn_msg_event(void *usr_handle,
414 u32 port_ref, 380 u32 port_ref,
415 struct sk_buff **buf, 381 struct sk_buff **buf,
@@ -424,7 +390,6 @@ static void subscr_conn_msg_event(void *usr_handle,
424 * Lock subscriber's server port (& make a local copy of lock pointer, 390 * Lock subscriber's server port (& make a local copy of lock pointer,
425 * in case subscriber is deleted while processing subscription request) 391 * in case subscriber is deleted while processing subscription request)
426 */ 392 */
427
428 if (tipc_port_lock(port_ref) == NULL) 393 if (tipc_port_lock(port_ref) == NULL)
429 return; 394 return;
430 395
@@ -452,7 +417,6 @@ static void subscr_conn_msg_event(void *usr_handle,
452 * timeout code cannot delete the subscription, 417 * timeout code cannot delete the subscription,
453 * so the subscription object is still protected. 418 * so the subscription object is still protected.
454 */ 419 */
455
456 tipc_nametbl_subscribe(sub); 420 tipc_nametbl_subscribe(sub);
457 } 421 }
458 } 422 }
@@ -461,7 +425,6 @@ static void subscr_conn_msg_event(void *usr_handle,
461/** 425/**
462 * subscr_named_msg_event - handle request to establish a new subscriber 426 * subscr_named_msg_event - handle request to establish a new subscriber
463 */ 427 */
464
465static void subscr_named_msg_event(void *usr_handle, 428static void subscr_named_msg_event(void *usr_handle,
466 u32 port_ref, 429 u32 port_ref,
467 struct sk_buff **buf, 430 struct sk_buff **buf,
@@ -475,7 +438,6 @@ static void subscr_named_msg_event(void *usr_handle,
475 u32 server_port_ref; 438 u32 server_port_ref;
476 439
477 /* Create subscriber object */ 440 /* Create subscriber object */
478
479 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); 441 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
480 if (subscriber == NULL) { 442 if (subscriber == NULL) {
481 warn("Subscriber rejected, no memory\n"); 443 warn("Subscriber rejected, no memory\n");
@@ -485,7 +447,6 @@ static void subscr_named_msg_event(void *usr_handle,
485 INIT_LIST_HEAD(&subscriber->subscriber_list); 447 INIT_LIST_HEAD(&subscriber->subscriber_list);
486 448
487 /* Create server port & establish connection to subscriber */ 449 /* Create server port & establish connection to subscriber */
488
489 tipc_createport(subscriber, 450 tipc_createport(subscriber,
490 importance, 451 importance,
491 NULL, 452 NULL,
@@ -504,26 +465,21 @@ static void subscr_named_msg_event(void *usr_handle,
504 tipc_connect2port(subscriber->port_ref, orig); 465 tipc_connect2port(subscriber->port_ref, orig);
505 466
506 /* Lock server port (& save lock address for future use) */ 467 /* Lock server port (& save lock address for future use) */
507
508 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock; 468 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
509 469
510 /* Add subscriber to topology server's subscriber list */ 470 /* Add subscriber to topology server's subscriber list */
511
512 spin_lock_bh(&topsrv.lock); 471 spin_lock_bh(&topsrv.lock);
513 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 472 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
514 spin_unlock_bh(&topsrv.lock); 473 spin_unlock_bh(&topsrv.lock);
515 474
516 /* Unlock server port */ 475 /* Unlock server port */
517
518 server_port_ref = subscriber->port_ref; 476 server_port_ref = subscriber->port_ref;
519 spin_unlock_bh(subscriber->lock); 477 spin_unlock_bh(subscriber->lock);
520 478
521 /* Send an ACK- to complete connection handshaking */ 479 /* Send an ACK- to complete connection handshaking */
522
523 tipc_send(server_port_ref, 0, NULL, 0); 480 tipc_send(server_port_ref, 0, NULL, 0);
524 481
525 /* Handle optional subscription request */ 482 /* Handle optional subscription request */
526
527 if (size != 0) { 483 if (size != 0) {
528 subscr_conn_msg_event(subscriber, server_port_ref, 484 subscr_conn_msg_event(subscriber, server_port_ref,
529 buf, data, size); 485 buf, data, size);
@@ -535,7 +491,6 @@ int tipc_subscr_start(void)
535 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 491 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
536 int res; 492 int res;
537 493
538 memset(&topsrv, 0, sizeof(topsrv));
539 spin_lock_init(&topsrv.lock); 494 spin_lock_init(&topsrv.lock);
540 INIT_LIST_HEAD(&topsrv.subscriber_list); 495 INIT_LIST_HEAD(&topsrv.subscriber_list);
541 496
@@ -552,7 +507,7 @@ int tipc_subscr_start(void)
552 if (res) 507 if (res)
553 goto failed; 508 goto failed;
554 509
555 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq); 510 res = tipc_publish(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
556 if (res) { 511 if (res) {
557 tipc_deleteport(topsrv.setup_port); 512 tipc_deleteport(topsrv.setup_port);
558 topsrv.setup_port = 0; 513 topsrv.setup_port = 0;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ef6529c8456f..218d2e07f0cc 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -51,7 +51,6 @@ struct tipc_subscription;
51 * @swap: indicates if subscriber uses opposite endianness in its messages 51 * @swap: indicates if subscriber uses opposite endianness in its messages
52 * @evt: template for events generated by subscription 52 * @evt: template for events generated by subscription
53 */ 53 */
54
55struct tipc_subscription { 54struct tipc_subscription {
56 struct tipc_name_seq seq; 55 struct tipc_name_seq seq;
57 u32 timeout; 56 u32 timeout;
@@ -80,5 +79,4 @@ int tipc_subscr_start(void);
80 79
81void tipc_subscr_stop(void); 80void tipc_subscr_stop(void);
82 81
83
84#endif 82#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 85d3bb7490aa..641f2e47f165 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -149,9 +149,10 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
149 * each socket state is protected by separate spin lock. 149 * each socket state is protected by separate spin lock.
150 */ 150 */
151 151
152static inline unsigned unix_hash_fold(__wsum n) 152static inline unsigned int unix_hash_fold(__wsum n)
153{ 153{
154 unsigned hash = (__force unsigned)n; 154 unsigned int hash = (__force unsigned int)n;
155
155 hash ^= hash>>16; 156 hash ^= hash>>16;
156 hash ^= hash>>8; 157 hash ^= hash>>8;
157 return hash&(UNIX_HASH_SIZE-1); 158 return hash&(UNIX_HASH_SIZE-1);
@@ -200,7 +201,7 @@ static inline void unix_release_addr(struct unix_address *addr)
200 * - if started by zero, it is abstract name. 201 * - if started by zero, it is abstract name.
201 */ 202 */
202 203
203static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp) 204static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
204{ 205{
205 if (len <= sizeof(short) || len > sizeof(*sunaddr)) 206 if (len <= sizeof(short) || len > sizeof(*sunaddr))
206 return -EINVAL; 207 return -EINVAL;
@@ -250,7 +251,7 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
250 251
251static struct sock *__unix_find_socket_byname(struct net *net, 252static struct sock *__unix_find_socket_byname(struct net *net,
252 struct sockaddr_un *sunname, 253 struct sockaddr_un *sunname,
253 int len, int type, unsigned hash) 254 int len, int type, unsigned int hash)
254{ 255{
255 struct sock *s; 256 struct sock *s;
256 struct hlist_node *node; 257 struct hlist_node *node;
@@ -273,7 +274,7 @@ found:
273static inline struct sock *unix_find_socket_byname(struct net *net, 274static inline struct sock *unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname, 275 struct sockaddr_un *sunname,
275 int len, int type, 276 int len, int type,
276 unsigned hash) 277 unsigned int hash)
277{ 278{
278 struct sock *s; 279 struct sock *s;
279 280
@@ -293,7 +294,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
293 spin_lock(&unix_table_lock); 294 spin_lock(&unix_table_lock);
294 sk_for_each(s, node, 295 sk_for_each(s, node,
295 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 296 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
296 struct dentry *dentry = unix_sk(s)->dentry; 297 struct dentry *dentry = unix_sk(s)->path.dentry;
297 298
298 if (dentry && dentry->d_inode == i) { 299 if (dentry && dentry->d_inode == i) {
299 sock_hold(s); 300 sock_hold(s);
@@ -377,8 +378,7 @@ static void unix_sock_destructor(struct sock *sk)
377static int unix_release_sock(struct sock *sk, int embrion) 378static int unix_release_sock(struct sock *sk, int embrion)
378{ 379{
379 struct unix_sock *u = unix_sk(sk); 380 struct unix_sock *u = unix_sk(sk);
380 struct dentry *dentry; 381 struct path path;
381 struct vfsmount *mnt;
382 struct sock *skpair; 382 struct sock *skpair;
383 struct sk_buff *skb; 383 struct sk_buff *skb;
384 int state; 384 int state;
@@ -389,10 +389,9 @@ static int unix_release_sock(struct sock *sk, int embrion)
389 unix_state_lock(sk); 389 unix_state_lock(sk);
390 sock_orphan(sk); 390 sock_orphan(sk);
391 sk->sk_shutdown = SHUTDOWN_MASK; 391 sk->sk_shutdown = SHUTDOWN_MASK;
392 dentry = u->dentry; 392 path = u->path;
393 u->dentry = NULL; 393 u->path.dentry = NULL;
394 mnt = u->mnt; 394 u->path.mnt = NULL;
395 u->mnt = NULL;
396 state = sk->sk_state; 395 state = sk->sk_state;
397 sk->sk_state = TCP_CLOSE; 396 sk->sk_state = TCP_CLOSE;
398 unix_state_unlock(sk); 397 unix_state_unlock(sk);
@@ -425,10 +424,8 @@ static int unix_release_sock(struct sock *sk, int embrion)
425 kfree_skb(skb); 424 kfree_skb(skb);
426 } 425 }
427 426
428 if (dentry) { 427 if (path.dentry)
429 dput(dentry); 428 path_put(&path);
430 mntput(mnt);
431 }
432 429
433 sock_put(sk); 430 sock_put(sk);
434 431
@@ -530,6 +527,16 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, 527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t, int); 528 struct msghdr *, size_t, int);
532 529
530static void unix_set_peek_off(struct sock *sk, int val)
531{
532 struct unix_sock *u = unix_sk(sk);
533
534 mutex_lock(&u->readlock);
535 sk->sk_peek_off = val;
536 mutex_unlock(&u->readlock);
537}
538
539
533static const struct proto_ops unix_stream_ops = { 540static const struct proto_ops unix_stream_ops = {
534 .family = PF_UNIX, 541 .family = PF_UNIX,
535 .owner = THIS_MODULE, 542 .owner = THIS_MODULE,
@@ -549,6 +556,7 @@ static const struct proto_ops unix_stream_ops = {
549 .recvmsg = unix_stream_recvmsg, 556 .recvmsg = unix_stream_recvmsg,
550 .mmap = sock_no_mmap, 557 .mmap = sock_no_mmap,
551 .sendpage = sock_no_sendpage, 558 .sendpage = sock_no_sendpage,
559 .set_peek_off = unix_set_peek_off,
552}; 560};
553 561
554static const struct proto_ops unix_dgram_ops = { 562static const struct proto_ops unix_dgram_ops = {
@@ -570,6 +578,7 @@ static const struct proto_ops unix_dgram_ops = {
570 .recvmsg = unix_dgram_recvmsg, 578 .recvmsg = unix_dgram_recvmsg,
571 .mmap = sock_no_mmap, 579 .mmap = sock_no_mmap,
572 .sendpage = sock_no_sendpage, 580 .sendpage = sock_no_sendpage,
581 .set_peek_off = unix_set_peek_off,
573}; 582};
574 583
575static const struct proto_ops unix_seqpacket_ops = { 584static const struct proto_ops unix_seqpacket_ops = {
@@ -591,6 +600,7 @@ static const struct proto_ops unix_seqpacket_ops = {
591 .recvmsg = unix_seqpacket_recvmsg, 600 .recvmsg = unix_seqpacket_recvmsg,
592 .mmap = sock_no_mmap, 601 .mmap = sock_no_mmap,
593 .sendpage = sock_no_sendpage, 602 .sendpage = sock_no_sendpage,
603 .set_peek_off = unix_set_peek_off,
594}; 604};
595 605
596static struct proto unix_proto = { 606static struct proto unix_proto = {
@@ -628,8 +638,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
628 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; 638 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
629 sk->sk_destruct = unix_sock_destructor; 639 sk->sk_destruct = unix_sock_destructor;
630 u = unix_sk(sk); 640 u = unix_sk(sk);
631 u->dentry = NULL; 641 u->path.dentry = NULL;
632 u->mnt = NULL; 642 u->path.mnt = NULL;
633 spin_lock_init(&u->lock); 643 spin_lock_init(&u->lock);
634 atomic_long_set(&u->inflight, 0); 644 atomic_long_set(&u->inflight, 0);
635 INIT_LIST_HEAD(&u->link); 645 INIT_LIST_HEAD(&u->link);
@@ -751,7 +761,7 @@ out: mutex_unlock(&u->readlock);
751 761
752static struct sock *unix_find_other(struct net *net, 762static struct sock *unix_find_other(struct net *net,
753 struct sockaddr_un *sunname, int len, 763 struct sockaddr_un *sunname, int len,
754 int type, unsigned hash, int *error) 764 int type, unsigned int hash, int *error)
755{ 765{
756 struct sock *u; 766 struct sock *u;
757 struct path path; 767 struct path path;
@@ -775,7 +785,7 @@ static struct sock *unix_find_other(struct net *net,
775 goto put_fail; 785 goto put_fail;
776 786
777 if (u->sk_type == type) 787 if (u->sk_type == type)
778 touch_atime(path.mnt, path.dentry); 788 touch_atime(&path);
779 789
780 path_put(&path); 790 path_put(&path);
781 791
@@ -789,9 +799,9 @@ static struct sock *unix_find_other(struct net *net,
789 u = unix_find_socket_byname(net, sunname, len, type, hash); 799 u = unix_find_socket_byname(net, sunname, len, type, hash);
790 if (u) { 800 if (u) {
791 struct dentry *dentry; 801 struct dentry *dentry;
792 dentry = unix_sk(u)->dentry; 802 dentry = unix_sk(u)->path.dentry;
793 if (dentry) 803 if (dentry)
794 touch_atime(unix_sk(u)->mnt, dentry); 804 touch_atime(&unix_sk(u)->path);
795 } else 805 } else
796 goto fail; 806 goto fail;
797 } 807 }
@@ -815,7 +825,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
815 struct dentry *dentry = NULL; 825 struct dentry *dentry = NULL;
816 struct path path; 826 struct path path;
817 int err; 827 int err;
818 unsigned hash; 828 unsigned int hash;
819 struct unix_address *addr; 829 struct unix_address *addr;
820 struct hlist_head *list; 830 struct hlist_head *list;
821 831
@@ -897,8 +907,7 @@ out_mknod_drop_write:
897 list = &unix_socket_table[addr->hash]; 907 list = &unix_socket_table[addr->hash];
898 } else { 908 } else {
899 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; 909 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
900 u->dentry = path.dentry; 910 u->path = path;
901 u->mnt = path.mnt;
902 } 911 }
903 912
904 err = 0; 913 err = 0;
@@ -956,7 +965,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
956 struct net *net = sock_net(sk); 965 struct net *net = sock_net(sk);
957 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; 966 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
958 struct sock *other; 967 struct sock *other;
959 unsigned hash; 968 unsigned int hash;
960 int err; 969 int err;
961 970
962 if (addr->sa_family != AF_UNSPEC) { 971 if (addr->sa_family != AF_UNSPEC) {
@@ -1054,7 +1063,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1054 struct sock *newsk = NULL; 1063 struct sock *newsk = NULL;
1055 struct sock *other = NULL; 1064 struct sock *other = NULL;
1056 struct sk_buff *skb = NULL; 1065 struct sk_buff *skb = NULL;
1057 unsigned hash; 1066 unsigned int hash;
1058 int st; 1067 int st;
1059 int err; 1068 int err;
1060 long timeo; 1069 long timeo;
@@ -1180,9 +1189,9 @@ restart:
1180 atomic_inc(&otheru->addr->refcnt); 1189 atomic_inc(&otheru->addr->refcnt);
1181 newu->addr = otheru->addr; 1190 newu->addr = otheru->addr;
1182 } 1191 }
1183 if (otheru->dentry) { 1192 if (otheru->path.dentry) {
1184 newu->dentry = dget(otheru->dentry); 1193 path_get(&otheru->path);
1185 newu->mnt = mntget(otheru->mnt); 1194 newu->path = otheru->path;
1186 } 1195 }
1187 1196
1188 /* Set credentials */ 1197 /* Set credentials */
@@ -1429,11 +1438,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1429 struct sock *other = NULL; 1438 struct sock *other = NULL;
1430 int namelen = 0; /* fake GCC */ 1439 int namelen = 0; /* fake GCC */
1431 int err; 1440 int err;
1432 unsigned hash; 1441 unsigned int hash;
1433 struct sk_buff *skb; 1442 struct sk_buff *skb;
1434 long timeo; 1443 long timeo;
1435 struct scm_cookie tmp_scm; 1444 struct scm_cookie tmp_scm;
1436 int max_level; 1445 int max_level;
1446 int data_len = 0;
1437 1447
1438 if (NULL == siocb->scm) 1448 if (NULL == siocb->scm)
1439 siocb->scm = &tmp_scm; 1449 siocb->scm = &tmp_scm;
@@ -1467,7 +1477,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1467 if (len > sk->sk_sndbuf - 32) 1477 if (len > sk->sk_sndbuf - 32)
1468 goto out; 1478 goto out;
1469 1479
1470 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); 1480 if (len > SKB_MAX_ALLOC)
1481 data_len = min_t(size_t,
1482 len - SKB_MAX_ALLOC,
1483 MAX_SKB_FRAGS * PAGE_SIZE);
1484
1485 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1486 msg->msg_flags & MSG_DONTWAIT, &err);
1471 if (skb == NULL) 1487 if (skb == NULL)
1472 goto out; 1488 goto out;
1473 1489
@@ -1477,8 +1493,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1477 max_level = err + 1; 1493 max_level = err + 1;
1478 unix_get_secdata(siocb->scm, skb); 1494 unix_get_secdata(siocb->scm, skb);
1479 1495
1480 skb_reset_transport_header(skb); 1496 skb_put(skb, len - data_len);
1481 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1497 skb->data_len = data_len;
1498 skb->len = len;
1499 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1482 if (err) 1500 if (err)
1483 goto out_free; 1501 goto out_free;
1484 1502
@@ -1756,6 +1774,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1756 int noblock = flags & MSG_DONTWAIT; 1774 int noblock = flags & MSG_DONTWAIT;
1757 struct sk_buff *skb; 1775 struct sk_buff *skb;
1758 int err; 1776 int err;
1777 int peeked, skip;
1759 1778
1760 err = -EOPNOTSUPP; 1779 err = -EOPNOTSUPP;
1761 if (flags&MSG_OOB) 1780 if (flags&MSG_OOB)
@@ -1769,7 +1788,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1769 goto out; 1788 goto out;
1770 } 1789 }
1771 1790
1772 skb = skb_recv_datagram(sk, flags, noblock, &err); 1791 skip = sk_peek_offset(sk, flags);
1792
1793 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1773 if (!skb) { 1794 if (!skb) {
1774 unix_state_lock(sk); 1795 unix_state_lock(sk);
1775 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ 1796 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
@@ -1786,12 +1807,12 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1786 if (msg->msg_name) 1807 if (msg->msg_name)
1787 unix_copy_addr(msg, skb->sk); 1808 unix_copy_addr(msg, skb->sk);
1788 1809
1789 if (size > skb->len) 1810 if (size > skb->len - skip)
1790 size = skb->len; 1811 size = skb->len - skip;
1791 else if (size < skb->len) 1812 else if (size < skb->len - skip)
1792 msg->msg_flags |= MSG_TRUNC; 1813 msg->msg_flags |= MSG_TRUNC;
1793 1814
1794 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size); 1815 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1795 if (err) 1816 if (err)
1796 goto out_free; 1817 goto out_free;
1797 1818
@@ -1808,6 +1829,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1808 if (!(flags & MSG_PEEK)) { 1829 if (!(flags & MSG_PEEK)) {
1809 if (UNIXCB(skb).fp) 1830 if (UNIXCB(skb).fp)
1810 unix_detach_fds(siocb->scm, skb); 1831 unix_detach_fds(siocb->scm, skb);
1832
1833 sk_peek_offset_bwd(sk, skb->len);
1811 } else { 1834 } else {
1812 /* It is questionable: on PEEK we could: 1835 /* It is questionable: on PEEK we could:
1813 - do not return fds - good, but too simple 8) 1836 - do not return fds - good, but too simple 8)
@@ -1821,10 +1844,13 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1821 clearly however! 1844 clearly however!
1822 1845
1823 */ 1846 */
1847
1848 sk_peek_offset_fwd(sk, size);
1849
1824 if (UNIXCB(skb).fp) 1850 if (UNIXCB(skb).fp)
1825 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1851 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1826 } 1852 }
1827 err = size; 1853 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1828 1854
1829 scm_recv(sock, msg, siocb->scm, flags); 1855 scm_recv(sock, msg, siocb->scm, flags);
1830 1856
@@ -1884,6 +1910,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1884 int target; 1910 int target;
1885 int err = 0; 1911 int err = 0;
1886 long timeo; 1912 long timeo;
1913 int skip;
1887 1914
1888 err = -EINVAL; 1915 err = -EINVAL;
1889 if (sk->sk_state != TCP_ESTABLISHED) 1916 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1913,12 +1940,15 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1913 goto out; 1940 goto out;
1914 } 1941 }
1915 1942
1943 skip = sk_peek_offset(sk, flags);
1944
1916 do { 1945 do {
1917 int chunk; 1946 int chunk;
1918 struct sk_buff *skb; 1947 struct sk_buff *skb;
1919 1948
1920 unix_state_lock(sk); 1949 unix_state_lock(sk);
1921 skb = skb_peek(&sk->sk_receive_queue); 1950 skb = skb_peek(&sk->sk_receive_queue);
1951again:
1922 if (skb == NULL) { 1952 if (skb == NULL) {
1923 unix_sk(sk)->recursion_level = 0; 1953 unix_sk(sk)->recursion_level = 0;
1924 if (copied >= target) 1954 if (copied >= target)
@@ -1953,6 +1983,13 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1953 unix_state_unlock(sk); 1983 unix_state_unlock(sk);
1954 break; 1984 break;
1955 } 1985 }
1986
1987 if (skip >= skb->len) {
1988 skip -= skb->len;
1989 skb = skb_peek_next(skb, &sk->sk_receive_queue);
1990 goto again;
1991 }
1992
1956 unix_state_unlock(sk); 1993 unix_state_unlock(sk);
1957 1994
1958 if (check_creds) { 1995 if (check_creds) {
@@ -1972,8 +2009,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1972 sunaddr = NULL; 2009 sunaddr = NULL;
1973 } 2010 }
1974 2011
1975 chunk = min_t(unsigned int, skb->len, size); 2012 chunk = min_t(unsigned int, skb->len - skip, size);
1976 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 2013 if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
1977 if (copied == 0) 2014 if (copied == 0)
1978 copied = -EFAULT; 2015 copied = -EFAULT;
1979 break; 2016 break;
@@ -1985,6 +2022,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1985 if (!(flags & MSG_PEEK)) { 2022 if (!(flags & MSG_PEEK)) {
1986 skb_pull(skb, chunk); 2023 skb_pull(skb, chunk);
1987 2024
2025 sk_peek_offset_bwd(sk, chunk);
2026
1988 if (UNIXCB(skb).fp) 2027 if (UNIXCB(skb).fp)
1989 unix_detach_fds(siocb->scm, skb); 2028 unix_detach_fds(siocb->scm, skb);
1990 2029
@@ -2002,6 +2041,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
2002 if (UNIXCB(skb).fp) 2041 if (UNIXCB(skb).fp)
2003 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); 2042 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2004 2043
2044 sk_peek_offset_fwd(sk, chunk);
2045
2005 break; 2046 break;
2006 } 2047 }
2007 } while (size); 2048 } while (size);
@@ -2175,7 +2216,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2175 } 2216 }
2176 2217
2177 /* No write status requested, avoid expensive OUT tests. */ 2218 /* No write status requested, avoid expensive OUT tests. */
2178 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT))) 2219 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2179 return mask; 2220 return mask;
2180 2221
2181 writable = unix_writable(sk); 2222 writable = unix_writable(sk);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 6b7697fd911b..47d3002737f5 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -29,7 +29,7 @@ rtattr_failure:
29 29
30static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) 30static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
31{ 31{
32 struct dentry *dentry = unix_sk(sk)->dentry; 32 struct dentry *dentry = unix_sk(sk)->path.dentry;
33 struct unix_diag_vfs *uv; 33 struct unix_diag_vfs *uv;
34 34
35 if (dentry) { 35 if (dentry) {
@@ -301,14 +301,16 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
301 if (nlmsg_len(h) < hdrlen) 301 if (nlmsg_len(h) < hdrlen)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 if (h->nlmsg_flags & NLM_F_DUMP) 304 if (h->nlmsg_flags & NLM_F_DUMP) {
305 return netlink_dump_start(sock_diag_nlsk, skb, h, 305 struct netlink_dump_control c = {
306 unix_diag_dump, NULL, 0); 306 .dump = unix_diag_dump,
307 else 307 };
308 return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
309 } else
308 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); 310 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
309} 311}
310 312
311static struct sock_diag_handler unix_diag_handler = { 313static const struct sock_diag_handler unix_diag_handler = {
312 .family = AF_UNIX, 314 .family = AF_UNIX,
313 .dump = unix_diag_handler_dump, 315 .dump = unix_diag_handler_dump,
314}; 316};
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 397cffebb3b6..b34b5b9792f0 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,12 +26,6 @@ static ctl_table unix_table[] = {
26 { } 26 { }
27}; 27};
28 28
29static struct ctl_path unix_path[] = {
30 { .procname = "net", },
31 { .procname = "unix", },
32 { },
33};
34
35int __net_init unix_sysctl_register(struct net *net) 29int __net_init unix_sysctl_register(struct net *net)
36{ 30{
37 struct ctl_table *table; 31 struct ctl_table *table;
@@ -41,7 +35,7 @@ int __net_init unix_sysctl_register(struct net *net)
41 goto err_alloc; 35 goto err_alloc;
42 36
43 table[0].data = &net->unx.sysctl_max_dgram_qlen; 37 table[0].data = &net->unx.sysctl_max_dgram_qlen;
44 net->unx.ctl = register_net_sysctl_table(net, unix_path, table); 38 net->unx.ctl = register_net_sysctl(net, "net/unix", table);
45 if (net->unx.ctl == NULL) 39 if (net->unx.ctl == NULL)
46 goto err_reg; 40 goto err_reg;
47 41
@@ -58,6 +52,6 @@ void unix_sysctl_unregister(struct net *net)
58 struct ctl_table *table; 52 struct ctl_table *table;
59 53
60 table = net->unx.ctl->ctl_table_arg; 54 table = net->unx.ctl->ctl_table_arg;
61 unregister_sysctl_table(net->unx.ctl); 55 unregister_net_sysctl_table(net->unx.ctl);
62 kfree(table); 56 kfree(table);
63} 57}
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0b9566..a157a2e64e18 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config WAN_ROUTER 5config WAN_ROUTER
6 tristate "WAN router" 6 tristate "WAN router (DEPRECATED)"
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 ---help--- 8 ---help---
9 Wide Area Networks (WANs), such as X.25, frame relay and leased 9 Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3c65eae701c4..a6470ac39498 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -187,7 +187,7 @@ out:
187 187
188static 188static
189void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, 189void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
190 unsigned allowed_states_bm) 190 unsigned int allowed_states_bm)
191{ 191{
192 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { 192 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
193 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", 193 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
@@ -425,7 +425,8 @@ static
425size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, 425size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
426 unsigned char *addr, size_t addr_len) 426 unsigned char *addr, size_t addr_len)
427{ 427{
428 unsigned cnt, total; 428 unsigned int cnt, total;
429
429 for (total = cnt = 0; cnt < addr_len; cnt++) 430 for (total = cnt = 0; cnt < addr_len; cnt++)
430 total += scnprintf(addr_str + total, addr_str_size - total, 431 total += scnprintf(addr_str + total, addr_str_size - total,
431 "%02x%c", addr[cnt], 432 "%02x%c", addr[cnt],
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2fcfe0993ca2..884801ac4dd0 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
45 return chan; 45 return chan;
46} 46}
47 47
48int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, 48bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
49 struct ieee80211_channel *chan, 49 struct ieee80211_channel *chan,
50 enum nl80211_channel_type channel_type) 50 enum nl80211_channel_type channel_type)
51{ 51{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ccdfed897651..a87d43552974 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -422,10 +422,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
422 const struct ieee80211_iface_combination *c; 422 const struct ieee80211_iface_combination *c;
423 int i, j; 423 int i, j;
424 424
425 /* If we have combinations enforce them */
426 if (wiphy->n_iface_combinations)
427 wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
428
429 for (i = 0; i < wiphy->n_iface_combinations; i++) { 425 for (i = 0; i < wiphy->n_iface_combinations; i++) {
430 u32 cnt = 0; 426 u32 cnt = 0;
431 u16 all_iftypes = 0; 427 u16 all_iftypes = 0;
@@ -668,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
668 mutex_lock(&rdev->devlist_mtx); 664 mutex_lock(&rdev->devlist_mtx);
669 __count = rdev->opencount; 665 __count = rdev->opencount;
670 mutex_unlock(&rdev->devlist_mtx); 666 mutex_unlock(&rdev->devlist_mtx);
671 __count == 0;})); 667 __count == 0; }));
672 668
673 mutex_lock(&rdev->devlist_mtx); 669 mutex_lock(&rdev->devlist_mtx);
674 BUG_ON(!list_empty(&rdev->netdev_list)); 670 BUG_ON(!list_empty(&rdev->netdev_list));
@@ -708,6 +704,10 @@ void wiphy_unregister(struct wiphy *wiphy)
708 flush_work(&rdev->scan_done_wk); 704 flush_work(&rdev->scan_done_wk);
709 cancel_work_sync(&rdev->conn_work); 705 cancel_work_sync(&rdev->conn_work);
710 flush_work(&rdev->event_work); 706 flush_work(&rdev->event_work);
707
708 if (rdev->wowlan && rdev->ops->set_wakeup)
709 rdev->ops->set_wakeup(&rdev->wiphy, false);
710 cfg80211_rdev_free_wowlan(rdev);
711} 711}
712EXPORT_SYMBOL(wiphy_unregister); 712EXPORT_SYMBOL(wiphy_unregister);
713 713
@@ -720,7 +720,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
720 mutex_destroy(&rdev->sched_scan_mtx); 720 mutex_destroy(&rdev->sched_scan_mtx);
721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
722 cfg80211_put_bss(&scan->pub); 722 cfg80211_put_bss(&scan->pub);
723 cfg80211_rdev_free_wowlan(rdev);
724 kfree(rdev); 723 kfree(rdev);
725} 724}
726 725
@@ -777,7 +776,7 @@ static struct device_type wiphy_type = {
777 .name = "wlan", 776 .name = "wlan",
778}; 777};
779 778
780static int cfg80211_netdev_notifier_call(struct notifier_block * nb, 779static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
781 unsigned long state, 780 unsigned long state,
782 void *ndev) 781 void *ndev)
783{ 782{
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 43ad9c81efcf..8523f3878677 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -144,11 +144,6 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu
144 return container_of(pub, struct cfg80211_internal_bss, pub); 144 return container_of(pub, struct cfg80211_internal_bss, pub);
145} 145}
146 146
147static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss)
148{
149 kref_get(&bss->ref);
150}
151
152static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) 147static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
153{ 148{
154 atomic_inc(&bss->hold); 149 atomic_inc(&bss->hold);
@@ -325,15 +320,13 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
325 const u8 *bssid, 320 const u8 *bssid,
326 const u8 *ssid, int ssid_len, 321 const u8 *ssid, int ssid_len,
327 const u8 *ie, int ie_len, 322 const u8 *ie, int ie_len,
328 const u8 *key, int key_len, int key_idx, 323 const u8 *key, int key_len, int key_idx);
329 bool local_state_change);
330int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 324int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
331 struct net_device *dev, struct ieee80211_channel *chan, 325 struct net_device *dev, struct ieee80211_channel *chan,
332 enum nl80211_auth_type auth_type, const u8 *bssid, 326 enum nl80211_auth_type auth_type, const u8 *bssid,
333 const u8 *ssid, int ssid_len, 327 const u8 *ssid, int ssid_len,
334 const u8 *ie, int ie_len, 328 const u8 *ie, int ie_len,
335 const u8 *key, int key_len, int key_idx, 329 const u8 *key, int key_len, int key_idx);
336 bool local_state_change);
337int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
338 struct net_device *dev, 331 struct net_device *dev,
339 struct ieee80211_channel *chan, 332 struct ieee80211_channel *chan,
@@ -421,7 +414,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
421 size_t ie_len, u16 reason, bool from_ap); 414 size_t ie_len, u16 reason, bool from_ap);
422void cfg80211_sme_scan_done(struct net_device *dev); 415void cfg80211_sme_scan_done(struct net_device *dev);
423void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); 416void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
424void cfg80211_sme_disassoc(struct net_device *dev, int idx); 417void cfg80211_sme_disassoc(struct net_device *dev,
418 struct cfg80211_internal_bss *bss);
425void __cfg80211_scan_done(struct work_struct *wk); 419void __cfg80211_scan_done(struct work_struct *wk);
426void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); 420void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
427void __cfg80211_sched_scan_results(struct work_struct *wk); 421void __cfg80211_sched_scan_results(struct work_struct *wk);
@@ -451,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
451 struct wireless_dev *wdev, int freq, 445 struct wireless_dev *wdev, int freq,
452 enum nl80211_channel_type channel_type); 446 enum nl80211_channel_type channel_type);
453 447
454u16 cfg80211_calculate_bitrate(struct rate_info *rate);
455
456int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, 448int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
457 const u8 *rates, unsigned int n_rates, 449 const u8 *rates, unsigned int n_rates,
458 u32 *mask); 450 u32 *mask);
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 39765bcfb472..920cabe0461b 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -13,12 +13,6 @@
13#include "core.h" 13#include "core.h"
14#include "debugfs.h" 14#include "debugfs.h"
15 15
16static int cfg80211_open_file_generic(struct inode *inode, struct file *file)
17{
18 file->private_data = inode->i_private;
19 return 0;
20}
21
22#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ 16#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
23static ssize_t name## _read(struct file *file, char __user *userbuf, \ 17static ssize_t name## _read(struct file *file, char __user *userbuf, \
24 size_t count, loff_t *ppos) \ 18 size_t count, loff_t *ppos) \
@@ -33,7 +27,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
33 \ 27 \
34static const struct file_operations name## _ops = { \ 28static const struct file_operations name## _ops = { \
35 .read = name## _read, \ 29 .read = name## _read, \
36 .open = cfg80211_open_file_generic, \ 30 .open = simple_open, \
37 .llseek = generic_file_llseek, \ 31 .llseek = generic_file_llseek, \
38}; 32};
39 33
@@ -102,7 +96,7 @@ static ssize_t ht40allow_map_read(struct file *file,
102 96
103static const struct file_operations ht40allow_map_ops = { 97static const struct file_operations ht40allow_map_ops = {
104 .read = ht40allow_map_read, 98 .read = ht40allow_map_read,
105 .open = cfg80211_open_file_generic, 99 .open = simple_open,
106 .llseek = default_llseek, 100 .llseek = default_llseek,
107}; 101};
108 102
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 9bde4d1d3e9b..7eecdf40cf80 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -68,6 +68,32 @@ static int cfg80211_set_ringparam(struct net_device *dev,
68 return -ENOTSUPP; 68 return -ENOTSUPP;
69} 69}
70 70
71static int cfg80211_get_sset_count(struct net_device *dev, int sset)
72{
73 struct wireless_dev *wdev = dev->ieee80211_ptr;
74 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
75 if (rdev->ops->get_et_sset_count)
76 return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset);
77 return -EOPNOTSUPP;
78}
79
80static void cfg80211_get_stats(struct net_device *dev,
81 struct ethtool_stats *stats, u64 *data)
82{
83 struct wireless_dev *wdev = dev->ieee80211_ptr;
84 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
85 if (rdev->ops->get_et_stats)
86 rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data);
87}
88
89static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
90{
91 struct wireless_dev *wdev = dev->ieee80211_ptr;
92 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
93 if (rdev->ops->get_et_strings)
94 rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data);
95}
96
71const struct ethtool_ops cfg80211_ethtool_ops = { 97const struct ethtool_ops cfg80211_ethtool_ops = {
72 .get_drvinfo = cfg80211_get_drvinfo, 98 .get_drvinfo = cfg80211_get_drvinfo,
73 .get_regs_len = cfg80211_get_regs_len, 99 .get_regs_len = cfg80211_get_regs_len,
@@ -75,4 +101,7 @@ const struct ethtool_ops cfg80211_ethtool_ops = {
75 .get_link = ethtool_op_get_link, 101 .get_link = ethtool_op_get_link,
76 .get_ringparam = cfg80211_get_ringparam, 102 .get_ringparam = cfg80211_get_ringparam,
77 .set_ringparam = cfg80211_set_ringparam, 103 .set_ringparam = cfg80211_set_ringparam,
104 .get_strings = cfg80211_get_strings,
105 .get_ethtool_stats = cfg80211_get_stats,
106 .get_sset_count = cfg80211_get_sset_count,
78}; 107};
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 30f20fe4a5fe..d2a19b0ff71f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -473,7 +473,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
473 473
474 /* fixed already - and no change */ 474 /* fixed already - and no change */
475 if (wdev->wext.ibss.bssid && bssid && 475 if (wdev->wext.ibss.bssid && bssid &&
476 compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) 476 ether_addr_equal(bssid, wdev->wext.ibss.bssid))
477 return 0; 477 return 0;
478 478
479 wdev_lock(wdev); 479 wdev_lock(wdev);
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 755738d26bb4..1526c211db66 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -304,10 +304,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
304 pos = skb->data + hdr_len; 304 pos = skb->data + hdr_len;
305 keyidx = pos[3]; 305 keyidx = pos[3];
306 if (!(keyidx & (1 << 5))) { 306 if (!(keyidx & (1 << 5))) {
307 if (net_ratelimit()) { 307 net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n",
308 printk(KERN_DEBUG "CCMP: received packet without ExtIV" 308 hdr->addr2);
309 " flag from %pM\n", hdr->addr2);
310 }
311 key->dot11RSNAStatsCCMPFormatErrors++; 309 key->dot11RSNAStatsCCMPFormatErrors++;
312 return -2; 310 return -2;
313 } 311 }
@@ -318,11 +316,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
318 return -6; 316 return -6;
319 } 317 }
320 if (!key->key_set) { 318 if (!key->key_set) {
321 if (net_ratelimit()) { 319 net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
322 printk(KERN_DEBUG "CCMP: received packet from %pM" 320 hdr->addr2, keyidx);
323 " with keyid=%d that does not have a configured"
324 " key\n", hdr->addr2, keyidx);
325 }
326 return -3; 321 return -3;
327 } 322 }
328 323
@@ -336,15 +331,11 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
336 331
337 if (ccmp_replay_check(pn, key->rx_pn)) { 332 if (ccmp_replay_check(pn, key->rx_pn)) {
338#ifdef CONFIG_LIB80211_DEBUG 333#ifdef CONFIG_LIB80211_DEBUG
339 if (net_ratelimit()) { 334 net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n",
340 printk(KERN_DEBUG "CCMP: replay detected: STA=%pM " 335 hdr->addr2,
341 "previous PN %02x%02x%02x%02x%02x%02x " 336 key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
342 "received PN %02x%02x%02x%02x%02x%02x\n", 337 key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
343 hdr->addr2, 338 pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
344 key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
345 key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
346 pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
347 }
348#endif 339#endif
349 key->dot11RSNAStatsCCMPReplays++; 340 key->dot11RSNAStatsCCMPReplays++;
350 return -4; 341 return -4;
@@ -370,10 +361,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
370 } 361 }
371 362
372 if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { 363 if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
373 if (net_ratelimit()) { 364 net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n",
374 printk(KERN_DEBUG "CCMP: decrypt failed: STA=" 365 hdr->addr2);
375 "%pM\n", hdr->addr2);
376 }
377 key->dot11RSNAStatsCCMPDecryptErrors++; 366 key->dot11RSNAStatsCCMPDecryptErrors++;
378 return -5; 367 return -5;
379 } 368 }
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 38734846c19e..d475cfc8568f 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -360,12 +360,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
360 struct scatterlist sg; 360 struct scatterlist sg;
361 361
362 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { 362 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
363 if (net_ratelimit()) { 363 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
364 struct ieee80211_hdr *hdr = 364 net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n",
365 (struct ieee80211_hdr *)skb->data; 365 hdr->addr1);
366 printk(KERN_DEBUG ": TKIP countermeasures: dropped "
367 "TX packet to %pM\n", hdr->addr1);
368 }
369 return -1; 366 return -1;
370 } 367 }
371 368
@@ -420,10 +417,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
420 hdr = (struct ieee80211_hdr *)skb->data; 417 hdr = (struct ieee80211_hdr *)skb->data;
421 418
422 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { 419 if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
423 if (net_ratelimit()) { 420 net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n",
424 printk(KERN_DEBUG ": TKIP countermeasures: dropped " 421 hdr->addr2);
425 "received packet from %pM\n", hdr->addr2);
426 }
427 return -1; 422 return -1;
428 } 423 }
429 424
@@ -433,10 +428,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
433 pos = skb->data + hdr_len; 428 pos = skb->data + hdr_len;
434 keyidx = pos[3]; 429 keyidx = pos[3];
435 if (!(keyidx & (1 << 5))) { 430 if (!(keyidx & (1 << 5))) {
436 if (net_ratelimit()) { 431 net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n",
437 printk(KERN_DEBUG "TKIP: received packet without ExtIV" 432 hdr->addr2);
438 " flag from %pM\n", hdr->addr2);
439 }
440 return -2; 433 return -2;
441 } 434 }
442 keyidx >>= 6; 435 keyidx >>= 6;
@@ -446,11 +439,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
446 return -6; 439 return -6;
447 } 440 }
448 if (!tkey->key_set) { 441 if (!tkey->key_set) {
449 if (net_ratelimit()) { 442 net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n",
450 printk(KERN_DEBUG "TKIP: received packet from %pM" 443 hdr->addr2, keyidx);
451 " with keyid=%d that does not have a configured"
452 " key\n", hdr->addr2, keyidx);
453 }
454 return -3; 444 return -3;
455 } 445 }
456 iv16 = (pos[0] << 8) | pos[2]; 446 iv16 = (pos[0] << 8) | pos[2];
@@ -459,12 +449,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
459 449
460 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { 450 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
461#ifdef CONFIG_LIB80211_DEBUG 451#ifdef CONFIG_LIB80211_DEBUG
462 if (net_ratelimit()) { 452 net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n",
463 printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" 453 hdr->addr2, tkey->rx_iv32, tkey->rx_iv16,
464 " previous TSC %08x%04x received TSC " 454 iv32, iv16);
465 "%08x%04x\n", hdr->addr2,
466 tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
467 }
468#endif 455#endif
469 tkey->dot11RSNAStatsTKIPReplays++; 456 tkey->dot11RSNAStatsTKIPReplays++;
470 return -4; 457 return -4;
@@ -481,11 +468,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
481 crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); 468 crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
482 sg_init_one(&sg, pos, plen + 4); 469 sg_init_one(&sg, pos, plen + 4);
483 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { 470 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
484 if (net_ratelimit()) { 471 net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
485 printk(KERN_DEBUG ": TKIP: failed to decrypt " 472 hdr->addr2);
486 "received packet from %pM\n",
487 hdr->addr2);
488 }
489 return -7; 473 return -7;
490 } 474 }
491 475
@@ -501,10 +485,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
501 tkey->rx_phase1_done = 0; 485 tkey->rx_phase1_done = 0;
502 } 486 }
503#ifdef CONFIG_LIB80211_DEBUG 487#ifdef CONFIG_LIB80211_DEBUG
504 if (net_ratelimit()) { 488 net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n",
505 printk(KERN_DEBUG "TKIP: ICV error detected: STA=" 489 hdr->addr2);
506 "%pM\n", hdr->addr2);
507 }
508#endif 490#endif
509 tkey->dot11RSNAStatsTKIPICVErrors++; 491 tkey->dot11RSNAStatsTKIPICVErrors++;
510 return -5; 492 return -5;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 8c550df13037..2749cb86b462 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -23,6 +23,8 @@
23#define MESH_PERR_MIN_INT 100 23#define MESH_PERR_MIN_INT 100
24#define MESH_DIAM_TRAVERSAL_TIME 50 24#define MESH_DIAM_TRAVERSAL_TIME 50
25 25
26#define MESH_RSSI_THRESHOLD 0
27
26/* 28/*
27 * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds 29 * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds
28 * before timing out. This way it will remain ACTIVE and no data frames 30 * before timing out. This way it will remain ACTIVE and no data frames
@@ -36,6 +38,7 @@
36 38
37#define MESH_MAX_PREQ_RETRIES 4 39#define MESH_MAX_PREQ_RETRIES 4
38 40
41#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50
39 42
40const struct mesh_config default_mesh_config = { 43const struct mesh_config default_mesh_config = {
41 .dot11MeshRetryTimeout = MESH_RET_T, 44 .dot11MeshRetryTimeout = MESH_RET_T,
@@ -46,6 +49,7 @@ const struct mesh_config default_mesh_config = {
46 .element_ttl = MESH_DEFAULT_ELEMENT_TTL, 49 .element_ttl = MESH_DEFAULT_ELEMENT_TTL,
47 .auto_open_plinks = true, 50 .auto_open_plinks = true,
48 .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, 51 .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
52 .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX,
49 .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, 53 .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
50 .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, 54 .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
51 .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, 55 .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
@@ -55,9 +59,13 @@ const struct mesh_config default_mesh_config = {
55 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, 59 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
56 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL, 60 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
57 .dot11MeshGateAnnouncementProtocol = false, 61 .dot11MeshGateAnnouncementProtocol = false,
62 .dot11MeshForwarding = true,
63 .rssi_threshold = MESH_RSSI_THRESHOLD,
64 .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
58}; 65};
59 66
60const struct mesh_setup default_mesh_setup = { 67const struct mesh_setup default_mesh_setup = {
68 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
61 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 69 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
62 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 70 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
63 .ie = NULL, 71 .ie = NULL,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 438dfc105b4a..eb90988bbd36 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/etherdevice.h>
9#include <linux/netdevice.h> 10#include <linux/netdevice.h>
10#include <linux/nl80211.h> 11#include <linux/nl80211.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
@@ -20,40 +21,18 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
20 struct wireless_dev *wdev = dev->ieee80211_ptr; 21 struct wireless_dev *wdev = dev->ieee80211_ptr;
21 struct wiphy *wiphy = wdev->wiphy; 22 struct wiphy *wiphy = wdev->wiphy;
22 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 23 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
23 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
24 u8 *bssid = mgmt->bssid;
25 int i;
26 u16 status = le16_to_cpu(mgmt->u.auth.status_code);
27 bool done = false;
28 24
29 wdev_lock(wdev); 25 wdev_lock(wdev);
30 26
31 for (i = 0; i < MAX_AUTH_BSSES; i++) { 27 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
32 if (wdev->authtry_bsses[i] && 28 cfg80211_sme_rx_auth(dev, buf, len);
33 memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
34 ETH_ALEN) == 0) {
35 if (status == WLAN_STATUS_SUCCESS) {
36 wdev->auth_bsses[i] = wdev->authtry_bsses[i];
37 } else {
38 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
39 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
40 }
41 wdev->authtry_bsses[i] = NULL;
42 done = true;
43 break;
44 }
45 }
46
47 if (done) {
48 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
49 cfg80211_sme_rx_auth(dev, buf, len);
50 }
51 29
52 wdev_unlock(wdev); 30 wdev_unlock(wdev);
53} 31}
54EXPORT_SYMBOL(cfg80211_send_rx_auth); 32EXPORT_SYMBOL(cfg80211_send_rx_auth);
55 33
56void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) 34void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
35 const u8 *buf, size_t len)
57{ 36{
58 u16 status_code; 37 u16 status_code;
59 struct wireless_dev *wdev = dev->ieee80211_ptr; 38 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -61,8 +40,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
61 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 40 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
62 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 41 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
63 u8 *ie = mgmt->u.assoc_resp.variable; 42 u8 *ie = mgmt->u.assoc_resp.variable;
64 int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); 43 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
65 struct cfg80211_internal_bss *bss = NULL;
66 44
67 wdev_lock(wdev); 45 wdev_lock(wdev);
68 46
@@ -75,43 +53,20 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
75 * frame instead of reassoc. 53 * frame instead of reassoc.
76 */ 54 */
77 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && 55 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
78 cfg80211_sme_failed_reassoc(wdev)) 56 cfg80211_sme_failed_reassoc(wdev)) {
57 cfg80211_put_bss(bss);
79 goto out; 58 goto out;
59 }
80 60
81 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); 61 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
82 62
83 if (status_code == WLAN_STATUS_SUCCESS) { 63 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) {
84 for (i = 0; i < MAX_AUTH_BSSES; i++) {
85 if (!wdev->auth_bsses[i])
86 continue;
87 if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid,
88 ETH_ALEN) == 0) {
89 bss = wdev->auth_bsses[i];
90 wdev->auth_bsses[i] = NULL;
91 /* additional reference to drop hold */
92 cfg80211_ref_bss(bss);
93 break;
94 }
95 }
96
97 /*
98 * We might be coming here because the driver reported
99 * a successful association at the same time as the
100 * user requested a deauth. In that case, we will have
101 * removed the BSS from the auth_bsses list due to the
102 * deauth request when the assoc response makes it. If
103 * the two code paths acquire the lock the other way
104 * around, that's just the standard situation of a
105 * deauth being requested while connected.
106 */
107 if (!bss)
108 goto out;
109 } else if (wdev->conn) {
110 cfg80211_sme_failed_assoc(wdev); 64 cfg80211_sme_failed_assoc(wdev);
111 /* 65 /*
112 * do not call connect_result() now because the 66 * do not call connect_result() now because the
113 * sme will schedule work that does it later. 67 * sme will schedule work that does it later.
114 */ 68 */
69 cfg80211_put_bss(bss);
115 goto out; 70 goto out;
116 } 71 }
117 72
@@ -124,17 +79,10 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
124 wdev->sme_state = CFG80211_SME_CONNECTING; 79 wdev->sme_state = CFG80211_SME_CONNECTING;
125 } 80 }
126 81
127 /* this consumes one bss reference (unless bss is NULL) */ 82 /* this consumes the bss reference */
128 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, 83 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
129 status_code, 84 status_code,
130 status_code == WLAN_STATUS_SUCCESS, 85 status_code == WLAN_STATUS_SUCCESS, bss);
131 bss ? &bss->pub : NULL);
132 /* drop hold now, and also reference acquired above */
133 if (bss) {
134 cfg80211_unhold_bss(bss);
135 cfg80211_put_bss(&bss->pub);
136 }
137
138 out: 86 out:
139 wdev_unlock(wdev); 87 wdev_unlock(wdev);
140} 88}
@@ -148,42 +96,18 @@ void __cfg80211_send_deauth(struct net_device *dev,
148 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 96 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 97 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
150 const u8 *bssid = mgmt->bssid; 98 const u8 *bssid = mgmt->bssid;
151 int i; 99 bool was_current = false;
152 bool found = false, was_current = false;
153 100
154 ASSERT_WDEV_LOCK(wdev); 101 ASSERT_WDEV_LOCK(wdev);
155 102
156 if (wdev->current_bss && 103 if (wdev->current_bss &&
157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 104 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
158 cfg80211_unhold_bss(wdev->current_bss); 105 cfg80211_unhold_bss(wdev->current_bss);
159 cfg80211_put_bss(&wdev->current_bss->pub); 106 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 107 wdev->current_bss = NULL;
161 found = true;
162 was_current = true; 108 was_current = true;
163 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
164 if (wdev->auth_bsses[i] &&
165 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
166 cfg80211_unhold_bss(wdev->auth_bsses[i]);
167 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
168 wdev->auth_bsses[i] = NULL;
169 found = true;
170 break;
171 }
172 if (wdev->authtry_bsses[i] &&
173 memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
174 ETH_ALEN) == 0 &&
175 memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) == 0) {
176 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
177 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
178 wdev->authtry_bsses[i] = NULL;
179 found = true;
180 break;
181 }
182 } 109 }
183 110
184 if (!found)
185 return;
186
187 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); 111 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
188 112
189 if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) { 113 if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
@@ -192,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
192 116
193 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 117 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
194 118
195 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; 119 from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
196 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 120 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
197 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { 121 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
198 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, 122 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -220,10 +144,8 @@ void __cfg80211_send_disassoc(struct net_device *dev,
220 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 144 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
221 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 145 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
222 const u8 *bssid = mgmt->bssid; 146 const u8 *bssid = mgmt->bssid;
223 int i;
224 u16 reason_code; 147 u16 reason_code;
225 bool from_ap; 148 bool from_ap;
226 bool done = false;
227 149
228 ASSERT_WDEV_LOCK(wdev); 150 ASSERT_WDEV_LOCK(wdev);
229 151
@@ -233,24 +155,18 @@ void __cfg80211_send_disassoc(struct net_device *dev,
233 return; 155 return;
234 156
235 if (wdev->current_bss && 157 if (wdev->current_bss &&
236 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 158 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
237 for (i = 0; i < MAX_AUTH_BSSES; i++) { 159 cfg80211_sme_disassoc(dev, wdev->current_bss);
238 if (wdev->authtry_bsses[i] || wdev->auth_bsses[i]) 160 cfg80211_unhold_bss(wdev->current_bss);
239 continue; 161 cfg80211_put_bss(&wdev->current_bss->pub);
240 wdev->auth_bsses[i] = wdev->current_bss; 162 wdev->current_bss = NULL;
241 wdev->current_bss = NULL;
242 done = true;
243 cfg80211_sme_disassoc(dev, i);
244 break;
245 }
246 WARN_ON(!done);
247 } else 163 } else
248 WARN_ON(1); 164 WARN_ON(1);
249 165
250 166
251 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 167 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
252 168
253 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; 169 from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
254 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 170 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
255} 171}
256EXPORT_SYMBOL(__cfg80211_send_disassoc); 172EXPORT_SYMBOL(__cfg80211_send_disassoc);
@@ -287,34 +203,6 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
287} 203}
288EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); 204EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
289 205
290static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
291{
292 int i;
293 bool done = false;
294
295 ASSERT_WDEV_LOCK(wdev);
296
297 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
298 if (wdev->authtry_bsses[i] &&
299 memcmp(wdev->authtry_bsses[i]->pub.bssid,
300 addr, ETH_ALEN) == 0) {
301 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
302 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
303 wdev->authtry_bsses[i] = NULL;
304 done = true;
305 break;
306 }
307 }
308
309 WARN_ON(!done);
310}
311
312void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr)
313{
314 __cfg80211_auth_remove(dev->ieee80211_ptr, addr);
315}
316EXPORT_SYMBOL(__cfg80211_auth_canceled);
317
318void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 206void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
319{ 207{
320 struct wireless_dev *wdev = dev->ieee80211_ptr; 208 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -329,8 +217,6 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
329 WLAN_STATUS_UNSPECIFIED_FAILURE, 217 WLAN_STATUS_UNSPECIFIED_FAILURE,
330 false, NULL); 218 false, NULL);
331 219
332 __cfg80211_auth_remove(wdev, addr);
333
334 wdev_unlock(wdev); 220 wdev_unlock(wdev);
335} 221}
336EXPORT_SYMBOL(cfg80211_send_auth_timeout); 222EXPORT_SYMBOL(cfg80211_send_auth_timeout);
@@ -340,8 +226,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
340 struct wireless_dev *wdev = dev->ieee80211_ptr; 226 struct wireless_dev *wdev = dev->ieee80211_ptr;
341 struct wiphy *wiphy = wdev->wiphy; 227 struct wiphy *wiphy = wdev->wiphy;
342 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 228 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
343 int i;
344 bool done = false;
345 229
346 wdev_lock(wdev); 230 wdev_lock(wdev);
347 231
@@ -351,20 +235,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
351 WLAN_STATUS_UNSPECIFIED_FAILURE, 235 WLAN_STATUS_UNSPECIFIED_FAILURE,
352 false, NULL); 236 false, NULL);
353 237
354 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
355 if (wdev->auth_bsses[i] &&
356 memcmp(wdev->auth_bsses[i]->pub.bssid,
357 addr, ETH_ALEN) == 0) {
358 cfg80211_unhold_bss(wdev->auth_bsses[i]);
359 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
360 wdev->auth_bsses[i] = NULL;
361 done = true;
362 break;
363 }
364 }
365
366 WARN_ON(!done);
367
368 wdev_unlock(wdev); 238 wdev_unlock(wdev);
369} 239}
370EXPORT_SYMBOL(cfg80211_send_assoc_timeout); 240EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
@@ -403,13 +273,11 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
403 const u8 *bssid, 273 const u8 *bssid,
404 const u8 *ssid, int ssid_len, 274 const u8 *ssid, int ssid_len,
405 const u8 *ie, int ie_len, 275 const u8 *ie, int ie_len,
406 const u8 *key, int key_len, int key_idx, 276 const u8 *key, int key_len, int key_idx)
407 bool local_state_change)
408{ 277{
409 struct wireless_dev *wdev = dev->ieee80211_ptr; 278 struct wireless_dev *wdev = dev->ieee80211_ptr;
410 struct cfg80211_auth_request req; 279 struct cfg80211_auth_request req;
411 struct cfg80211_internal_bss *bss; 280 int err;
412 int i, err, slot = -1, nfree = 0;
413 281
414 ASSERT_WDEV_LOCK(wdev); 282 ASSERT_WDEV_LOCK(wdev);
415 283
@@ -418,23 +286,11 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
418 return -EINVAL; 286 return -EINVAL;
419 287
420 if (wdev->current_bss && 288 if (wdev->current_bss &&
421 memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) 289 ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
422 return -EALREADY; 290 return -EALREADY;
423 291
424 for (i = 0; i < MAX_AUTH_BSSES; i++) {
425 if (wdev->authtry_bsses[i] &&
426 memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid,
427 ETH_ALEN) == 0)
428 return -EALREADY;
429 if (wdev->auth_bsses[i] &&
430 memcmp(bssid, wdev->auth_bsses[i]->pub.bssid,
431 ETH_ALEN) == 0)
432 return -EALREADY;
433 }
434
435 memset(&req, 0, sizeof(req)); 292 memset(&req, 0, sizeof(req));
436 293
437 req.local_state_change = local_state_change;
438 req.ie = ie; 294 req.ie = ie;
439 req.ie_len = ie_len; 295 req.ie_len = ie_len;
440 req.auth_type = auth_type; 296 req.auth_type = auth_type;
@@ -446,39 +302,9 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
446 if (!req.bss) 302 if (!req.bss)
447 return -ENOENT; 303 return -ENOENT;
448 304
449 bss = bss_from_pub(req.bss);
450
451 for (i = 0; i < MAX_AUTH_BSSES; i++) {
452 if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) {
453 slot = i;
454 nfree++;
455 }
456 }
457
458 /* we need one free slot for disassoc and one for this auth */
459 if (nfree < 2) {
460 err = -ENOSPC;
461 goto out;
462 }
463
464 if (local_state_change)
465 wdev->auth_bsses[slot] = bss;
466 else
467 wdev->authtry_bsses[slot] = bss;
468 cfg80211_hold_bss(bss);
469
470 err = rdev->ops->auth(&rdev->wiphy, dev, &req); 305 err = rdev->ops->auth(&rdev->wiphy, dev, &req);
471 if (err) {
472 if (local_state_change)
473 wdev->auth_bsses[slot] = NULL;
474 else
475 wdev->authtry_bsses[slot] = NULL;
476 cfg80211_unhold_bss(bss);
477 }
478 306
479 out: 307 cfg80211_put_bss(req.bss);
480 if (err)
481 cfg80211_put_bss(req.bss);
482 return err; 308 return err;
483} 309}
484 310
@@ -487,15 +313,14 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
487 enum nl80211_auth_type auth_type, const u8 *bssid, 313 enum nl80211_auth_type auth_type, const u8 *bssid,
488 const u8 *ssid, int ssid_len, 314 const u8 *ssid, int ssid_len,
489 const u8 *ie, int ie_len, 315 const u8 *ie, int ie_len,
490 const u8 *key, int key_len, int key_idx, 316 const u8 *key, int key_len, int key_idx)
491 bool local_state_change)
492{ 317{
493 int err; 318 int err;
494 319
495 wdev_lock(dev->ieee80211_ptr); 320 wdev_lock(dev->ieee80211_ptr);
496 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 321 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
497 ssid, ssid_len, ie, ie_len, 322 ssid, ssid_len, ie, ie_len,
498 key, key_len, key_idx, local_state_change); 323 key, key_len, key_idx);
499 wdev_unlock(dev->ieee80211_ptr); 324 wdev_unlock(dev->ieee80211_ptr);
500 325
501 return err; 326 return err;
@@ -530,8 +355,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
530{ 355{
531 struct wireless_dev *wdev = dev->ieee80211_ptr; 356 struct wireless_dev *wdev = dev->ieee80211_ptr;
532 struct cfg80211_assoc_request req; 357 struct cfg80211_assoc_request req;
533 struct cfg80211_internal_bss *bss; 358 int err;
534 int i, err, slot = -1;
535 bool was_connected = false; 359 bool was_connected = false;
536 360
537 ASSERT_WDEV_LOCK(wdev); 361 ASSERT_WDEV_LOCK(wdev);
@@ -539,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
539 memset(&req, 0, sizeof(req)); 363 memset(&req, 0, sizeof(req));
540 364
541 if (wdev->current_bss && prev_bssid && 365 if (wdev->current_bss && prev_bssid &&
542 memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) { 366 ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
543 /* 367 /*
544 * Trying to reassociate: Allow this to proceed and let the old 368 * Trying to reassociate: Allow this to proceed and let the old
545 * association to be dropped when the new one is completed. 369 * association to be dropped when the new one is completed.
@@ -573,26 +397,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
573 return -ENOENT; 397 return -ENOENT;
574 } 398 }
575 399
576 bss = bss_from_pub(req.bss); 400 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
577
578 for (i = 0; i < MAX_AUTH_BSSES; i++) {
579 if (bss == wdev->auth_bsses[i]) {
580 slot = i;
581 break;
582 }
583 }
584 401
585 if (slot < 0) { 402 if (err) {
586 err = -ENOTCONN; 403 if (was_connected)
587 goto out; 404 wdev->sme_state = CFG80211_SME_CONNECTED;
405 cfg80211_put_bss(req.bss);
588 } 406 }
589 407
590 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
591 out:
592 if (err && was_connected)
593 wdev->sme_state = CFG80211_SME_CONNECTED;
594 /* still a reference in wdev->auth_bsses[slot] */
595 cfg80211_put_bss(req.bss);
596 return err; 408 return err;
597} 409}
598 410
@@ -624,36 +436,27 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
624 bool local_state_change) 436 bool local_state_change)
625{ 437{
626 struct wireless_dev *wdev = dev->ieee80211_ptr; 438 struct wireless_dev *wdev = dev->ieee80211_ptr;
627 struct cfg80211_deauth_request req; 439 struct cfg80211_deauth_request req = {
628 int i; 440 .bssid = bssid,
441 .reason_code = reason,
442 .ie = ie,
443 .ie_len = ie_len,
444 };
629 445
630 ASSERT_WDEV_LOCK(wdev); 446 ASSERT_WDEV_LOCK(wdev);
631 447
632 memset(&req, 0, sizeof(req)); 448 if (local_state_change) {
633 req.reason_code = reason; 449 if (wdev->current_bss &&
634 req.local_state_change = local_state_change; 450 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
635 req.ie = ie; 451 cfg80211_unhold_bss(wdev->current_bss);
636 req.ie_len = ie_len; 452 cfg80211_put_bss(&wdev->current_bss->pub);
637 if (wdev->current_bss && 453 wdev->current_bss = NULL;
638 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
639 req.bss = &wdev->current_bss->pub;
640 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
641 if (wdev->auth_bsses[i] &&
642 memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
643 req.bss = &wdev->auth_bsses[i]->pub;
644 break;
645 }
646 if (wdev->authtry_bsses[i] &&
647 memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
648 req.bss = &wdev->authtry_bsses[i]->pub;
649 break;
650 } 454 }
651 }
652 455
653 if (!req.bss) 456 return 0;
654 return -ENOTCONN; 457 }
655 458
656 return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); 459 return rdev->ops->deauth(&rdev->wiphy, dev, &req);
657} 460}
658 461
659int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 462int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
@@ -693,12 +496,12 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
693 req.local_state_change = local_state_change; 496 req.local_state_change = local_state_change;
694 req.ie = ie; 497 req.ie = ie;
695 req.ie_len = ie_len; 498 req.ie_len = ie_len;
696 if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) 499 if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
697 req.bss = &wdev->current_bss->pub; 500 req.bss = &wdev->current_bss->pub;
698 else 501 else
699 return -ENOTCONN; 502 return -ENOTCONN;
700 503
701 return rdev->ops->disassoc(&rdev->wiphy, dev, &req, wdev); 504 return rdev->ops->disassoc(&rdev->wiphy, dev, &req);
702} 505}
703 506
704int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 507int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
@@ -722,7 +525,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
722{ 525{
723 struct wireless_dev *wdev = dev->ieee80211_ptr; 526 struct wireless_dev *wdev = dev->ieee80211_ptr;
724 struct cfg80211_deauth_request req; 527 struct cfg80211_deauth_request req;
725 int i; 528 u8 bssid[ETH_ALEN];
726 529
727 ASSERT_WDEV_LOCK(wdev); 530 ASSERT_WDEV_LOCK(wdev);
728 531
@@ -734,35 +537,17 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
734 req.ie = NULL; 537 req.ie = NULL;
735 req.ie_len = 0; 538 req.ie_len = 0;
736 539
737 if (wdev->current_bss) { 540 if (!wdev->current_bss)
738 req.bss = &wdev->current_bss->pub; 541 return;
739 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
740 if (wdev->current_bss) {
741 cfg80211_unhold_bss(wdev->current_bss);
742 cfg80211_put_bss(&wdev->current_bss->pub);
743 wdev->current_bss = NULL;
744 }
745 }
746 542
747 for (i = 0; i < MAX_AUTH_BSSES; i++) { 543 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
748 if (wdev->auth_bsses[i]) { 544 req.bssid = bssid;
749 req.bss = &wdev->auth_bsses[i]->pub; 545 rdev->ops->deauth(&rdev->wiphy, dev, &req);
750 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); 546
751 if (wdev->auth_bsses[i]) { 547 if (wdev->current_bss) {
752 cfg80211_unhold_bss(wdev->auth_bsses[i]); 548 cfg80211_unhold_bss(wdev->current_bss);
753 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 549 cfg80211_put_bss(&wdev->current_bss->pub);
754 wdev->auth_bsses[i] = NULL; 550 wdev->current_bss = NULL;
755 }
756 }
757 if (wdev->authtry_bsses[i]) {
758 req.bss = &wdev->authtry_bsses[i]->pub;
759 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
760 if (wdev->authtry_bsses[i]) {
761 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
762 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
763 wdev->authtry_bsses[i] = NULL;
764 }
765 }
766 } 551 }
767} 552}
768 553
@@ -974,8 +759,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
974 break; 759 break;
975 } 760 }
976 761
977 if (memcmp(wdev->current_bss->pub.bssid, 762 if (!ether_addr_equal(wdev->current_bss->pub.bssid,
978 mgmt->bssid, ETH_ALEN)) { 763 mgmt->bssid)) {
979 err = -ENOTCONN; 764 err = -ENOTCONN;
980 break; 765 break;
981 } 766 }
@@ -988,8 +773,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
988 break; 773 break;
989 774
990 /* for station, check that DA is the AP */ 775 /* for station, check that DA is the AP */
991 if (memcmp(wdev->current_bss->pub.bssid, 776 if (!ether_addr_equal(wdev->current_bss->pub.bssid,
992 mgmt->da, ETH_ALEN)) { 777 mgmt->da)) {
993 err = -ENOTCONN; 778 err = -ENOTCONN;
994 break; 779 break;
995 } 780 }
@@ -997,11 +782,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
997 case NL80211_IFTYPE_AP: 782 case NL80211_IFTYPE_AP:
998 case NL80211_IFTYPE_P2P_GO: 783 case NL80211_IFTYPE_P2P_GO:
999 case NL80211_IFTYPE_AP_VLAN: 784 case NL80211_IFTYPE_AP_VLAN:
1000 if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN)) 785 if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
1001 err = -EINVAL; 786 err = -EINVAL;
1002 break; 787 break;
1003 case NL80211_IFTYPE_MESH_POINT: 788 case NL80211_IFTYPE_MESH_POINT:
1004 if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) { 789 if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) {
1005 err = -EINVAL; 790 err = -EINVAL;
1006 break; 791 break;
1007 } 792 }
@@ -1020,7 +805,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
1020 return err; 805 return err;
1021 } 806 }
1022 807
1023 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) 808 if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
1024 return -EINVAL; 809 return -EINVAL;
1025 810
1026 /* Transmit the Action frame as requested by user space */ 811 /* Transmit the Action frame as requested by user space */
@@ -1030,8 +815,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
1030 cookie); 815 cookie);
1031} 816}
1032 817
1033bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, 818bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
1034 size_t len, gfp_t gfp) 819 const u8 *buf, size_t len, gfp_t gfp)
1035{ 820{
1036 struct wireless_dev *wdev = dev->ieee80211_ptr; 821 struct wireless_dev *wdev = dev->ieee80211_ptr;
1037 struct wiphy *wiphy = wdev->wiphy; 822 struct wiphy *wiphy = wdev->wiphy;
@@ -1070,7 +855,8 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
1070 /* found match! */ 855 /* found match! */
1071 856
1072 /* Indicate the received Action frame to user space */ 857 /* Indicate the received Action frame to user space */
1073 if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq, 858 if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
859 freq, sig_mbm,
1074 buf, len, gfp)) 860 buf, len, gfp))
1075 continue; 861 continue;
1076 862
@@ -1143,6 +929,33 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
1143} 929}
1144EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); 930EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
1145 931
932void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
933 enum nl80211_channel_type type)
934{
935 struct wireless_dev *wdev = dev->ieee80211_ptr;
936 struct wiphy *wiphy = wdev->wiphy;
937 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
938 struct ieee80211_channel *chan;
939
940 wdev_lock(wdev);
941
942 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
943 wdev->iftype != NL80211_IFTYPE_P2P_GO))
944 goto out;
945
946 chan = rdev_freq_to_chan(rdev, freq, type);
947 if (WARN_ON(!chan))
948 goto out;
949
950 wdev->channel = chan;
951
952 nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
953out:
954 wdev_unlock(wdev);
955 return;
956}
957EXPORT_SYMBOL(cfg80211_ch_switch_notify);
958
1146bool cfg80211_rx_spurious_frame(struct net_device *dev, 959bool cfg80211_rx_spurious_frame(struct net_device *dev,
1147 const u8 *addr, gfp_t gfp) 960 const u8 *addr, gfp_t gfp)
1148{ 961{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index afeea32e04ad..206465dc0cab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -204,6 +204,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
204 .len = NL80211_HT_CAPABILITY_LEN 204 .len = NL80211_HT_CAPABILITY_LEN
205 }, 205 },
206 [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 }, 206 [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
207 [NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
208 [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
207}; 209};
208 210
209/* policy for the key attributes */ 211/* policy for the key attributes */
@@ -354,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
354static int nl80211_msg_put_channel(struct sk_buff *msg, 356static int nl80211_msg_put_channel(struct sk_buff *msg,
355 struct ieee80211_channel *chan) 357 struct ieee80211_channel *chan)
356{ 358{
357 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, 359 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
358 chan->center_freq); 360 chan->center_freq))
361 goto nla_put_failure;
359 362
360 if (chan->flags & IEEE80211_CHAN_DISABLED) 363 if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
361 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); 364 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
362 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) 365 goto nla_put_failure;
363 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); 366 if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
364 if (chan->flags & IEEE80211_CHAN_NO_IBSS) 367 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
365 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); 368 goto nla_put_failure;
366 if (chan->flags & IEEE80211_CHAN_RADAR) 369 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
367 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); 370 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
371 goto nla_put_failure;
372 if ((chan->flags & IEEE80211_CHAN_RADAR) &&
373 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
374 goto nla_put_failure;
368 375
369 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 376 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
370 DBM_TO_MBM(chan->max_power)); 377 DBM_TO_MBM(chan->max_power)))
378 goto nla_put_failure;
371 379
372 return 0; 380 return 0;
373 381
@@ -427,10 +435,9 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
427 435
428 if (tb[NL80211_KEY_DEFAULT_TYPES]) { 436 if (tb[NL80211_KEY_DEFAULT_TYPES]) {
429 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; 437 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
430 int err = nla_parse_nested(kdt, 438 err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
431 NUM_NL80211_KEY_DEFAULT_TYPES - 1, 439 tb[NL80211_KEY_DEFAULT_TYPES],
432 tb[NL80211_KEY_DEFAULT_TYPES], 440 nl80211_key_default_policy);
433 nl80211_key_default_policy);
434 if (err) 441 if (err)
435 return err; 442 return err;
436 443
@@ -620,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
620 627
621 i = 0; 628 i = 0;
622 while (ifmodes) { 629 while (ifmodes) {
623 if (ifmodes & 1) 630 if ((ifmodes & 1) && nla_put_flag(msg, i))
624 NLA_PUT_FLAG(msg, i); 631 goto nla_put_failure;
625 ifmodes >>= 1; 632 ifmodes >>= 1;
626 i++; 633 i++;
627 } 634 }
@@ -664,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
664 nl_limit = nla_nest_start(msg, j + 1); 671 nl_limit = nla_nest_start(msg, j + 1);
665 if (!nl_limit) 672 if (!nl_limit)
666 goto nla_put_failure; 673 goto nla_put_failure;
667 NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX, 674 if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
668 c->limits[j].max); 675 c->limits[j].max))
676 goto nla_put_failure;
669 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, 677 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
670 c->limits[j].types)) 678 c->limits[j].types))
671 goto nla_put_failure; 679 goto nla_put_failure;
@@ -674,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
674 682
675 nla_nest_end(msg, nl_limits); 683 nla_nest_end(msg, nl_limits);
676 684
677 if (c->beacon_int_infra_match) 685 if (c->beacon_int_infra_match &&
678 NLA_PUT_FLAG(msg, 686 nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
679 NL80211_IFACE_COMB_STA_AP_BI_MATCH); 687 goto nla_put_failure;
680 NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, 688 if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
681 c->num_different_channels); 689 c->num_different_channels) ||
682 NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM, 690 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
683 c->max_interfaces); 691 c->max_interfaces))
692 goto nla_put_failure;
684 693
685 nla_nest_end(msg, nl_combi); 694 nla_nest_end(msg, nl_combi);
686 } 695 }
@@ -711,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
711 if (!hdr) 720 if (!hdr)
712 return -1; 721 return -1;
713 722
714 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); 723 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
715 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 724 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
716 725 nla_put_u32(msg, NL80211_ATTR_GENERATION,
717 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, 726 cfg80211_rdev_list_generation) ||
718 cfg80211_rdev_list_generation); 727 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
719 728 dev->wiphy.retry_short) ||
720 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 729 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
721 dev->wiphy.retry_short); 730 dev->wiphy.retry_long) ||
722 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, 731 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
723 dev->wiphy.retry_long); 732 dev->wiphy.frag_threshold) ||
724 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, 733 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
725 dev->wiphy.frag_threshold); 734 dev->wiphy.rts_threshold) ||
726 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 735 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
727 dev->wiphy.rts_threshold); 736 dev->wiphy.coverage_class) ||
728 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, 737 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
729 dev->wiphy.coverage_class); 738 dev->wiphy.max_scan_ssids) ||
730 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 739 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
731 dev->wiphy.max_scan_ssids); 740 dev->wiphy.max_sched_scan_ssids) ||
732 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, 741 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
733 dev->wiphy.max_sched_scan_ssids); 742 dev->wiphy.max_scan_ie_len) ||
734 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, 743 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
735 dev->wiphy.max_scan_ie_len); 744 dev->wiphy.max_sched_scan_ie_len) ||
736 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, 745 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
737 dev->wiphy.max_sched_scan_ie_len); 746 dev->wiphy.max_match_sets))
738 NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS, 747 goto nla_put_failure;
739 dev->wiphy.max_match_sets); 748
740 749 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
741 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) 750 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
742 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); 751 goto nla_put_failure;
743 if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) 752 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
744 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH); 753 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
745 if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) 754 goto nla_put_failure;
746 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD); 755 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
747 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) 756 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
748 NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT); 757 goto nla_put_failure;
749 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) 758 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
750 NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT); 759 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
751 if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) 760 goto nla_put_failure;
752 NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP); 761 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
753 762 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
754 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, 763 goto nla_put_failure;
755 sizeof(u32) * dev->wiphy.n_cipher_suites, 764 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
756 dev->wiphy.cipher_suites); 765 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
757 766 goto nla_put_failure;
758 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 767
759 dev->wiphy.max_num_pmkids); 768 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
760 769 sizeof(u32) * dev->wiphy.n_cipher_suites,
761 if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) 770 dev->wiphy.cipher_suites))
762 NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); 771 goto nla_put_failure;
763 772
764 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 773 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
765 dev->wiphy.available_antennas_tx); 774 dev->wiphy.max_num_pmkids))
766 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 775 goto nla_put_failure;
767 dev->wiphy.available_antennas_rx); 776
768 777 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
769 if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) 778 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
770 NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 779 goto nla_put_failure;
771 dev->wiphy.probe_resp_offload); 780
781 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
782 dev->wiphy.available_antennas_tx) ||
783 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
784 dev->wiphy.available_antennas_rx))
785 goto nla_put_failure;
786
787 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
788 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
789 dev->wiphy.probe_resp_offload))
790 goto nla_put_failure;
772 791
773 if ((dev->wiphy.available_antennas_tx || 792 if ((dev->wiphy.available_antennas_tx ||
774 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { 793 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -776,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
776 int res; 795 int res;
777 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); 796 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
778 if (!res) { 797 if (!res) {
779 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant); 798 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
780 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant); 799 tx_ant) ||
800 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
801 rx_ant))
802 goto nla_put_failure;
781 } 803 }
782 } 804 }
783 805
@@ -798,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
798 goto nla_put_failure; 820 goto nla_put_failure;
799 821
800 /* add HT info */ 822 /* add HT info */
801 if (dev->wiphy.bands[band]->ht_cap.ht_supported) { 823 if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
802 NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, 824 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
803 sizeof(dev->wiphy.bands[band]->ht_cap.mcs), 825 sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
804 &dev->wiphy.bands[band]->ht_cap.mcs); 826 &dev->wiphy.bands[band]->ht_cap.mcs) ||
805 NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, 827 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
806 dev->wiphy.bands[band]->ht_cap.cap); 828 dev->wiphy.bands[band]->ht_cap.cap) ||
807 NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, 829 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
808 dev->wiphy.bands[band]->ht_cap.ampdu_factor); 830 dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
809 NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, 831 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
810 dev->wiphy.bands[band]->ht_cap.ampdu_density); 832 dev->wiphy.bands[band]->ht_cap.ampdu_density)))
811 } 833 goto nla_put_failure;
812 834
813 /* add frequencies */ 835 /* add frequencies */
814 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); 836 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -841,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
841 goto nla_put_failure; 863 goto nla_put_failure;
842 864
843 rate = &dev->wiphy.bands[band]->bitrates[i]; 865 rate = &dev->wiphy.bands[band]->bitrates[i];
844 NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, 866 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
845 rate->bitrate); 867 rate->bitrate))
846 if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 868 goto nla_put_failure;
847 NLA_PUT_FLAG(msg, 869 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
848 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); 870 nla_put_flag(msg,
871 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
872 goto nla_put_failure;
849 873
850 nla_nest_end(msg, nl_rate); 874 nla_nest_end(msg, nl_rate);
851 } 875 }
@@ -865,14 +889,15 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
865 do { \ 889 do { \
866 if (dev->ops->op) { \ 890 if (dev->ops->op) { \
867 i++; \ 891 i++; \
868 NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ 892 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
893 goto nla_put_failure; \
869 } \ 894 } \
870 } while (0) 895 } while (0)
871 896
872 CMD(add_virtual_intf, NEW_INTERFACE); 897 CMD(add_virtual_intf, NEW_INTERFACE);
873 CMD(change_virtual_intf, SET_INTERFACE); 898 CMD(change_virtual_intf, SET_INTERFACE);
874 CMD(add_key, NEW_KEY); 899 CMD(add_key, NEW_KEY);
875 CMD(add_beacon, NEW_BEACON); 900 CMD(start_ap, START_AP);
876 CMD(add_station, NEW_STATION); 901 CMD(add_station, NEW_STATION);
877 CMD(add_mpath, NEW_MPATH); 902 CMD(add_mpath, NEW_MPATH);
878 CMD(update_mesh_config, SET_MESH_CONFIG); 903 CMD(update_mesh_config, SET_MESH_CONFIG);
@@ -893,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
893 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); 918 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
894 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 919 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
895 i++; 920 i++;
896 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 921 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
922 goto nla_put_failure;
897 } 923 }
898 CMD(set_channel, SET_CHANNEL); 924 CMD(set_channel, SET_CHANNEL);
899 CMD(set_wds_peer, SET_WDS_PEER); 925 CMD(set_wds_peer, SET_WDS_PEER);
@@ -907,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
907 CMD(set_noack_map, SET_NOACK_MAP); 933 CMD(set_noack_map, SET_NOACK_MAP);
908 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { 934 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
909 i++; 935 i++;
910 NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); 936 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
937 goto nla_put_failure;
911 } 938 }
912 939
913#ifdef CONFIG_NL80211_TESTMODE 940#ifdef CONFIG_NL80211_TESTMODE
@@ -918,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
918 945
919 if (dev->ops->connect || dev->ops->auth) { 946 if (dev->ops->connect || dev->ops->auth) {
920 i++; 947 i++;
921 NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); 948 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
949 goto nla_put_failure;
922 } 950 }
923 951
924 if (dev->ops->disconnect || dev->ops->deauth) { 952 if (dev->ops->disconnect || dev->ops->deauth) {
925 i++; 953 i++;
926 NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); 954 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
955 goto nla_put_failure;
927 } 956 }
928 957
929 nla_nest_end(msg, nl_cmds); 958 nla_nest_end(msg, nl_cmds);
930 959
931 if (dev->ops->remain_on_channel && 960 if (dev->ops->remain_on_channel &&
932 dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) 961 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
933 NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 962 nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
934 dev->wiphy.max_remain_on_channel_duration); 963 dev->wiphy.max_remain_on_channel_duration))
964 goto nla_put_failure;
935 965
936 if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) 966 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
937 NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); 967 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
968 goto nla_put_failure;
938 969
939 if (mgmt_stypes) { 970 if (mgmt_stypes) {
940 u16 stypes; 971 u16 stypes;
@@ -952,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
952 i = 0; 983 i = 0;
953 stypes = mgmt_stypes[ift].tx; 984 stypes = mgmt_stypes[ift].tx;
954 while (stypes) { 985 while (stypes) {
955 if (stypes & 1) 986 if ((stypes & 1) &&
956 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, 987 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
957 (i << 4) | IEEE80211_FTYPE_MGMT); 988 (i << 4) | IEEE80211_FTYPE_MGMT))
989 goto nla_put_failure;
958 stypes >>= 1; 990 stypes >>= 1;
959 i++; 991 i++;
960 } 992 }
@@ -974,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
974 i = 0; 1006 i = 0;
975 stypes = mgmt_stypes[ift].rx; 1007 stypes = mgmt_stypes[ift].rx;
976 while (stypes) { 1008 while (stypes) {
977 if (stypes & 1) 1009 if ((stypes & 1) &&
978 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, 1010 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
979 (i << 4) | IEEE80211_FTYPE_MGMT); 1011 (i << 4) | IEEE80211_FTYPE_MGMT))
1012 goto nla_put_failure;
980 stypes >>= 1; 1013 stypes >>= 1;
981 i++; 1014 i++;
982 } 1015 }
@@ -993,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
993 if (!nl_wowlan) 1026 if (!nl_wowlan)
994 goto nla_put_failure; 1027 goto nla_put_failure;
995 1028
996 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) 1029 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
997 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); 1030 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
998 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) 1031 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
999 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); 1032 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1000 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) 1033 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1001 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); 1034 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1002 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) 1035 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1003 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED); 1036 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1004 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) 1037 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1005 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); 1038 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1006 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) 1039 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1007 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); 1040 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1008 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) 1041 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1009 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); 1042 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1010 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) 1043 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1011 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); 1044 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1045 goto nla_put_failure;
1012 if (dev->wiphy.wowlan.n_patterns) { 1046 if (dev->wiphy.wowlan.n_patterns) {
1013 struct nl80211_wowlan_pattern_support pat = { 1047 struct nl80211_wowlan_pattern_support pat = {
1014 .max_patterns = dev->wiphy.wowlan.n_patterns, 1048 .max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1017,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1017 .max_pattern_len = 1051 .max_pattern_len =
1018 dev->wiphy.wowlan.pattern_max_len, 1052 dev->wiphy.wowlan.pattern_max_len,
1019 }; 1053 };
1020 NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1054 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1021 sizeof(pat), &pat); 1055 sizeof(pat), &pat))
1056 goto nla_put_failure;
1022 } 1057 }
1023 1058
1024 nla_nest_end(msg, nl_wowlan); 1059 nla_nest_end(msg, nl_wowlan);
@@ -1031,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1031 if (nl80211_put_iface_combinations(&dev->wiphy, msg)) 1066 if (nl80211_put_iface_combinations(&dev->wiphy, msg))
1032 goto nla_put_failure; 1067 goto nla_put_failure;
1033 1068
1034 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) 1069 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
1035 NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, 1070 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
1036 dev->wiphy.ap_sme_capa); 1071 dev->wiphy.ap_sme_capa))
1072 goto nla_put_failure;
1037 1073
1038 NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); 1074 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
1075 dev->wiphy.features))
1076 goto nla_put_failure;
1039 1077
1040 if (dev->wiphy.ht_capa_mod_mask) 1078 if (dev->wiphy.ht_capa_mod_mask &&
1041 NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, 1079 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1042 sizeof(*dev->wiphy.ht_capa_mod_mask), 1080 sizeof(*dev->wiphy.ht_capa_mod_mask),
1043 dev->wiphy.ht_capa_mod_mask); 1081 dev->wiphy.ht_capa_mod_mask))
1082 goto nla_put_failure;
1044 1083
1045 return genlmsg_end(msg, hdr); 1084 return genlmsg_end(msg, hdr);
1046 1085
@@ -1103,17 +1142,20 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
1103static int parse_txq_params(struct nlattr *tb[], 1142static int parse_txq_params(struct nlattr *tb[],
1104 struct ieee80211_txq_params *txq_params) 1143 struct ieee80211_txq_params *txq_params)
1105{ 1144{
1106 if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || 1145 if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
1107 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || 1146 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
1108 !tb[NL80211_TXQ_ATTR_AIFS]) 1147 !tb[NL80211_TXQ_ATTR_AIFS])
1109 return -EINVAL; 1148 return -EINVAL;
1110 1149
1111 txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); 1150 txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
1112 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); 1151 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
1113 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); 1152 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
1114 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); 1153 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
1115 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); 1154 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
1116 1155
1156 if (txq_params->ac >= NL80211_NUM_ACS)
1157 return -EINVAL;
1158
1117 return 0; 1159 return 0;
1118} 1160}
1119 1161
@@ -1137,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
1137 wdev->iftype == NL80211_IFTYPE_P2P_GO; 1179 wdev->iftype == NL80211_IFTYPE_P2P_GO;
1138} 1180}
1139 1181
1182static bool nl80211_valid_channel_type(struct genl_info *info,
1183 enum nl80211_channel_type *channel_type)
1184{
1185 enum nl80211_channel_type tmp;
1186
1187 if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
1188 return false;
1189
1190 tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
1191 if (tmp != NL80211_CHAN_NO_HT &&
1192 tmp != NL80211_CHAN_HT20 &&
1193 tmp != NL80211_CHAN_HT40PLUS &&
1194 tmp != NL80211_CHAN_HT40MINUS)
1195 return false;
1196
1197 if (channel_type)
1198 *channel_type = tmp;
1199
1200 return true;
1201}
1202
1140static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, 1203static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1141 struct wireless_dev *wdev, 1204 struct wireless_dev *wdev,
1142 struct genl_info *info) 1205 struct genl_info *info)
@@ -1151,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1151 if (!nl80211_can_set_dev_channel(wdev)) 1214 if (!nl80211_can_set_dev_channel(wdev))
1152 return -EOPNOTSUPP; 1215 return -EOPNOTSUPP;
1153 1216
1154 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 1217 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
1155 channel_type = nla_get_u32(info->attrs[ 1218 !nl80211_valid_channel_type(info, &channel_type))
1156 NL80211_ATTR_WIPHY_CHANNEL_TYPE]); 1219 return -EINVAL;
1157 if (channel_type != NL80211_CHAN_NO_HT &&
1158 channel_type != NL80211_CHAN_HT20 &&
1159 channel_type != NL80211_CHAN_HT40PLUS &&
1160 channel_type != NL80211_CHAN_HT40MINUS)
1161 return -EINVAL;
1162 }
1163 1220
1164 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 1221 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
1165 1222
@@ -1293,6 +1350,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1293 goto bad_res; 1350 goto bad_res;
1294 } 1351 }
1295 1352
1353 if (!netif_running(netdev)) {
1354 result = -ENETDOWN;
1355 goto bad_res;
1356 }
1357
1296 nla_for_each_nested(nl_txq_params, 1358 nla_for_each_nested(nl_txq_params,
1297 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], 1359 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
1298 rem_txq_params) { 1360 rem_txq_params) {
@@ -1483,14 +1545,28 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1483 if (!hdr) 1545 if (!hdr)
1484 return -1; 1546 return -1;
1485 1547
1486 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 1548 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1487 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 1549 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1488 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 1550 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
1489 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); 1551 nla_put_u32(msg, NL80211_ATTR_IFTYPE,
1552 dev->ieee80211_ptr->iftype) ||
1553 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1554 rdev->devlist_generation ^
1555 (cfg80211_rdev_list_generation << 2)))
1556 goto nla_put_failure;
1490 1557
1491 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, 1558 if (rdev->ops->get_channel) {
1492 rdev->devlist_generation ^ 1559 struct ieee80211_channel *chan;
1493 (cfg80211_rdev_list_generation << 2)); 1560 enum nl80211_channel_type channel_type;
1561
1562 chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
1563 if (chan &&
1564 (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
1565 chan->center_freq) ||
1566 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
1567 channel_type)))
1568 goto nla_put_failure;
1569 }
1494 1570
1495 return genlmsg_end(msg, hdr); 1571 return genlmsg_end(msg, hdr);
1496 1572
@@ -1788,35 +1864,34 @@ static void get_key_callback(void *c, struct key_params *params)
1788 struct nlattr *key; 1864 struct nlattr *key;
1789 struct get_key_cookie *cookie = c; 1865 struct get_key_cookie *cookie = c;
1790 1866
1791 if (params->key) 1867 if ((params->key &&
1792 NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, 1868 nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
1793 params->key_len, params->key); 1869 params->key_len, params->key)) ||
1794 1870 (params->seq &&
1795 if (params->seq) 1871 nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
1796 NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, 1872 params->seq_len, params->seq)) ||
1797 params->seq_len, params->seq); 1873 (params->cipher &&
1798 1874 nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
1799 if (params->cipher) 1875 params->cipher)))
1800 NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, 1876 goto nla_put_failure;
1801 params->cipher);
1802 1877
1803 key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); 1878 key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
1804 if (!key) 1879 if (!key)
1805 goto nla_put_failure; 1880 goto nla_put_failure;
1806 1881
1807 if (params->key) 1882 if ((params->key &&
1808 NLA_PUT(cookie->msg, NL80211_KEY_DATA, 1883 nla_put(cookie->msg, NL80211_KEY_DATA,
1809 params->key_len, params->key); 1884 params->key_len, params->key)) ||
1810 1885 (params->seq &&
1811 if (params->seq) 1886 nla_put(cookie->msg, NL80211_KEY_SEQ,
1812 NLA_PUT(cookie->msg, NL80211_KEY_SEQ, 1887 params->seq_len, params->seq)) ||
1813 params->seq_len, params->seq); 1888 (params->cipher &&
1814 1889 nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
1815 if (params->cipher) 1890 params->cipher)))
1816 NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, 1891 goto nla_put_failure;
1817 params->cipher);
1818 1892
1819 NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); 1893 if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
1894 goto nla_put_failure;
1820 1895
1821 nla_nest_end(cookie->msg, key); 1896 nla_nest_end(cookie->msg, key);
1822 1897
@@ -1874,10 +1949,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
1874 cookie.msg = msg; 1949 cookie.msg = msg;
1875 cookie.idx = key_idx; 1950 cookie.idx = key_idx;
1876 1951
1877 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 1952 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1878 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); 1953 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
1879 if (mac_addr) 1954 goto nla_put_failure;
1880 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 1955 if (mac_addr &&
1956 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
1957 goto nla_put_failure;
1881 1958
1882 if (pairwise && mac_addr && 1959 if (pairwise && mac_addr &&
1883 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) 1960 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2076,15 +2153,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2076 return err; 2153 return err;
2077} 2154}
2078 2155
2079static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) 2156static int nl80211_parse_beacon(struct genl_info *info,
2157 struct cfg80211_beacon_data *bcn)
2080{ 2158{
2081 int (*call)(struct wiphy *wiphy, struct net_device *dev, 2159 bool haveinfo = false;
2082 struct beacon_parameters *info);
2083 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2084 struct net_device *dev = info->user_ptr[1];
2085 struct wireless_dev *wdev = dev->ieee80211_ptr;
2086 struct beacon_parameters params;
2087 int haveinfo = 0, err;
2088 2160
2089 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) || 2161 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
2090 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) || 2162 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
@@ -2092,149 +2164,190 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
2092 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP])) 2164 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
2093 return -EINVAL; 2165 return -EINVAL;
2094 2166
2095 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2167 memset(bcn, 0, sizeof(*bcn));
2096 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2097 return -EOPNOTSUPP;
2098
2099 memset(&params, 0, sizeof(params));
2100
2101 switch (info->genlhdr->cmd) {
2102 case NL80211_CMD_NEW_BEACON:
2103 /* these are required for NEW_BEACON */
2104 if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
2105 !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
2106 !info->attrs[NL80211_ATTR_BEACON_HEAD])
2107 return -EINVAL;
2108
2109 params.interval =
2110 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
2111 params.dtim_period =
2112 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
2113
2114 err = cfg80211_validate_beacon_int(rdev, params.interval);
2115 if (err)
2116 return err;
2117
2118 /*
2119 * In theory, some of these attributes could be required for
2120 * NEW_BEACON, but since they were not used when the command was
2121 * originally added, keep them optional for old user space
2122 * programs to work with drivers that do not need the additional
2123 * information.
2124 */
2125 if (info->attrs[NL80211_ATTR_SSID]) {
2126 params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2127 params.ssid_len =
2128 nla_len(info->attrs[NL80211_ATTR_SSID]);
2129 if (params.ssid_len == 0 ||
2130 params.ssid_len > IEEE80211_MAX_SSID_LEN)
2131 return -EINVAL;
2132 }
2133
2134 if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
2135 params.hidden_ssid = nla_get_u32(
2136 info->attrs[NL80211_ATTR_HIDDEN_SSID]);
2137 if (params.hidden_ssid !=
2138 NL80211_HIDDEN_SSID_NOT_IN_USE &&
2139 params.hidden_ssid !=
2140 NL80211_HIDDEN_SSID_ZERO_LEN &&
2141 params.hidden_ssid !=
2142 NL80211_HIDDEN_SSID_ZERO_CONTENTS)
2143 return -EINVAL;
2144 }
2145
2146 params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
2147
2148 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2149 params.auth_type = nla_get_u32(
2150 info->attrs[NL80211_ATTR_AUTH_TYPE]);
2151 if (!nl80211_valid_auth_type(params.auth_type))
2152 return -EINVAL;
2153 } else
2154 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
2155
2156 err = nl80211_crypto_settings(rdev, info, &params.crypto,
2157 NL80211_MAX_NR_CIPHER_SUITES);
2158 if (err)
2159 return err;
2160
2161 call = rdev->ops->add_beacon;
2162 break;
2163 case NL80211_CMD_SET_BEACON:
2164 call = rdev->ops->set_beacon;
2165 break;
2166 default:
2167 WARN_ON(1);
2168 return -EOPNOTSUPP;
2169 }
2170
2171 if (!call)
2172 return -EOPNOTSUPP;
2173 2168
2174 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { 2169 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
2175 params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); 2170 bcn->head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
2176 params.head_len = 2171 bcn->head_len = nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]);
2177 nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]); 2172 if (!bcn->head_len)
2178 haveinfo = 1; 2173 return -EINVAL;
2174 haveinfo = true;
2179 } 2175 }
2180 2176
2181 if (info->attrs[NL80211_ATTR_BEACON_TAIL]) { 2177 if (info->attrs[NL80211_ATTR_BEACON_TAIL]) {
2182 params.tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]); 2178 bcn->tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]);
2183 params.tail_len = 2179 bcn->tail_len =
2184 nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]); 2180 nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]);
2185 haveinfo = 1; 2181 haveinfo = true;
2186 } 2182 }
2187 2183
2188 if (!haveinfo) 2184 if (!haveinfo)
2189 return -EINVAL; 2185 return -EINVAL;
2190 2186
2191 if (info->attrs[NL80211_ATTR_IE]) { 2187 if (info->attrs[NL80211_ATTR_IE]) {
2192 params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]); 2188 bcn->beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
2193 params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2189 bcn->beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2194 } 2190 }
2195 2191
2196 if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) { 2192 if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
2197 params.proberesp_ies = 2193 bcn->proberesp_ies =
2198 nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); 2194 nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
2199 params.proberesp_ies_len = 2195 bcn->proberesp_ies_len =
2200 nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); 2196 nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
2201 } 2197 }
2202 2198
2203 if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) { 2199 if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
2204 params.assocresp_ies = 2200 bcn->assocresp_ies =
2205 nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); 2201 nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
2206 params.assocresp_ies_len = 2202 bcn->assocresp_ies_len =
2207 nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); 2203 nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
2208 } 2204 }
2209 2205
2210 if (info->attrs[NL80211_ATTR_PROBE_RESP]) { 2206 if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
2211 params.probe_resp = 2207 bcn->probe_resp =
2212 nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]); 2208 nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
2213 params.probe_resp_len = 2209 bcn->probe_resp_len =
2214 nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]); 2210 nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
2215 } 2211 }
2216 2212
2217 err = call(&rdev->wiphy, dev, &params); 2213 return 0;
2218 if (!err && params.interval) 2214}
2219 wdev->beacon_interval = params.interval; 2215
2216static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2217{
2218 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2219 struct net_device *dev = info->user_ptr[1];
2220 struct wireless_dev *wdev = dev->ieee80211_ptr;
2221 struct cfg80211_ap_settings params;
2222 int err;
2223
2224 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2225 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2226 return -EOPNOTSUPP;
2227
2228 if (!rdev->ops->start_ap)
2229 return -EOPNOTSUPP;
2230
2231 if (wdev->beacon_interval)
2232 return -EALREADY;
2233
2234 memset(&params, 0, sizeof(params));
2235
2236 /* these are required for START_AP */
2237 if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
2238 !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
2239 !info->attrs[NL80211_ATTR_BEACON_HEAD])
2240 return -EINVAL;
2241
2242 err = nl80211_parse_beacon(info, &params.beacon);
2243 if (err)
2244 return err;
2245
2246 params.beacon_interval =
2247 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
2248 params.dtim_period =
2249 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
2250
2251 err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
2252 if (err)
2253 return err;
2254
2255 /*
2256 * In theory, some of these attributes should be required here
2257 * but since they were not used when the command was originally
2258 * added, keep them optional for old user space programs to let
2259 * them continue to work with drivers that do not need the
2260 * additional information -- drivers must check!
2261 */
2262 if (info->attrs[NL80211_ATTR_SSID]) {
2263 params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2264 params.ssid_len =
2265 nla_len(info->attrs[NL80211_ATTR_SSID]);
2266 if (params.ssid_len == 0 ||
2267 params.ssid_len > IEEE80211_MAX_SSID_LEN)
2268 return -EINVAL;
2269 }
2270
2271 if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
2272 params.hidden_ssid = nla_get_u32(
2273 info->attrs[NL80211_ATTR_HIDDEN_SSID]);
2274 if (params.hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE &&
2275 params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_LEN &&
2276 params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_CONTENTS)
2277 return -EINVAL;
2278 }
2279
2280 params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
2281
2282 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2283 params.auth_type = nla_get_u32(
2284 info->attrs[NL80211_ATTR_AUTH_TYPE]);
2285 if (!nl80211_valid_auth_type(params.auth_type))
2286 return -EINVAL;
2287 } else
2288 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
2289
2290 err = nl80211_crypto_settings(rdev, info, &params.crypto,
2291 NL80211_MAX_NR_CIPHER_SUITES);
2292 if (err)
2293 return err;
2294
2295 if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) {
2296 if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER))
2297 return -EOPNOTSUPP;
2298 params.inactivity_timeout = nla_get_u16(
2299 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
2300 }
2301
2302 err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
2303 if (!err)
2304 wdev->beacon_interval = params.beacon_interval;
2220 return err; 2305 return err;
2221} 2306}
2222 2307
2223static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) 2308static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
2224{ 2309{
2225 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2310 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2226 struct net_device *dev = info->user_ptr[1]; 2311 struct net_device *dev = info->user_ptr[1];
2227 struct wireless_dev *wdev = dev->ieee80211_ptr; 2312 struct wireless_dev *wdev = dev->ieee80211_ptr;
2313 struct cfg80211_beacon_data params;
2228 int err; 2314 int err;
2229 2315
2230 if (!rdev->ops->del_beacon) 2316 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2317 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2318 return -EOPNOTSUPP;
2319
2320 if (!rdev->ops->change_beacon)
2321 return -EOPNOTSUPP;
2322
2323 if (!wdev->beacon_interval)
2324 return -EINVAL;
2325
2326 err = nl80211_parse_beacon(info, &params);
2327 if (err)
2328 return err;
2329
2330 return rdev->ops->change_beacon(&rdev->wiphy, dev, &params);
2331}
2332
2333static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
2334{
2335 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2336 struct net_device *dev = info->user_ptr[1];
2337 struct wireless_dev *wdev = dev->ieee80211_ptr;
2338 int err;
2339
2340 if (!rdev->ops->stop_ap)
2231 return -EOPNOTSUPP; 2341 return -EOPNOTSUPP;
2232 2342
2233 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2343 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2234 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2344 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2235 return -EOPNOTSUPP; 2345 return -EOPNOTSUPP;
2236 2346
2237 err = rdev->ops->del_beacon(&rdev->wiphy, dev); 2347 if (!wdev->beacon_interval)
2348 return -ENOENT;
2349
2350 err = rdev->ops->stop_ap(&rdev->wiphy, dev);
2238 if (!err) 2351 if (!err)
2239 wdev->beacon_interval = 0; 2352 wdev->beacon_interval = 0;
2240 return err; 2353 return err;
@@ -2312,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
2312 return -EINVAL; 2425 return -EINVAL;
2313 } 2426 }
2314 2427
2315 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) 2428 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
2316 if (flags[flag]) 2429 if (flags[flag]) {
2317 params->sta_flags_set |= (1<<flag); 2430 params->sta_flags_set |= (1<<flag);
2318 2431
2432 /* no longer support new API additions in old API */
2433 if (flag > NL80211_STA_FLAG_MAX_OLD_API)
2434 return -EINVAL;
2435 }
2436 }
2437
2319 return 0; 2438 return 0;
2320} 2439}
2321 2440
@@ -2331,15 +2450,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
2331 2450
2332 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ 2451 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
2333 bitrate = cfg80211_calculate_bitrate(info); 2452 bitrate = cfg80211_calculate_bitrate(info);
2334 if (bitrate > 0) 2453 if ((bitrate > 0 &&
2335 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 2454 nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
2336 2455 ((info->flags & RATE_INFO_FLAGS_MCS) &&
2337 if (info->flags & RATE_INFO_FLAGS_MCS) 2456 nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
2338 NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs); 2457 ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
2339 if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) 2458 nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
2340 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); 2459 ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
2341 if (info->flags & RATE_INFO_FLAGS_SHORT_GI) 2460 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
2342 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); 2461 goto nla_put_failure;
2343 2462
2344 nla_nest_end(msg, rate); 2463 nla_nest_end(msg, rate);
2345 return true; 2464 return true;
@@ -2349,7 +2468,9 @@ nla_put_failure:
2349} 2468}
2350 2469
2351static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 2470static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2352 int flags, struct net_device *dev, 2471 int flags,
2472 struct cfg80211_registered_device *rdev,
2473 struct net_device *dev,
2353 const u8 *mac_addr, struct station_info *sinfo) 2474 const u8 *mac_addr, struct station_info *sinfo)
2354{ 2475{
2355 void *hdr; 2476 void *hdr;
@@ -2359,41 +2480,54 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2359 if (!hdr) 2480 if (!hdr)
2360 return -1; 2481 return -1;
2361 2482
2362 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 2483 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
2363 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 2484 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
2364 2485 nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
2365 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); 2486 goto nla_put_failure;
2366 2487
2367 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); 2488 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
2368 if (!sinfoattr) 2489 if (!sinfoattr)
2369 goto nla_put_failure; 2490 goto nla_put_failure;
2370 if (sinfo->filled & STATION_INFO_CONNECTED_TIME) 2491 if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
2371 NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME, 2492 nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
2372 sinfo->connected_time); 2493 sinfo->connected_time))
2373 if (sinfo->filled & STATION_INFO_INACTIVE_TIME) 2494 goto nla_put_failure;
2374 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, 2495 if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
2375 sinfo->inactive_time); 2496 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2376 if (sinfo->filled & STATION_INFO_RX_BYTES) 2497 sinfo->inactive_time))
2377 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, 2498 goto nla_put_failure;
2378 sinfo->rx_bytes); 2499 if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
2379 if (sinfo->filled & STATION_INFO_TX_BYTES) 2500 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
2380 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, 2501 sinfo->rx_bytes))
2381 sinfo->tx_bytes); 2502 goto nla_put_failure;
2382 if (sinfo->filled & STATION_INFO_LLID) 2503 if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
2383 NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, 2504 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
2384 sinfo->llid); 2505 sinfo->tx_bytes))
2385 if (sinfo->filled & STATION_INFO_PLID) 2506 goto nla_put_failure;
2386 NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, 2507 if ((sinfo->filled & STATION_INFO_LLID) &&
2387 sinfo->plid); 2508 nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
2388 if (sinfo->filled & STATION_INFO_PLINK_STATE) 2509 goto nla_put_failure;
2389 NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, 2510 if ((sinfo->filled & STATION_INFO_PLID) &&
2390 sinfo->plink_state); 2511 nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
2391 if (sinfo->filled & STATION_INFO_SIGNAL) 2512 goto nla_put_failure;
2392 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, 2513 if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
2393 sinfo->signal); 2514 nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
2394 if (sinfo->filled & STATION_INFO_SIGNAL_AVG) 2515 sinfo->plink_state))
2395 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, 2516 goto nla_put_failure;
2396 sinfo->signal_avg); 2517 switch (rdev->wiphy.signal_type) {
2518 case CFG80211_SIGNAL_TYPE_MBM:
2519 if ((sinfo->filled & STATION_INFO_SIGNAL) &&
2520 nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
2521 sinfo->signal))
2522 goto nla_put_failure;
2523 if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
2524 nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
2525 sinfo->signal_avg))
2526 goto nla_put_failure;
2527 break;
2528 default:
2529 break;
2530 }
2397 if (sinfo->filled & STATION_INFO_TX_BITRATE) { 2531 if (sinfo->filled & STATION_INFO_TX_BITRATE) {
2398 if (!nl80211_put_sta_rate(msg, &sinfo->txrate, 2532 if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
2399 NL80211_STA_INFO_TX_BITRATE)) 2533 NL80211_STA_INFO_TX_BITRATE))
@@ -2404,49 +2538,60 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2404 NL80211_STA_INFO_RX_BITRATE)) 2538 NL80211_STA_INFO_RX_BITRATE))
2405 goto nla_put_failure; 2539 goto nla_put_failure;
2406 } 2540 }
2407 if (sinfo->filled & STATION_INFO_RX_PACKETS) 2541 if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
2408 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, 2542 nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
2409 sinfo->rx_packets); 2543 sinfo->rx_packets))
2410 if (sinfo->filled & STATION_INFO_TX_PACKETS) 2544 goto nla_put_failure;
2411 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS, 2545 if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
2412 sinfo->tx_packets); 2546 nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
2413 if (sinfo->filled & STATION_INFO_TX_RETRIES) 2547 sinfo->tx_packets))
2414 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES, 2548 goto nla_put_failure;
2415 sinfo->tx_retries); 2549 if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
2416 if (sinfo->filled & STATION_INFO_TX_FAILED) 2550 nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
2417 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, 2551 sinfo->tx_retries))
2418 sinfo->tx_failed); 2552 goto nla_put_failure;
2419 if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) 2553 if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
2420 NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS, 2554 nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
2421 sinfo->beacon_loss_count); 2555 sinfo->tx_failed))
2556 goto nla_put_failure;
2557 if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
2558 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
2559 sinfo->beacon_loss_count))
2560 goto nla_put_failure;
2422 if (sinfo->filled & STATION_INFO_BSS_PARAM) { 2561 if (sinfo->filled & STATION_INFO_BSS_PARAM) {
2423 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); 2562 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
2424 if (!bss_param) 2563 if (!bss_param)
2425 goto nla_put_failure; 2564 goto nla_put_failure;
2426 2565
2427 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) 2566 if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
2428 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT); 2567 nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
2429 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) 2568 ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
2430 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE); 2569 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
2431 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) 2570 ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
2432 NLA_PUT_FLAG(msg, 2571 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
2433 NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME); 2572 nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
2434 NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, 2573 sinfo->bss_param.dtim_period) ||
2435 sinfo->bss_param.dtim_period); 2574 nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
2436 NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, 2575 sinfo->bss_param.beacon_interval))
2437 sinfo->bss_param.beacon_interval); 2576 goto nla_put_failure;
2438 2577
2439 nla_nest_end(msg, bss_param); 2578 nla_nest_end(msg, bss_param);
2440 } 2579 }
2441 if (sinfo->filled & STATION_INFO_STA_FLAGS) 2580 if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
2442 NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS, 2581 nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
2443 sizeof(struct nl80211_sta_flag_update), 2582 sizeof(struct nl80211_sta_flag_update),
2444 &sinfo->sta_flags); 2583 &sinfo->sta_flags))
2584 goto nla_put_failure;
2585 if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
2586 nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
2587 sinfo->t_offset))
2588 goto nla_put_failure;
2445 nla_nest_end(msg, sinfoattr); 2589 nla_nest_end(msg, sinfoattr);
2446 2590
2447 if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) 2591 if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
2448 NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, 2592 nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
2449 sinfo->assoc_req_ies); 2593 sinfo->assoc_req_ies))
2594 goto nla_put_failure;
2450 2595
2451 return genlmsg_end(msg, hdr); 2596 return genlmsg_end(msg, hdr);
2452 2597
@@ -2486,7 +2631,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
2486 if (nl80211_send_station(skb, 2631 if (nl80211_send_station(skb,
2487 NETLINK_CB(cb->skb).pid, 2632 NETLINK_CB(cb->skb).pid,
2488 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2633 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2489 netdev, mac_addr, 2634 dev, netdev, mac_addr,
2490 &sinfo) < 0) 2635 &sinfo) < 0)
2491 goto out; 2636 goto out;
2492 2637
@@ -2531,7 +2676,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
2531 return -ENOMEM; 2676 return -ENOMEM;
2532 2677
2533 if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, 2678 if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
2534 dev, mac_addr, &sinfo) < 0) { 2679 rdev, dev, mac_addr, &sinfo) < 0) {
2535 nlmsg_free(msg); 2680 nlmsg_free(msg);
2536 return -ENOBUFS; 2681 return -ENOBUFS;
2537 } 2682 }
@@ -2655,13 +2800,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2655 break; 2800 break;
2656 case NL80211_IFTYPE_P2P_CLIENT: 2801 case NL80211_IFTYPE_P2P_CLIENT:
2657 case NL80211_IFTYPE_STATION: 2802 case NL80211_IFTYPE_STATION:
2658 /* disallow things sta doesn't support */
2659 if (params.plink_action)
2660 return -EINVAL;
2661 if (params.ht_capa)
2662 return -EINVAL;
2663 if (params.listen_interval >= 0)
2664 return -EINVAL;
2665 /* 2803 /*
2666 * Don't allow userspace to change the TDLS_PEER flag, 2804 * Don't allow userspace to change the TDLS_PEER flag,
2667 * but silently ignore attempts to change it since we 2805 * but silently ignore attempts to change it since we
@@ -2669,7 +2807,15 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2669 * to change the flag. 2807 * to change the flag.
2670 */ 2808 */
2671 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); 2809 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
2672 2810 /* fall through */
2811 case NL80211_IFTYPE_ADHOC:
2812 /* disallow things sta doesn't support */
2813 if (params.plink_action)
2814 return -EINVAL;
2815 if (params.ht_capa)
2816 return -EINVAL;
2817 if (params.listen_interval >= 0)
2818 return -EINVAL;
2673 /* reject any changes other than AUTHORIZED */ 2819 /* reject any changes other than AUTHORIZED */
2674 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) 2820 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
2675 return -EINVAL; 2821 return -EINVAL;
@@ -2867,36 +3013,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
2867 if (!hdr) 3013 if (!hdr)
2868 return -1; 3014 return -1;
2869 3015
2870 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 3016 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
2871 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); 3017 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
2872 NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); 3018 nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
2873 3019 nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
2874 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); 3020 goto nla_put_failure;
2875 3021
2876 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); 3022 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
2877 if (!pinfoattr) 3023 if (!pinfoattr)
2878 goto nla_put_failure; 3024 goto nla_put_failure;
2879 if (pinfo->filled & MPATH_INFO_FRAME_QLEN) 3025 if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
2880 NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, 3026 nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
2881 pinfo->frame_qlen); 3027 pinfo->frame_qlen))
2882 if (pinfo->filled & MPATH_INFO_SN) 3028 goto nla_put_failure;
2883 NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, 3029 if (((pinfo->filled & MPATH_INFO_SN) &&
2884 pinfo->sn); 3030 nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
2885 if (pinfo->filled & MPATH_INFO_METRIC) 3031 ((pinfo->filled & MPATH_INFO_METRIC) &&
2886 NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, 3032 nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
2887 pinfo->metric); 3033 pinfo->metric)) ||
2888 if (pinfo->filled & MPATH_INFO_EXPTIME) 3034 ((pinfo->filled & MPATH_INFO_EXPTIME) &&
2889 NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, 3035 nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
2890 pinfo->exptime); 3036 pinfo->exptime)) ||
2891 if (pinfo->filled & MPATH_INFO_FLAGS) 3037 ((pinfo->filled & MPATH_INFO_FLAGS) &&
2892 NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, 3038 nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
2893 pinfo->flags); 3039 pinfo->flags)) ||
2894 if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) 3040 ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
2895 NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, 3041 nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
2896 pinfo->discovery_timeout); 3042 pinfo->discovery_timeout)) ||
2897 if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) 3043 ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
2898 NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, 3044 nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
2899 pinfo->discovery_retries); 3045 pinfo->discovery_retries)))
3046 goto nla_put_failure;
2900 3047
2901 nla_nest_end(msg, pinfoattr); 3048 nla_nest_end(msg, pinfoattr);
2902 3049
@@ -3222,43 +3369,52 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3222 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); 3369 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
3223 if (!pinfoattr) 3370 if (!pinfoattr)
3224 goto nla_put_failure; 3371 goto nla_put_failure;
3225 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 3372 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
3226 NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, 3373 nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
3227 cur_params.dot11MeshRetryTimeout); 3374 cur_params.dot11MeshRetryTimeout) ||
3228 NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, 3375 nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
3229 cur_params.dot11MeshConfirmTimeout); 3376 cur_params.dot11MeshConfirmTimeout) ||
3230 NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, 3377 nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
3231 cur_params.dot11MeshHoldingTimeout); 3378 cur_params.dot11MeshHoldingTimeout) ||
3232 NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, 3379 nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
3233 cur_params.dot11MeshMaxPeerLinks); 3380 cur_params.dot11MeshMaxPeerLinks) ||
3234 NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES, 3381 nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
3235 cur_params.dot11MeshMaxRetries); 3382 cur_params.dot11MeshMaxRetries) ||
3236 NLA_PUT_U8(msg, NL80211_MESHCONF_TTL, 3383 nla_put_u8(msg, NL80211_MESHCONF_TTL,
3237 cur_params.dot11MeshTTL); 3384 cur_params.dot11MeshTTL) ||
3238 NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL, 3385 nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
3239 cur_params.element_ttl); 3386 cur_params.element_ttl) ||
3240 NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, 3387 nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
3241 cur_params.auto_open_plinks); 3388 cur_params.auto_open_plinks) ||
3242 NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 3389 nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
3243 cur_params.dot11MeshHWMPmaxPREQretries); 3390 cur_params.dot11MeshNbrOffsetMaxNeighbor) ||
3244 NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, 3391 nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
3245 cur_params.path_refresh_time); 3392 cur_params.dot11MeshHWMPmaxPREQretries) ||
3246 NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, 3393 nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
3247 cur_params.min_discovery_timeout); 3394 cur_params.path_refresh_time) ||
3248 NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, 3395 nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
3249 cur_params.dot11MeshHWMPactivePathTimeout); 3396 cur_params.min_discovery_timeout) ||
3250 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, 3397 nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
3251 cur_params.dot11MeshHWMPpreqMinInterval); 3398 cur_params.dot11MeshHWMPactivePathTimeout) ||
3252 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, 3399 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
3253 cur_params.dot11MeshHWMPperrMinInterval); 3400 cur_params.dot11MeshHWMPpreqMinInterval) ||
3254 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, 3401 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
3255 cur_params.dot11MeshHWMPnetDiameterTraversalTime); 3402 cur_params.dot11MeshHWMPperrMinInterval) ||
3256 NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, 3403 nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
3257 cur_params.dot11MeshHWMPRootMode); 3404 cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
3258 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, 3405 nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
3259 cur_params.dot11MeshHWMPRannInterval); 3406 cur_params.dot11MeshHWMPRootMode) ||
3260 NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3407 nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
3261 cur_params.dot11MeshGateAnnouncementProtocol); 3408 cur_params.dot11MeshHWMPRannInterval) ||
3409 nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3410 cur_params.dot11MeshGateAnnouncementProtocol) ||
3411 nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
3412 cur_params.dot11MeshForwarding) ||
3413 nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
3414 cur_params.rssi_threshold) ||
3415 nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
3416 cur_params.ht_opmode))
3417 goto nla_put_failure;
3262 nla_nest_end(msg, pinfoattr); 3418 nla_nest_end(msg, pinfoattr);
3263 genlmsg_end(msg, hdr); 3419 genlmsg_end(msg, hdr);
3264 return genlmsg_reply(msg, info); 3420 return genlmsg_reply(msg, info);
@@ -3279,6 +3435,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3279 [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, 3435 [NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
3280 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, 3436 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
3281 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, 3437 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
3438 [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
3282 3439
3283 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, 3440 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
3284 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, 3441 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
@@ -3290,10 +3447,14 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3290 [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 }, 3447 [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
3291 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 }, 3448 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
3292 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, 3449 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
3450 [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
3451 [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
3452 [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16},
3293}; 3453};
3294 3454
3295static const struct nla_policy 3455static const struct nla_policy
3296 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { 3456 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
3457 [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 },
3297 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 3458 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
3298 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 3459 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
3299 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 3460 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
@@ -3346,6 +3507,9 @@ do {\
3346 mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); 3507 mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
3347 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 3508 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
3348 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); 3509 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
3510 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
3511 mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
3512 nla_get_u32);
3349 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 3513 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
3350 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 3514 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
3351 nla_get_u8); 3515 nla_get_u8);
@@ -3379,6 +3543,12 @@ do {\
3379 dot11MeshGateAnnouncementProtocol, mask, 3543 dot11MeshGateAnnouncementProtocol, mask,
3380 NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3544 NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3381 nla_get_u8); 3545 nla_get_u8);
3546 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
3547 mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
3548 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
3549 mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
3550 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
3551 mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
3382 if (mask_out) 3552 if (mask_out)
3383 *mask_out = mask; 3553 *mask_out = mask;
3384 3554
@@ -3399,6 +3569,12 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
3399 nl80211_mesh_setup_params_policy)) 3569 nl80211_mesh_setup_params_policy))
3400 return -EINVAL; 3570 return -EINVAL;
3401 3571
3572 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])
3573 setup->sync_method =
3574 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ?
3575 IEEE80211_SYNC_METHOD_VENDOR :
3576 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET;
3577
3402 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) 3578 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
3403 setup->path_sel_proto = 3579 setup->path_sel_proto =
3404 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? 3580 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
@@ -3483,11 +3659,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3483 if (!hdr) 3659 if (!hdr)
3484 goto put_failure; 3660 goto put_failure;
3485 3661
3486 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, 3662 if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
3487 cfg80211_regdomain->alpha2); 3663 cfg80211_regdomain->alpha2) ||
3488 if (cfg80211_regdomain->dfs_region) 3664 (cfg80211_regdomain->dfs_region &&
3489 NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION, 3665 nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
3490 cfg80211_regdomain->dfs_region); 3666 cfg80211_regdomain->dfs_region)))
3667 goto nla_put_failure;
3491 3668
3492 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); 3669 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
3493 if (!nl_reg_rules) 3670 if (!nl_reg_rules)
@@ -3507,18 +3684,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3507 if (!nl_reg_rule) 3684 if (!nl_reg_rule)
3508 goto nla_put_failure; 3685 goto nla_put_failure;
3509 3686
3510 NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS, 3687 if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
3511 reg_rule->flags); 3688 reg_rule->flags) ||
3512 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START, 3689 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
3513 freq_range->start_freq_khz); 3690 freq_range->start_freq_khz) ||
3514 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END, 3691 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
3515 freq_range->end_freq_khz); 3692 freq_range->end_freq_khz) ||
3516 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, 3693 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
3517 freq_range->max_bandwidth_khz); 3694 freq_range->max_bandwidth_khz) ||
3518 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, 3695 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
3519 power_rule->max_antenna_gain); 3696 power_rule->max_antenna_gain) ||
3520 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, 3697 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
3521 power_rule->max_eirp); 3698 power_rule->max_eirp))
3699 goto nla_put_failure;
3522 3700
3523 nla_nest_end(msg, nl_reg_rule); 3701 nla_nest_end(msg, nl_reg_rule);
3524 } 3702 }
@@ -4079,7 +4257,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4079 struct cfg80211_bss *res = &intbss->pub; 4257 struct cfg80211_bss *res = &intbss->pub;
4080 void *hdr; 4258 void *hdr;
4081 struct nlattr *bss; 4259 struct nlattr *bss;
4082 int i;
4083 4260
4084 ASSERT_WDEV_LOCK(wdev); 4261 ASSERT_WDEV_LOCK(wdev);
4085 4262
@@ -4090,37 +4267,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4090 4267
4091 genl_dump_check_consistent(cb, hdr, &nl80211_fam); 4268 genl_dump_check_consistent(cb, hdr, &nl80211_fam);
4092 4269
4093 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); 4270 if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
4094 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); 4271 nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
4272 goto nla_put_failure;
4095 4273
4096 bss = nla_nest_start(msg, NL80211_ATTR_BSS); 4274 bss = nla_nest_start(msg, NL80211_ATTR_BSS);
4097 if (!bss) 4275 if (!bss)
4098 goto nla_put_failure; 4276 goto nla_put_failure;
4099 if (!is_zero_ether_addr(res->bssid)) 4277 if ((!is_zero_ether_addr(res->bssid) &&
4100 NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid); 4278 nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
4101 if (res->information_elements && res->len_information_elements) 4279 (res->information_elements && res->len_information_elements &&
4102 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 4280 nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
4103 res->len_information_elements, 4281 res->len_information_elements,
4104 res->information_elements); 4282 res->information_elements)) ||
4105 if (res->beacon_ies && res->len_beacon_ies && 4283 (res->beacon_ies && res->len_beacon_ies &&
4106 res->beacon_ies != res->information_elements) 4284 res->beacon_ies != res->information_elements &&
4107 NLA_PUT(msg, NL80211_BSS_BEACON_IES, 4285 nla_put(msg, NL80211_BSS_BEACON_IES,
4108 res->len_beacon_ies, res->beacon_ies); 4286 res->len_beacon_ies, res->beacon_ies)))
4109 if (res->tsf) 4287 goto nla_put_failure;
4110 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 4288 if (res->tsf &&
4111 if (res->beacon_interval) 4289 nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
4112 NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); 4290 goto nla_put_failure;
4113 NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); 4291 if (res->beacon_interval &&
4114 NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); 4292 nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
4115 NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, 4293 goto nla_put_failure;
4116 jiffies_to_msecs(jiffies - intbss->ts)); 4294 if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
4295 nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
4296 nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
4297 jiffies_to_msecs(jiffies - intbss->ts)))
4298 goto nla_put_failure;
4117 4299
4118 switch (rdev->wiphy.signal_type) { 4300 switch (rdev->wiphy.signal_type) {
4119 case CFG80211_SIGNAL_TYPE_MBM: 4301 case CFG80211_SIGNAL_TYPE_MBM:
4120 NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal); 4302 if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
4303 goto nla_put_failure;
4121 break; 4304 break;
4122 case CFG80211_SIGNAL_TYPE_UNSPEC: 4305 case CFG80211_SIGNAL_TYPE_UNSPEC:
4123 NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal); 4306 if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
4307 goto nla_put_failure;
4124 break; 4308 break;
4125 default: 4309 default:
4126 break; 4310 break;
@@ -4129,21 +4313,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4129 switch (wdev->iftype) { 4313 switch (wdev->iftype) {
4130 case NL80211_IFTYPE_P2P_CLIENT: 4314 case NL80211_IFTYPE_P2P_CLIENT:
4131 case NL80211_IFTYPE_STATION: 4315 case NL80211_IFTYPE_STATION:
4132 if (intbss == wdev->current_bss) 4316 if (intbss == wdev->current_bss &&
4133 NLA_PUT_U32(msg, NL80211_BSS_STATUS, 4317 nla_put_u32(msg, NL80211_BSS_STATUS,
4134 NL80211_BSS_STATUS_ASSOCIATED); 4318 NL80211_BSS_STATUS_ASSOCIATED))
4135 else for (i = 0; i < MAX_AUTH_BSSES; i++) { 4319 goto nla_put_failure;
4136 if (intbss != wdev->auth_bsses[i])
4137 continue;
4138 NLA_PUT_U32(msg, NL80211_BSS_STATUS,
4139 NL80211_BSS_STATUS_AUTHENTICATED);
4140 break;
4141 }
4142 break; 4320 break;
4143 case NL80211_IFTYPE_ADHOC: 4321 case NL80211_IFTYPE_ADHOC:
4144 if (intbss == wdev->current_bss) 4322 if (intbss == wdev->current_bss &&
4145 NLA_PUT_U32(msg, NL80211_BSS_STATUS, 4323 nla_put_u32(msg, NL80211_BSS_STATUS,
4146 NL80211_BSS_STATUS_IBSS_JOINED); 4324 NL80211_BSS_STATUS_IBSS_JOINED))
4325 goto nla_put_failure;
4147 break; 4326 break;
4148 default: 4327 default:
4149 break; 4328 break;
@@ -4212,34 +4391,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
4212 if (!hdr) 4391 if (!hdr)
4213 return -ENOMEM; 4392 return -ENOMEM;
4214 4393
4215 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 4394 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
4395 goto nla_put_failure;
4216 4396
4217 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); 4397 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
4218 if (!infoattr) 4398 if (!infoattr)
4219 goto nla_put_failure; 4399 goto nla_put_failure;
4220 4400
4221 NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, 4401 if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
4222 survey->channel->center_freq); 4402 survey->channel->center_freq))
4223 if (survey->filled & SURVEY_INFO_NOISE_DBM) 4403 goto nla_put_failure;
4224 NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, 4404
4225 survey->noise); 4405 if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
4226 if (survey->filled & SURVEY_INFO_IN_USE) 4406 nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
4227 NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE); 4407 goto nla_put_failure;
4228 if (survey->filled & SURVEY_INFO_CHANNEL_TIME) 4408 if ((survey->filled & SURVEY_INFO_IN_USE) &&
4229 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, 4409 nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
4230 survey->channel_time); 4410 goto nla_put_failure;
4231 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) 4411 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
4232 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, 4412 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
4233 survey->channel_time_busy); 4413 survey->channel_time))
4234 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) 4414 goto nla_put_failure;
4235 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, 4415 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
4236 survey->channel_time_ext_busy); 4416 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
4237 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) 4417 survey->channel_time_busy))
4238 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, 4418 goto nla_put_failure;
4239 survey->channel_time_rx); 4419 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
4240 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) 4420 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
4241 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, 4421 survey->channel_time_ext_busy))
4242 survey->channel_time_tx); 4422 goto nla_put_failure;
4423 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
4424 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
4425 survey->channel_time_rx))
4426 goto nla_put_failure;
4427 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
4428 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
4429 survey->channel_time_tx))
4430 goto nla_put_failure;
4243 4431
4244 nla_nest_end(msg, infoattr); 4432 nla_nest_end(msg, infoattr);
4245 4433
@@ -4406,10 +4594,16 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
4406 4594
4407 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; 4595 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
4408 4596
4597 /*
4598 * Since we no longer track auth state, ignore
4599 * requests to only change local state.
4600 */
4601 if (local_state_change)
4602 return 0;
4603
4409 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 4604 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
4410 ssid, ssid_len, ie, ie_len, 4605 ssid, ssid_len, ie, ie_len,
4411 key.p.key, key.p.key_len, key.idx, 4606 key.p.key, key.p.key_len, key.idx);
4412 local_state_change);
4413} 4607}
4414 4608
4415static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, 4609static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -4739,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4739 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 4933 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4740 enum nl80211_channel_type channel_type; 4934 enum nl80211_channel_type channel_type;
4741 4935
4742 channel_type = nla_get_u32( 4936 if (!nl80211_valid_channel_type(info, &channel_type))
4743 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4744 if (channel_type != NL80211_CHAN_NO_HT &&
4745 channel_type != NL80211_CHAN_HT20 &&
4746 channel_type != NL80211_CHAN_HT40MINUS &&
4747 channel_type != NL80211_CHAN_HT40PLUS)
4748 return -EINVAL; 4937 return -EINVAL;
4749 4938
4750 if (channel_type != NL80211_CHAN_NO_HT && 4939 if (channel_type != NL80211_CHAN_NO_HT &&
@@ -4781,7 +4970,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4781 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); 4970 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
4782 struct ieee80211_supported_band *sband = 4971 struct ieee80211_supported_band *sband =
4783 wiphy->bands[ibss.channel->band]; 4972 wiphy->bands[ibss.channel->band];
4784 int err;
4785 4973
4786 err = ieee80211_get_ratemask(sband, rates, n_rates, 4974 err = ieee80211_get_ratemask(sband, rates, n_rates,
4787 &ibss.basic_rates); 4975 &ibss.basic_rates);
@@ -4801,6 +4989,9 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4801 return PTR_ERR(connkeys); 4989 return PTR_ERR(connkeys);
4802 } 4990 }
4803 4991
4992 ibss.control_port =
4993 nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]);
4994
4804 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); 4995 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
4805 if (err) 4996 if (err)
4806 kfree(connkeys); 4997 kfree(connkeys);
@@ -4912,7 +5103,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
4912 NL80211_CMD_TESTMODE); 5103 NL80211_CMD_TESTMODE);
4913 struct nlattr *tmdata; 5104 struct nlattr *tmdata;
4914 5105
4915 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) { 5106 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
4916 genlmsg_cancel(skb, hdr); 5107 genlmsg_cancel(skb, hdr);
4917 break; 5108 break;
4918 } 5109 }
@@ -4963,7 +5154,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
4963 return NULL; 5154 return NULL;
4964 } 5155 }
4965 5156
4966 NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 5157 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
5158 goto nla_put_failure;
4967 data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); 5159 data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
4968 5160
4969 ((void **)skb->cb)[0] = rdev; 5161 ((void **)skb->cb)[0] = rdev;
@@ -5069,6 +5261,13 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
5069 5261
5070 wiphy = &rdev->wiphy; 5262 wiphy = &rdev->wiphy;
5071 5263
5264 connect.bg_scan_period = -1;
5265 if (info->attrs[NL80211_ATTR_BG_SCAN_PERIOD] &&
5266 (wiphy->flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)) {
5267 connect.bg_scan_period =
5268 nla_get_u16(info->attrs[NL80211_ATTR_BG_SCAN_PERIOD]);
5269 }
5270
5072 if (info->attrs[NL80211_ATTR_MAC]) 5271 if (info->attrs[NL80211_ATTR_MAC])
5073 connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); 5272 connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
5074 connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); 5273 connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -5302,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5302 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) 5501 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
5303 return -EOPNOTSUPP; 5502 return -EOPNOTSUPP;
5304 5503
5305 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5504 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
5306 channel_type = nla_get_u32( 5505 !nl80211_valid_channel_type(info, &channel_type))
5307 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); 5506 return -EINVAL;
5308 if (channel_type != NL80211_CHAN_NO_HT &&
5309 channel_type != NL80211_CHAN_HT20 &&
5310 channel_type != NL80211_CHAN_HT40PLUS &&
5311 channel_type != NL80211_CHAN_HT40MINUS)
5312 return -EINVAL;
5313 }
5314 5507
5315 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 5508 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
5316 chan = rdev_freq_to_chan(rdev, freq, channel_type); 5509 chan = rdev_freq_to_chan(rdev, freq, channel_type);
@@ -5335,7 +5528,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5335 if (err) 5528 if (err)
5336 goto free_msg; 5529 goto free_msg;
5337 5530
5338 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 5531 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
5532 goto nla_put_failure;
5339 5533
5340 genlmsg_end(msg, hdr); 5534 genlmsg_end(msg, hdr);
5341 5535
@@ -5390,9 +5584,39 @@ static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
5390 return mask; 5584 return mask;
5391} 5585}
5392 5586
5587static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
5588 u8 *rates, u8 rates_len,
5589 u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
5590{
5591 u8 i;
5592
5593 memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
5594
5595 for (i = 0; i < rates_len; i++) {
5596 int ridx, rbit;
5597
5598 ridx = rates[i] / 8;
5599 rbit = BIT(rates[i] % 8);
5600
5601 /* check validity */
5602 if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
5603 return false;
5604
5605 /* check availability */
5606 if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
5607 mcs[ridx] |= rbit;
5608 else
5609 return false;
5610 }
5611
5612 return true;
5613}
5614
5393static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = { 5615static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
5394 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY, 5616 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
5395 .len = NL80211_MAX_SUPP_RATES }, 5617 .len = NL80211_MAX_SUPP_RATES },
5618 [NL80211_TXRATE_MCS] = { .type = NLA_BINARY,
5619 .len = NL80211_MAX_SUPP_HT_RATES },
5396}; 5620};
5397 5621
5398static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, 5622static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -5418,12 +5642,20 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
5418 sband = rdev->wiphy.bands[i]; 5642 sband = rdev->wiphy.bands[i];
5419 mask.control[i].legacy = 5643 mask.control[i].legacy =
5420 sband ? (1 << sband->n_bitrates) - 1 : 0; 5644 sband ? (1 << sband->n_bitrates) - 1 : 0;
5645 if (sband)
5646 memcpy(mask.control[i].mcs,
5647 sband->ht_cap.mcs.rx_mask,
5648 sizeof(mask.control[i].mcs));
5649 else
5650 memset(mask.control[i].mcs, 0,
5651 sizeof(mask.control[i].mcs));
5421 } 5652 }
5422 5653
5423 /* 5654 /*
5424 * The nested attribute uses enum nl80211_band as the index. This maps 5655 * The nested attribute uses enum nl80211_band as the index. This maps
5425 * directly to the enum ieee80211_band values used in cfg80211. 5656 * directly to the enum ieee80211_band values used in cfg80211.
5426 */ 5657 */
5658 BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
5427 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) 5659 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
5428 { 5660 {
5429 enum ieee80211_band band = nla_type(tx_rates); 5661 enum ieee80211_band band = nla_type(tx_rates);
@@ -5439,7 +5671,31 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
5439 sband, 5671 sband,
5440 nla_data(tb[NL80211_TXRATE_LEGACY]), 5672 nla_data(tb[NL80211_TXRATE_LEGACY]),
5441 nla_len(tb[NL80211_TXRATE_LEGACY])); 5673 nla_len(tb[NL80211_TXRATE_LEGACY]));
5442 if (mask.control[band].legacy == 0) 5674 if ((mask.control[band].legacy == 0) &&
5675 nla_len(tb[NL80211_TXRATE_LEGACY]))
5676 return -EINVAL;
5677 }
5678 if (tb[NL80211_TXRATE_MCS]) {
5679 if (!ht_rateset_to_mask(
5680 sband,
5681 nla_data(tb[NL80211_TXRATE_MCS]),
5682 nla_len(tb[NL80211_TXRATE_MCS]),
5683 mask.control[band].mcs))
5684 return -EINVAL;
5685 }
5686
5687 if (mask.control[band].legacy == 0) {
5688 /* don't allow empty legacy rates if HT
5689 * is not even supported. */
5690 if (!rdev->wiphy.bands[band]->ht_cap.ht_supported)
5691 return -EINVAL;
5692
5693 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
5694 if (mask.control[band].mcs[i])
5695 break;
5696
5697 /* legacy and mcs rates may not be both empty */
5698 if (i == IEEE80211_HT_MCS_MASK_LEN)
5443 return -EINVAL; 5699 return -EINVAL;
5444 } 5700 }
5445 } 5701 }
@@ -5518,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5518 } 5774 }
5519 5775
5520 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5776 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
5521 channel_type = nla_get_u32( 5777 if (!nl80211_valid_channel_type(info, &channel_type))
5522 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
5523 if (channel_type != NL80211_CHAN_NO_HT &&
5524 channel_type != NL80211_CHAN_HT20 &&
5525 channel_type != NL80211_CHAN_HT40PLUS &&
5526 channel_type != NL80211_CHAN_HT40MINUS)
5527 return -EINVAL; 5778 return -EINVAL;
5528 channel_type_valid = true; 5779 channel_type_valid = true;
5529 } 5780 }
@@ -5563,7 +5814,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5563 goto free_msg; 5814 goto free_msg;
5564 5815
5565 if (msg) { 5816 if (msg) {
5566 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 5817 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
5818 goto nla_put_failure;
5567 5819
5568 genlmsg_end(msg, hdr); 5820 genlmsg_end(msg, hdr);
5569 return genlmsg_reply(msg, info); 5821 return genlmsg_reply(msg, info);
@@ -5668,7 +5920,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
5668 else 5920 else
5669 ps_state = NL80211_PS_DISABLED; 5921 ps_state = NL80211_PS_DISABLED;
5670 5922
5671 NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state); 5923 if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
5924 goto nla_put_failure;
5672 5925
5673 genlmsg_end(msg, hdr); 5926 genlmsg_end(msg, hdr);
5674 return genlmsg_reply(msg, info); 5927 return genlmsg_reply(msg, info);
@@ -5815,20 +6068,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
5815 if (!nl_wowlan) 6068 if (!nl_wowlan)
5816 goto nla_put_failure; 6069 goto nla_put_failure;
5817 6070
5818 if (rdev->wowlan->any) 6071 if ((rdev->wowlan->any &&
5819 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); 6072 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
5820 if (rdev->wowlan->disconnect) 6073 (rdev->wowlan->disconnect &&
5821 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); 6074 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
5822 if (rdev->wowlan->magic_pkt) 6075 (rdev->wowlan->magic_pkt &&
5823 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); 6076 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
5824 if (rdev->wowlan->gtk_rekey_failure) 6077 (rdev->wowlan->gtk_rekey_failure &&
5825 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); 6078 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
5826 if (rdev->wowlan->eap_identity_req) 6079 (rdev->wowlan->eap_identity_req &&
5827 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); 6080 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
5828 if (rdev->wowlan->four_way_handshake) 6081 (rdev->wowlan->four_way_handshake &&
5829 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); 6082 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
5830 if (rdev->wowlan->rfkill_release) 6083 (rdev->wowlan->rfkill_release &&
5831 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); 6084 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
6085 goto nla_put_failure;
5832 if (rdev->wowlan->n_patterns) { 6086 if (rdev->wowlan->n_patterns) {
5833 struct nlattr *nl_pats, *nl_pat; 6087 struct nlattr *nl_pats, *nl_pat;
5834 int i, pat_len; 6088 int i, pat_len;
@@ -5843,12 +6097,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
5843 if (!nl_pat) 6097 if (!nl_pat)
5844 goto nla_put_failure; 6098 goto nla_put_failure;
5845 pat_len = rdev->wowlan->patterns[i].pattern_len; 6099 pat_len = rdev->wowlan->patterns[i].pattern_len;
5846 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK, 6100 if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
5847 DIV_ROUND_UP(pat_len, 8), 6101 DIV_ROUND_UP(pat_len, 8),
5848 rdev->wowlan->patterns[i].mask); 6102 rdev->wowlan->patterns[i].mask) ||
5849 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN, 6103 nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
5850 pat_len, 6104 pat_len,
5851 rdev->wowlan->patterns[i].pattern); 6105 rdev->wowlan->patterns[i].pattern))
6106 goto nla_put_failure;
5852 nla_nest_end(msg, nl_pat); 6107 nla_nest_end(msg, nl_pat);
5853 } 6108 }
5854 nla_nest_end(msg, nl_pats); 6109 nla_nest_end(msg, nl_pats);
@@ -5873,6 +6128,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
5873 struct cfg80211_wowlan new_triggers = {}; 6128 struct cfg80211_wowlan new_triggers = {};
5874 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; 6129 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
5875 int err, i; 6130 int err, i;
6131 bool prev_enabled = rdev->wowlan;
5876 6132
5877 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) 6133 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
5878 return -EOPNOTSUPP; 6134 return -EOPNOTSUPP;
@@ -6005,6 +6261,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6005 rdev->wowlan = NULL; 6261 rdev->wowlan = NULL;
6006 } 6262 }
6007 6263
6264 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
6265 rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
6266
6008 return 0; 6267 return 0;
6009 error: 6268 error:
6010 for (i = 0; i < new_triggers.n_patterns; i++) 6269 for (i = 0; i < new_triggers.n_patterns; i++)
@@ -6121,7 +6380,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
6121 if (err) 6380 if (err)
6122 goto free_msg; 6381 goto free_msg;
6123 6382
6124 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 6383 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
6384 goto nla_put_failure;
6125 6385
6126 genlmsg_end(msg, hdr); 6386 genlmsg_end(msg, hdr);
6127 6387
@@ -6262,7 +6522,7 @@ static struct genl_ops nl80211_ops[] = {
6262 .doit = nl80211_get_key, 6522 .doit = nl80211_get_key,
6263 .policy = nl80211_policy, 6523 .policy = nl80211_policy,
6264 .flags = GENL_ADMIN_PERM, 6524 .flags = GENL_ADMIN_PERM,
6265 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6525 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6266 NL80211_FLAG_NEED_RTNL, 6526 NL80211_FLAG_NEED_RTNL,
6267 }, 6527 },
6268 { 6528 {
@@ -6293,24 +6553,24 @@ static struct genl_ops nl80211_ops[] = {
6293 .cmd = NL80211_CMD_SET_BEACON, 6553 .cmd = NL80211_CMD_SET_BEACON,
6294 .policy = nl80211_policy, 6554 .policy = nl80211_policy,
6295 .flags = GENL_ADMIN_PERM, 6555 .flags = GENL_ADMIN_PERM,
6296 .doit = nl80211_addset_beacon, 6556 .doit = nl80211_set_beacon,
6297 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6557 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6298 NL80211_FLAG_NEED_RTNL, 6558 NL80211_FLAG_NEED_RTNL,
6299 }, 6559 },
6300 { 6560 {
6301 .cmd = NL80211_CMD_NEW_BEACON, 6561 .cmd = NL80211_CMD_START_AP,
6302 .policy = nl80211_policy, 6562 .policy = nl80211_policy,
6303 .flags = GENL_ADMIN_PERM, 6563 .flags = GENL_ADMIN_PERM,
6304 .doit = nl80211_addset_beacon, 6564 .doit = nl80211_start_ap,
6305 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6565 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6306 NL80211_FLAG_NEED_RTNL, 6566 NL80211_FLAG_NEED_RTNL,
6307 }, 6567 },
6308 { 6568 {
6309 .cmd = NL80211_CMD_DEL_BEACON, 6569 .cmd = NL80211_CMD_STOP_AP,
6310 .policy = nl80211_policy, 6570 .policy = nl80211_policy,
6311 .flags = GENL_ADMIN_PERM, 6571 .flags = GENL_ADMIN_PERM,
6312 .doit = nl80211_del_beacon, 6572 .doit = nl80211_stop_ap,
6313 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6573 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6314 NL80211_FLAG_NEED_RTNL, 6574 NL80211_FLAG_NEED_RTNL,
6315 }, 6575 },
6316 { 6576 {
@@ -6326,7 +6586,7 @@ static struct genl_ops nl80211_ops[] = {
6326 .doit = nl80211_set_station, 6586 .doit = nl80211_set_station,
6327 .policy = nl80211_policy, 6587 .policy = nl80211_policy,
6328 .flags = GENL_ADMIN_PERM, 6588 .flags = GENL_ADMIN_PERM,
6329 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6589 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6330 NL80211_FLAG_NEED_RTNL, 6590 NL80211_FLAG_NEED_RTNL,
6331 }, 6591 },
6332 { 6592 {
@@ -6342,7 +6602,7 @@ static struct genl_ops nl80211_ops[] = {
6342 .doit = nl80211_del_station, 6602 .doit = nl80211_del_station,
6343 .policy = nl80211_policy, 6603 .policy = nl80211_policy,
6344 .flags = GENL_ADMIN_PERM, 6604 .flags = GENL_ADMIN_PERM,
6345 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6605 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6346 NL80211_FLAG_NEED_RTNL, 6606 NL80211_FLAG_NEED_RTNL,
6347 }, 6607 },
6348 { 6608 {
@@ -6375,7 +6635,7 @@ static struct genl_ops nl80211_ops[] = {
6375 .doit = nl80211_del_mpath, 6635 .doit = nl80211_del_mpath,
6376 .policy = nl80211_policy, 6636 .policy = nl80211_policy,
6377 .flags = GENL_ADMIN_PERM, 6637 .flags = GENL_ADMIN_PERM,
6378 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6638 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6379 NL80211_FLAG_NEED_RTNL, 6639 NL80211_FLAG_NEED_RTNL,
6380 }, 6640 },
6381 { 6641 {
@@ -6383,7 +6643,7 @@ static struct genl_ops nl80211_ops[] = {
6383 .doit = nl80211_set_bss, 6643 .doit = nl80211_set_bss,
6384 .policy = nl80211_policy, 6644 .policy = nl80211_policy,
6385 .flags = GENL_ADMIN_PERM, 6645 .flags = GENL_ADMIN_PERM,
6386 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6646 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6387 NL80211_FLAG_NEED_RTNL, 6647 NL80211_FLAG_NEED_RTNL,
6388 }, 6648 },
6389 { 6649 {
@@ -6409,7 +6669,7 @@ static struct genl_ops nl80211_ops[] = {
6409 .doit = nl80211_get_mesh_config, 6669 .doit = nl80211_get_mesh_config,
6410 .policy = nl80211_policy, 6670 .policy = nl80211_policy,
6411 /* can be retrieved by unprivileged users */ 6671 /* can be retrieved by unprivileged users */
6412 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6672 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6413 NL80211_FLAG_NEED_RTNL, 6673 NL80211_FLAG_NEED_RTNL,
6414 }, 6674 },
6415 { 6675 {
@@ -6542,7 +6802,7 @@ static struct genl_ops nl80211_ops[] = {
6542 .doit = nl80211_setdel_pmksa, 6802 .doit = nl80211_setdel_pmksa,
6543 .policy = nl80211_policy, 6803 .policy = nl80211_policy,
6544 .flags = GENL_ADMIN_PERM, 6804 .flags = GENL_ADMIN_PERM,
6545 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6805 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6546 NL80211_FLAG_NEED_RTNL, 6806 NL80211_FLAG_NEED_RTNL,
6547 }, 6807 },
6548 { 6808 {
@@ -6550,7 +6810,7 @@ static struct genl_ops nl80211_ops[] = {
6550 .doit = nl80211_setdel_pmksa, 6810 .doit = nl80211_setdel_pmksa,
6551 .policy = nl80211_policy, 6811 .policy = nl80211_policy,
6552 .flags = GENL_ADMIN_PERM, 6812 .flags = GENL_ADMIN_PERM,
6553 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6813 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6554 NL80211_FLAG_NEED_RTNL, 6814 NL80211_FLAG_NEED_RTNL,
6555 }, 6815 },
6556 { 6816 {
@@ -6558,7 +6818,7 @@ static struct genl_ops nl80211_ops[] = {
6558 .doit = nl80211_flush_pmksa, 6818 .doit = nl80211_flush_pmksa,
6559 .policy = nl80211_policy, 6819 .policy = nl80211_policy,
6560 .flags = GENL_ADMIN_PERM, 6820 .flags = GENL_ADMIN_PERM,
6561 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6821 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6562 NL80211_FLAG_NEED_RTNL, 6822 NL80211_FLAG_NEED_RTNL,
6563 }, 6823 },
6564 { 6824 {
@@ -6718,7 +6978,7 @@ static struct genl_ops nl80211_ops[] = {
6718 .doit = nl80211_probe_client, 6978 .doit = nl80211_probe_client,
6719 .policy = nl80211_policy, 6979 .policy = nl80211_policy,
6720 .flags = GENL_ADMIN_PERM, 6980 .flags = GENL_ADMIN_PERM,
6721 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6981 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6722 NL80211_FLAG_NEED_RTNL, 6982 NL80211_FLAG_NEED_RTNL,
6723 }, 6983 },
6724 { 6984 {
@@ -6789,19 +7049,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
6789 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); 7049 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
6790 if (!nest) 7050 if (!nest)
6791 goto nla_put_failure; 7051 goto nla_put_failure;
6792 for (i = 0; i < req->n_ssids; i++) 7052 for (i = 0; i < req->n_ssids; i++) {
6793 NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); 7053 if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
7054 goto nla_put_failure;
7055 }
6794 nla_nest_end(msg, nest); 7056 nla_nest_end(msg, nest);
6795 7057
6796 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); 7058 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
6797 if (!nest) 7059 if (!nest)
6798 goto nla_put_failure; 7060 goto nla_put_failure;
6799 for (i = 0; i < req->n_channels; i++) 7061 for (i = 0; i < req->n_channels; i++) {
6800 NLA_PUT_U32(msg, i, req->channels[i]->center_freq); 7062 if (nla_put_u32(msg, i, req->channels[i]->center_freq))
7063 goto nla_put_failure;
7064 }
6801 nla_nest_end(msg, nest); 7065 nla_nest_end(msg, nest);
6802 7066
6803 if (req->ie) 7067 if (req->ie &&
6804 NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); 7068 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
7069 goto nla_put_failure;
6805 7070
6806 return 0; 7071 return 0;
6807 nla_put_failure: 7072 nla_put_failure:
@@ -6820,8 +7085,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
6820 if (!hdr) 7085 if (!hdr)
6821 return -1; 7086 return -1;
6822 7087
6823 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7088 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
6824 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7089 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7090 goto nla_put_failure;
6825 7091
6826 /* ignore errors and send incomplete event anyway */ 7092 /* ignore errors and send incomplete event anyway */
6827 nl80211_add_scan_req(msg, rdev); 7093 nl80211_add_scan_req(msg, rdev);
@@ -6845,8 +7111,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
6845 if (!hdr) 7111 if (!hdr)
6846 return -1; 7112 return -1;
6847 7113
6848 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7114 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
6849 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7115 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7116 goto nla_put_failure;
6850 7117
6851 return genlmsg_end(msg, hdr); 7118 return genlmsg_end(msg, hdr);
6852 7119
@@ -6969,26 +7236,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
6969 } 7236 }
6970 7237
6971 /* Userspace can always count this one always being set */ 7238 /* Userspace can always count this one always being set */
6972 NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); 7239 if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
6973 7240 goto nla_put_failure;
6974 if (request->alpha2[0] == '0' && request->alpha2[1] == '0') 7241
6975 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7242 if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
6976 NL80211_REGDOM_TYPE_WORLD); 7243 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
6977 else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') 7244 NL80211_REGDOM_TYPE_WORLD))
6978 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7245 goto nla_put_failure;
6979 NL80211_REGDOM_TYPE_CUSTOM_WORLD); 7246 } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
6980 else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || 7247 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
6981 request->intersect) 7248 NL80211_REGDOM_TYPE_CUSTOM_WORLD))
6982 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7249 goto nla_put_failure;
6983 NL80211_REGDOM_TYPE_INTERSECTION); 7250 } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
6984 else { 7251 request->intersect) {
6985 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7252 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
6986 NL80211_REGDOM_TYPE_COUNTRY); 7253 NL80211_REGDOM_TYPE_INTERSECTION))
6987 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); 7254 goto nla_put_failure;
6988 } 7255 } else {
6989 7256 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
6990 if (wiphy_idx_valid(request->wiphy_idx)) 7257 NL80211_REGDOM_TYPE_COUNTRY) ||
6991 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); 7258 nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
7259 request->alpha2))
7260 goto nla_put_failure;
7261 }
7262
7263 if (wiphy_idx_valid(request->wiphy_idx) &&
7264 nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
7265 goto nla_put_failure;
6992 7266
6993 genlmsg_end(msg, hdr); 7267 genlmsg_end(msg, hdr);
6994 7268
@@ -7022,9 +7296,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
7022 return; 7296 return;
7023 } 7297 }
7024 7298
7025 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7299 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7026 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7300 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7027 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7301 nla_put(msg, NL80211_ATTR_FRAME, len, buf))
7302 goto nla_put_failure;
7028 7303
7029 genlmsg_end(msg, hdr); 7304 genlmsg_end(msg, hdr);
7030 7305
@@ -7102,10 +7377,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
7102 return; 7377 return;
7103 } 7378 }
7104 7379
7105 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7380 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7106 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7381 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7107 NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); 7382 nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
7108 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7383 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
7384 goto nla_put_failure;
7109 7385
7110 genlmsg_end(msg, hdr); 7386 genlmsg_end(msg, hdr);
7111 7387
@@ -7153,15 +7429,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
7153 return; 7429 return;
7154 } 7430 }
7155 7431
7156 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7432 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7157 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7433 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7158 if (bssid) 7434 (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
7159 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7435 nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
7160 NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); 7436 (req_ie &&
7161 if (req_ie) 7437 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
7162 NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); 7438 (resp_ie &&
7163 if (resp_ie) 7439 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
7164 NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); 7440 goto nla_put_failure;
7165 7441
7166 genlmsg_end(msg, hdr); 7442 genlmsg_end(msg, hdr);
7167 7443
@@ -7193,13 +7469,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
7193 return; 7469 return;
7194 } 7470 }
7195 7471
7196 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7472 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7197 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7473 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7198 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7474 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
7199 if (req_ie) 7475 (req_ie &&
7200 NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); 7476 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
7201 if (resp_ie) 7477 (resp_ie &&
7202 NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); 7478 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
7479 goto nla_put_failure;
7203 7480
7204 genlmsg_end(msg, hdr); 7481 genlmsg_end(msg, hdr);
7205 7482
@@ -7230,14 +7507,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
7230 return; 7507 return;
7231 } 7508 }
7232 7509
7233 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7510 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7234 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7511 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7235 if (from_ap && reason) 7512 (from_ap && reason &&
7236 NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); 7513 nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
7237 if (from_ap) 7514 (from_ap &&
7238 NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); 7515 nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
7239 if (ie) 7516 (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
7240 NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); 7517 goto nla_put_failure;
7241 7518
7242 genlmsg_end(msg, hdr); 7519 genlmsg_end(msg, hdr);
7243 7520
@@ -7268,9 +7545,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
7268 return; 7545 return;
7269 } 7546 }
7270 7547
7271 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7548 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7272 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7549 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7273 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7550 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
7551 goto nla_put_failure;
7274 7552
7275 genlmsg_end(msg, hdr); 7553 genlmsg_end(msg, hdr);
7276 7554
@@ -7301,11 +7579,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
7301 return; 7579 return;
7302 } 7580 }
7303 7581
7304 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7582 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7305 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7583 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7306 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr); 7584 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
7307 if (ie_len && ie) 7585 (ie_len && ie &&
7308 NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); 7586 nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
7587 goto nla_put_failure;
7309 7588
7310 genlmsg_end(msg, hdr); 7589 genlmsg_end(msg, hdr);
7311 7590
@@ -7336,15 +7615,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
7336 return; 7615 return;
7337 } 7616 }
7338 7617
7339 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7618 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7340 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7619 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7341 if (addr) 7620 (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
7342 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7621 nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
7343 NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); 7622 (key_id != -1 &&
7344 if (key_id != -1) 7623 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
7345 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); 7624 (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
7346 if (tsc) 7625 goto nla_put_failure;
7347 NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
7348 7626
7349 genlmsg_end(msg, hdr); 7627 genlmsg_end(msg, hdr);
7350 7628
@@ -7379,7 +7657,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
7379 * Since we are applying the beacon hint to a wiphy we know its 7657 * Since we are applying the beacon hint to a wiphy we know its
7380 * wiphy_idx is valid 7658 * wiphy_idx is valid
7381 */ 7659 */
7382 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); 7660 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
7661 goto nla_put_failure;
7383 7662
7384 /* Before */ 7663 /* Before */
7385 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); 7664 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7431,14 +7710,16 @@ static void nl80211_send_remain_on_chan_event(
7431 return; 7710 return;
7432 } 7711 }
7433 7712
7434 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7713 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7435 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7714 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7436 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq); 7715 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
7437 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type); 7716 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
7438 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 7717 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
7718 goto nla_put_failure;
7439 7719
7440 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) 7720 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
7441 NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); 7721 nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
7722 goto nla_put_failure;
7442 7723
7443 genlmsg_end(msg, hdr); 7724 genlmsg_end(msg, hdr);
7444 7725
@@ -7482,7 +7763,8 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
7482 if (!msg) 7763 if (!msg)
7483 return; 7764 return;
7484 7765
7485 if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) { 7766 if (nl80211_send_station(msg, 0, 0, 0,
7767 rdev, dev, mac_addr, sinfo) < 0) {
7486 nlmsg_free(msg); 7768 nlmsg_free(msg);
7487 return; 7769 return;
7488 } 7770 }
@@ -7508,8 +7790,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
7508 return; 7790 return;
7509 } 7791 }
7510 7792
7511 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 7793 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7512 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 7794 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
7795 goto nla_put_failure;
7513 7796
7514 genlmsg_end(msg, hdr); 7797 genlmsg_end(msg, hdr);
7515 7798
@@ -7545,9 +7828,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
7545 return true; 7828 return true;
7546 } 7829 }
7547 7830
7548 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7831 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7549 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 7832 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7550 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7833 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
7834 goto nla_put_failure;
7551 7835
7552 err = genlmsg_end(msg, hdr); 7836 err = genlmsg_end(msg, hdr);
7553 if (err < 0) { 7837 if (err < 0) {
@@ -7580,7 +7864,8 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
7580 7864
7581int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 7865int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7582 struct net_device *netdev, u32 nlpid, 7866 struct net_device *netdev, u32 nlpid,
7583 int freq, const u8 *buf, size_t len, gfp_t gfp) 7867 int freq, int sig_dbm,
7868 const u8 *buf, size_t len, gfp_t gfp)
7584{ 7869{
7585 struct sk_buff *msg; 7870 struct sk_buff *msg;
7586 void *hdr; 7871 void *hdr;
@@ -7595,10 +7880,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7595 return -ENOMEM; 7880 return -ENOMEM;
7596 } 7881 }
7597 7882
7598 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7883 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7599 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7884 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7600 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 7885 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
7601 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7886 (sig_dbm &&
7887 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
7888 nla_put(msg, NL80211_ATTR_FRAME, len, buf))
7889 goto nla_put_failure;
7602 7890
7603 genlmsg_end(msg, hdr); 7891 genlmsg_end(msg, hdr);
7604 7892
@@ -7628,12 +7916,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
7628 return; 7916 return;
7629 } 7917 }
7630 7918
7631 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7919 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7632 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7920 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7633 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7921 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
7634 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 7922 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
7635 if (ack) 7923 (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
7636 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); 7924 goto nla_put_failure;
7637 7925
7638 genlmsg_end(msg, hdr); 7926 genlmsg_end(msg, hdr);
7639 7927
@@ -7665,15 +7953,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
7665 return; 7953 return;
7666 } 7954 }
7667 7955
7668 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7956 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7669 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7957 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7958 goto nla_put_failure;
7670 7959
7671 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 7960 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
7672 if (!pinfoattr) 7961 if (!pinfoattr)
7673 goto nla_put_failure; 7962 goto nla_put_failure;
7674 7963
7675 NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, 7964 if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
7676 rssi_event); 7965 rssi_event))
7966 goto nla_put_failure;
7677 7967
7678 nla_nest_end(msg, pinfoattr); 7968 nla_nest_end(msg, pinfoattr);
7679 7969
@@ -7706,16 +7996,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
7706 return; 7996 return;
7707 } 7997 }
7708 7998
7709 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7999 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7710 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 8000 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7711 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 8001 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
8002 goto nla_put_failure;
7712 8003
7713 rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); 8004 rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
7714 if (!rekey_attr) 8005 if (!rekey_attr)
7715 goto nla_put_failure; 8006 goto nla_put_failure;
7716 8007
7717 NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR, 8008 if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
7718 NL80211_REPLAY_CTR_LEN, replay_ctr); 8009 NL80211_REPLAY_CTR_LEN, replay_ctr))
8010 goto nla_put_failure;
7719 8011
7720 nla_nest_end(msg, rekey_attr); 8012 nla_nest_end(msg, rekey_attr);
7721 8013
@@ -7748,17 +8040,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
7748 return; 8040 return;
7749 } 8041 }
7750 8042
7751 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8043 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7752 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 8044 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
8045 goto nla_put_failure;
7753 8046
7754 attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); 8047 attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
7755 if (!attr) 8048 if (!attr)
7756 goto nla_put_failure; 8049 goto nla_put_failure;
7757 8050
7758 NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index); 8051 if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
7759 NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid); 8052 nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
7760 if (preauth) 8053 (preauth &&
7761 NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH); 8054 nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
8055 goto nla_put_failure;
7762 8056
7763 nla_nest_end(msg, attr); 8057 nla_nest_end(msg, attr);
7764 8058
@@ -7773,6 +8067,39 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
7773 nlmsg_free(msg); 8067 nlmsg_free(msg);
7774} 8068}
7775 8069
8070void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8071 struct net_device *netdev, int freq,
8072 enum nl80211_channel_type type, gfp_t gfp)
8073{
8074 struct sk_buff *msg;
8075 void *hdr;
8076
8077 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
8078 if (!msg)
8079 return;
8080
8081 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY);
8082 if (!hdr) {
8083 nlmsg_free(msg);
8084 return;
8085 }
8086
8087 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
8088 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
8089 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
8090 goto nla_put_failure;
8091
8092 genlmsg_end(msg, hdr);
8093
8094 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
8095 nl80211_mlme_mcgrp.id, gfp);
8096 return;
8097
8098 nla_put_failure:
8099 genlmsg_cancel(msg, hdr);
8100 nlmsg_free(msg);
8101}
8102
7776void 8103void
7777nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 8104nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
7778 struct net_device *netdev, const u8 *peer, 8105 struct net_device *netdev, const u8 *peer,
@@ -7792,15 +8119,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
7792 return; 8119 return;
7793 } 8120 }
7794 8121
7795 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8122 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7796 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 8123 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7797 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer); 8124 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
8125 goto nla_put_failure;
7798 8126
7799 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 8127 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
7800 if (!pinfoattr) 8128 if (!pinfoattr)
7801 goto nla_put_failure; 8129 goto nla_put_failure;
7802 8130
7803 NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets); 8131 if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
8132 goto nla_put_failure;
7804 8133
7805 nla_nest_end(msg, pinfoattr); 8134 nla_nest_end(msg, pinfoattr);
7806 8135
@@ -7834,12 +8163,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
7834 return; 8163 return;
7835 } 8164 }
7836 8165
7837 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8166 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7838 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 8167 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7839 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 8168 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
7840 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 8169 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
7841 if (acked) 8170 (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
7842 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); 8171 goto nla_put_failure;
7843 8172
7844 err = genlmsg_end(msg, hdr); 8173 err = genlmsg_end(msg, hdr);
7845 if (err < 0) { 8174 if (err < 0) {
@@ -7859,7 +8188,7 @@ EXPORT_SYMBOL(cfg80211_probe_status);
7859 8188
7860void cfg80211_report_obss_beacon(struct wiphy *wiphy, 8189void cfg80211_report_obss_beacon(struct wiphy *wiphy,
7861 const u8 *frame, size_t len, 8190 const u8 *frame, size_t len,
7862 int freq, gfp_t gfp) 8191 int freq, int sig_dbm, gfp_t gfp)
7863{ 8192{
7864 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 8193 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
7865 struct sk_buff *msg; 8194 struct sk_buff *msg;
@@ -7879,10 +8208,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
7879 return; 8208 return;
7880 } 8209 }
7881 8210
7882 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8211 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7883 if (freq) 8212 (freq &&
7884 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 8213 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
7885 NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); 8214 (sig_dbm &&
8215 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
8216 nla_put(msg, NL80211_ATTR_FRAME, len, frame))
8217 goto nla_put_failure;
7886 8218
7887 genlmsg_end(msg, hdr); 8219 genlmsg_end(msg, hdr);
7888 8220
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 12bf4d185abe..01a1122c3b33 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -92,7 +92,8 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
92 gfp_t gfp); 92 gfp_t gfp);
93 93
94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
95 struct net_device *netdev, u32 nlpid, int freq, 95 struct net_device *netdev, u32 nlpid,
96 int freq, int sig_dbm,
96 const u8 *buf, size_t len, gfp_t gfp); 97 const u8 *buf, size_t len, gfp_t gfp);
97void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 98void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
98 struct net_device *netdev, u64 cookie, 99 struct net_device *netdev, u64 cookie,
@@ -117,6 +118,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
117 struct net_device *netdev, int index, 118 struct net_device *netdev, int index,
118 const u8 *bssid, bool preauth, gfp_t gfp); 119 const u8 *bssid, bool preauth, gfp_t gfp);
119 120
121void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
122 struct net_device *dev, int freq,
123 enum nl80211_channel_type type, gfp_t gfp);
124
120bool nl80211_unexpected_frame(struct net_device *dev, 125bool nl80211_unexpected_frame(struct net_device *dev,
121 const u8 *addr, gfp_t gfp); 126 const u8 *addr, gfp_t gfp);
122bool nl80211_unexpected_4addr_frame(struct net_device *dev, 127bool nl80211_unexpected_4addr_frame(struct net_device *dev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f65feaad155f..15f347477a99 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -388,7 +388,15 @@ static void reg_regdb_query(const char *alpha2)
388 388
389 schedule_work(&reg_regdb_work); 389 schedule_work(&reg_regdb_work);
390} 390}
391
392/* Feel free to add any other sanity checks here */
393static void reg_regdb_size_check(void)
394{
395 /* We should ideally BUILD_BUG_ON() but then random builds would fail */
396 WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
397}
391#else 398#else
399static inline void reg_regdb_size_check(void) {}
392static inline void reg_regdb_query(const char *alpha2) {} 400static inline void reg_regdb_query(const char *alpha2) {}
393#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ 401#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
394 402
@@ -882,23 +890,8 @@ static void handle_channel(struct wiphy *wiphy,
882 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 890 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
883 chan->max_antenna_gain = min(chan->orig_mag, 891 chan->max_antenna_gain = min(chan->orig_mag,
884 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 892 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
885 if (chan->orig_mpwr) { 893 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
886 /* 894 chan->max_power = min(chan->max_power, chan->max_reg_power);
887 * Devices that have their own custom regulatory domain
888 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
889 * passed country IE power settings.
890 */
891 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
892 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
893 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
894 chan->max_power =
895 MBM_TO_DBM(power_rule->max_eirp);
896 } else {
897 chan->max_power = min(chan->orig_mpwr,
898 (int) MBM_TO_DBM(power_rule->max_eirp));
899 }
900 } else
901 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
902} 895}
903 896
904static void handle_band(struct wiphy *wiphy, 897static void handle_band(struct wiphy *wiphy,
@@ -2337,6 +2330,8 @@ int __init regulatory_init(void)
2337 spin_lock_init(&reg_requests_lock); 2330 spin_lock_init(&reg_requests_lock);
2338 spin_lock_init(&reg_pending_beacons_lock); 2331 spin_lock_init(&reg_pending_beacons_lock);
2339 2332
2333 reg_regdb_size_check();
2334
2340 cfg80211_regdomain = cfg80211_world_regdom; 2335 cfg80211_regdomain = cfg80211_world_regdom;
2341 2336
2342 user_alpha2[0] = '9'; 2337 user_alpha2[0] = '9';
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 31119e32e092..af2b1caa37fa 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -18,7 +18,7 @@
18#include "nl80211.h" 18#include "nl80211.h"
19#include "wext-compat.h" 19#include "wext-compat.h"
20 20
21#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) 21#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
22 22
23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
24{ 24{
@@ -281,7 +281,7 @@ static bool is_bss(struct cfg80211_bss *a,
281{ 281{
282 const u8 *ssidie; 282 const u8 *ssidie;
283 283
284 if (bssid && compare_ether_addr(a->bssid, bssid)) 284 if (bssid && !ether_addr_equal(a->bssid, bssid))
285 return false; 285 return false;
286 286
287 if (!ssid) 287 if (!ssid)
@@ -378,7 +378,11 @@ static int cmp_bss_core(struct cfg80211_bss *a,
378 b->len_information_elements); 378 b->len_information_elements);
379 } 379 }
380 380
381 return memcmp(a->bssid, b->bssid, ETH_ALEN); 381 /*
382 * we can't use compare_ether_addr here since we need a < > operator.
383 * The binary return value of compare_ether_addr isn't enough
384 */
385 return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
382} 386}
383 387
384static int cmp_bss(struct cfg80211_bss *a, 388static int cmp_bss(struct cfg80211_bss *a,
@@ -734,9 +738,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
734struct cfg80211_bss* 738struct cfg80211_bss*
735cfg80211_inform_bss(struct wiphy *wiphy, 739cfg80211_inform_bss(struct wiphy *wiphy,
736 struct ieee80211_channel *channel, 740 struct ieee80211_channel *channel,
737 const u8 *bssid, 741 const u8 *bssid, u64 tsf, u16 capability,
738 u64 timestamp, u16 capability, u16 beacon_interval, 742 u16 beacon_interval, const u8 *ie, size_t ielen,
739 const u8 *ie, size_t ielen,
740 s32 signal, gfp_t gfp) 743 s32 signal, gfp_t gfp)
741{ 744{
742 struct cfg80211_internal_bss *res; 745 struct cfg80211_internal_bss *res;
@@ -758,7 +761,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
758 memcpy(res->pub.bssid, bssid, ETH_ALEN); 761 memcpy(res->pub.bssid, bssid, ETH_ALEN);
759 res->pub.channel = channel; 762 res->pub.channel = channel;
760 res->pub.signal = signal; 763 res->pub.signal = signal;
761 res->pub.tsf = timestamp; 764 res->pub.tsf = tsf;
762 res->pub.beacon_interval = beacon_interval; 765 res->pub.beacon_interval = beacon_interval;
763 res->pub.capability = capability; 766 res->pub.capability = capability;
764 /* 767 /*
@@ -861,6 +864,18 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
861} 864}
862EXPORT_SYMBOL(cfg80211_inform_bss_frame); 865EXPORT_SYMBOL(cfg80211_inform_bss_frame);
863 866
867void cfg80211_ref_bss(struct cfg80211_bss *pub)
868{
869 struct cfg80211_internal_bss *bss;
870
871 if (!pub)
872 return;
873
874 bss = container_of(pub, struct cfg80211_internal_bss, pub);
875 kref_get(&bss->ref);
876}
877EXPORT_SYMBOL(cfg80211_ref_bss);
878
864void cfg80211_put_bss(struct cfg80211_bss *pub) 879void cfg80211_put_bss(struct cfg80211_bss *pub)
865{ 880{
866 struct cfg80211_internal_bss *bss; 881 struct cfg80211_internal_bss *bss;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 7b9ecaed96be..f7e937ff8978 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -179,7 +179,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
179 params->ssid, params->ssid_len, 179 params->ssid, params->ssid_len,
180 NULL, 0, 180 NULL, 0,
181 params->key, params->key_len, 181 params->key, params->key_len,
182 params->key_idx, false); 182 params->key_idx);
183 case CFG80211_CONN_ASSOCIATE_NEXT: 183 case CFG80211_CONN_ASSOCIATE_NEXT:
184 BUG_ON(!rdev->ops->assoc); 184 BUG_ON(!rdev->ops->assoc);
185 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 185 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -477,6 +477,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
477 kfree(wdev->connect_keys); 477 kfree(wdev->connect_keys);
478 wdev->connect_keys = NULL; 478 wdev->connect_keys = NULL;
479 wdev->ssid_len = 0; 479 wdev->ssid_len = 0;
480 cfg80211_put_bss(bss);
480 return; 481 return;
481 } 482 }
482 483
@@ -701,31 +702,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
701 wdev->ssid_len = 0; 702 wdev->ssid_len = 0;
702 703
703 if (wdev->conn) { 704 if (wdev->conn) {
704 const u8 *bssid;
705 int ret;
706
707 kfree(wdev->conn->ie); 705 kfree(wdev->conn->ie);
708 wdev->conn->ie = NULL; 706 wdev->conn->ie = NULL;
709 kfree(wdev->conn); 707 kfree(wdev->conn);
710 wdev->conn = NULL; 708 wdev->conn = NULL;
711
712 /*
713 * If this disconnect was due to a disassoc, we
714 * we might still have an auth BSS around. For
715 * the userspace SME that's currently expected,
716 * but for the kernel SME (nl80211 CONNECT or
717 * wireless extensions) we want to clear up all
718 * state.
719 */
720 for (i = 0; i < MAX_AUTH_BSSES; i++) {
721 if (!wdev->auth_bsses[i])
722 continue;
723 bssid = wdev->auth_bsses[i]->pub.bssid;
724 ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
725 WLAN_REASON_DEAUTH_LEAVING,
726 false);
727 WARN(ret, "deauth failed: %d\n", ret);
728 }
729 } 709 }
730 710
731 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); 711 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@@ -1012,7 +992,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
1012 return err; 992 return err;
1013} 993}
1014 994
1015void cfg80211_sme_disassoc(struct net_device *dev, int idx) 995void cfg80211_sme_disassoc(struct net_device *dev,
996 struct cfg80211_internal_bss *bss)
1016{ 997{
1017 struct wireless_dev *wdev = dev->ieee80211_ptr; 998 struct wireless_dev *wdev = dev->ieee80211_ptr;
1018 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 999 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1031,16 +1012,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
1031 * want it any more so deauthenticate too. 1012 * want it any more so deauthenticate too.
1032 */ 1013 */
1033 1014
1034 if (!wdev->auth_bsses[idx]) 1015 memcpy(bssid, bss->pub.bssid, ETH_ALEN);
1035 return;
1036 1016
1037 memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); 1017 __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
1038 if (__cfg80211_mlme_deauth(rdev, dev, bssid, 1018 WLAN_REASON_DEAUTH_LEAVING, false);
1039 NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
1040 false)) {
1041 /* whatever -- assume gone anyway */
1042 cfg80211_unhold_bss(wdev->auth_bsses[idx]);
1043 cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
1044 wdev->auth_bsses[idx] = NULL;
1045 }
1046} 1019}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 9aa9db6c8141..55d99466babb 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -370,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
370 iftype != NL80211_IFTYPE_P2P_CLIENT && 370 iftype != NL80211_IFTYPE_P2P_CLIENT &&
371 iftype != NL80211_IFTYPE_MESH_POINT) || 371 iftype != NL80211_IFTYPE_MESH_POINT) ||
372 (is_multicast_ether_addr(dst) && 372 (is_multicast_ether_addr(dst) &&
373 !compare_ether_addr(src, addr))) 373 ether_addr_equal(src, addr)))
374 return -1; 374 return -1;
375 if (iftype == NL80211_IFTYPE_MESH_POINT) { 375 if (iftype == NL80211_IFTYPE_MESH_POINT) {
376 struct ieee80211s_hdr *meshdr = 376 struct ieee80211s_hdr *meshdr =
@@ -398,9 +398,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
398 payload = skb->data + hdrlen; 398 payload = skb->data + hdrlen;
399 ethertype = (payload[6] << 8) | payload[7]; 399 ethertype = (payload[6] << 8) | payload[7];
400 400
401 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && 401 if (likely((ether_addr_equal(payload, rfc1042_header) &&
402 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || 402 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
403 compare_ether_addr(payload, bridge_tunnel_header) == 0)) { 403 ether_addr_equal(payload, bridge_tunnel_header))) {
404 /* remove RFC1042 or Bridge-Tunnel encapsulation and 404 /* remove RFC1042 or Bridge-Tunnel encapsulation and
405 * replace EtherType */ 405 * replace EtherType */
406 skb_pull(skb, hdrlen + 6); 406 skb_pull(skb, hdrlen + 6);
@@ -609,10 +609,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
609 payload = frame->data; 609 payload = frame->data;
610 ethertype = (payload[6] << 8) | payload[7]; 610 ethertype = (payload[6] << 8) | payload[7];
611 611
612 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && 612 if (likely((ether_addr_equal(payload, rfc1042_header) &&
613 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || 613 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
614 compare_ether_addr(payload, 614 ether_addr_equal(payload, bridge_tunnel_header))) {
615 bridge_tunnel_header) == 0)) {
616 /* remove RFC1042 or Bridge-Tunnel 615 /* remove RFC1042 or Bridge-Tunnel
617 * encapsulation and replace EtherType */ 616 * encapsulation and replace EtherType */
618 skb_pull(frame, 6); 617 skb_pull(frame, 6);
@@ -880,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
880 return rate->legacy; 879 return rate->legacy;
881 880
882 /* the formula below does only work for MCS values smaller than 32 */ 881 /* the formula below does only work for MCS values smaller than 32 */
883 if (rate->mcs >= 32) 882 if (WARN_ON_ONCE(rate->mcs >= 32))
884 return 0; 883 return 0;
885 884
886 modulation = rate->mcs & 7; 885 modulation = rate->mcs & 7;
@@ -904,6 +903,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
904 /* do NOT round down here */ 903 /* do NOT round down here */
905 return (bitrate + 50000) / 100000; 904 return (bitrate + 50000) / 100000;
906} 905}
906EXPORT_SYMBOL(cfg80211_calculate_bitrate);
907 907
908int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, 908int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
909 u32 beacon_int) 909 u32 beacon_int)
@@ -945,13 +945,6 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
945 if (rdev->wiphy.software_iftypes & BIT(iftype)) 945 if (rdev->wiphy.software_iftypes & BIT(iftype))
946 return 0; 946 return 0;
947 947
948 /*
949 * Drivers will gradually all set this flag, until all
950 * have it we only enforce for those that set it.
951 */
952 if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
953 return 0;
954
955 memset(num, 0, sizeof(num)); 948 memset(num, 0, sizeof(num));
956 949
957 num[iftype] = 1; 950 num[iftype] = 1;
@@ -971,6 +964,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
971 } 964 }
972 mutex_unlock(&rdev->devlist_mtx); 965 mutex_unlock(&rdev->devlist_mtx);
973 966
967 if (total == 1)
968 return 0;
969
974 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 970 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
975 const struct ieee80211_iface_combination *c; 971 const struct ieee80211_iface_combination *c;
976 struct ieee80211_iface_limit *limits; 972 struct ieee80211_iface_limit *limits;
@@ -988,7 +984,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
988 if (rdev->wiphy.software_iftypes & BIT(iftype)) 984 if (rdev->wiphy.software_iftypes & BIT(iftype))
989 continue; 985 continue;
990 for (j = 0; j < c->n_limits; j++) { 986 for (j = 0; j < c->n_limits; j++) {
991 if (!(limits[j].types & iftype)) 987 if (!(limits[j].types & BIT(iftype)))
992 continue; 988 continue;
993 if (limits[j].max < num[iftype]) 989 if (limits[j].max < num[iftype])
994 goto cont; 990 goto cont;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3c24eb97e9d7..6a6181a673ca 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -821,6 +821,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
821 struct wireless_dev *wdev = dev->ieee80211_ptr; 821 struct wireless_dev *wdev = dev->ieee80211_ptr;
822 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 822 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
823 struct ieee80211_channel *chan; 823 struct ieee80211_channel *chan;
824 enum nl80211_channel_type channel_type;
824 825
825 switch (wdev->iftype) { 826 switch (wdev->iftype) {
826 case NL80211_IFTYPE_STATION: 827 case NL80211_IFTYPE_STATION:
@@ -831,7 +832,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
831 if (!rdev->ops->get_channel) 832 if (!rdev->ops->get_channel)
832 return -EINVAL; 833 return -EINVAL;
833 834
834 chan = rdev->ops->get_channel(wdev->wiphy); 835 chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
835 if (!chan) 836 if (!chan)
836 return -EINVAL; 837 return -EINVAL;
837 freq->m = chan->center_freq; 838 freq->m = chan->center_freq;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0af7f54e4f61..b0eb7aa49b60 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -256,7 +256,7 @@ static const struct iw_ioctl_description standard_ioctl[] = {
256 .max_tokens = sizeof(struct iw_pmksa), 256 .max_tokens = sizeof(struct iw_pmksa),
257 }, 257 },
258}; 258};
259static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); 259static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
260 260
261/* 261/*
262 * Meta-data about all the additional standard Wireless Extension events 262 * Meta-data about all the additional standard Wireless Extension events
@@ -306,7 +306,7 @@ static const struct iw_ioctl_description standard_event[] = {
306 .max_tokens = sizeof(struct iw_pmkid_cand), 306 .max_tokens = sizeof(struct iw_pmkid_cand),
307 }, 307 },
308}; 308};
309static const unsigned standard_event_num = ARRAY_SIZE(standard_event); 309static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);
310 310
311/* Size (in bytes) of various events */ 311/* Size (in bytes) of various events */
312static const int event_type_size[] = { 312static const int event_type_size[] = {
@@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
402 r->ifi_flags = dev_get_flags(dev); 402 r->ifi_flags = dev_get_flags(dev);
403 r->ifi_change = 0; /* Wireless changes don't affect those flags */ 403 r->ifi_change = 0; /* Wireless changes don't affect those flags */
404 404
405 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 405 if (nla_put_string(skb, IFLA_IFNAME, dev->name))
406 goto nla_put_failure;
406 407
407 return nlh; 408 return nlh;
408 nla_put_failure: 409 nla_put_failure:
@@ -428,7 +429,7 @@ void wireless_send_event(struct net_device * dev,
428 int hdr_len; /* Size of the event header */ 429 int hdr_len; /* Size of the event header */
429 int wrqu_off = 0; /* Offset in wrqu */ 430 int wrqu_off = 0; /* Offset in wrqu */
430 /* Don't "optimise" the following variable, it will crash */ 431 /* Don't "optimise" the following variable, it will crash */
431 unsigned cmd_index; /* *MUST* be unsigned */ 432 unsigned int cmd_index; /* *MUST* be unsigned */
432 struct sk_buff *skb; 433 struct sk_buff *skb;
433 struct nlmsghdr *nlh; 434 struct nlmsghdr *nlh;
434 struct nlattr *nla; 435 struct nlattr *nla;
@@ -780,8 +781,10 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
780 if (cmd == SIOCSIWENCODEEXT) { 781 if (cmd == SIOCSIWENCODEEXT) {
781 struct iw_encode_ext *ee = (void *) extra; 782 struct iw_encode_ext *ee = (void *) extra;
782 783
783 if (iwp->length < sizeof(*ee) + ee->key_len) 784 if (iwp->length < sizeof(*ee) + ee->key_len) {
784 return -EFAULT; 785 err = -EFAULT;
786 goto out;
787 }
785 } 788 }
786 } 789 }
787 790
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 326750b99151..7decbd357d51 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -30,6 +30,9 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
30 wdev->wext.connect.ie = wdev->wext.ie; 30 wdev->wext.connect.ie = wdev->wext.ie;
31 wdev->wext.connect.ie_len = wdev->wext.ie_len; 31 wdev->wext.connect.ie_len = wdev->wext.ie_len;
32 32
33 /* Use default background scan period */
34 wdev->wext.connect.bg_scan_period = -1;
35
33 if (wdev->wext.keys) { 36 if (wdev->wext.keys) {
34 wdev->wext.keys->def = wdev->wext.default_key; 37 wdev->wext.keys->def = wdev->wext.default_key;
35 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; 38 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
@@ -273,7 +276,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
273 276
274 /* fixed already - and no change */ 277 /* fixed already - and no change */
275 if (wdev->wext.connect.bssid && bssid && 278 if (wdev->wext.connect.bssid && bssid &&
276 compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) 279 ether_addr_equal(bssid, wdev->wext.connect.bssid))
277 goto out; 280 goto out;
278 281
279 err = __cfg80211_disconnect(rdev, dev, 282 err = __cfg80211_disconnect(rdev, dev,
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 5d643a548feb..33bef22e44e9 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -203,7 +203,7 @@ void wireless_spy_update(struct net_device * dev,
203 203
204 /* Update all records that match */ 204 /* Update all records that match */
205 for (i = 0; i < spydata->spy_number; i++) 205 for (i = 0; i < spydata->spy_number; i++)
206 if (!compare_ether_addr(address, spydata->spy_address[i])) { 206 if (ether_addr_equal(address, spydata->spy_address[i])) {
207 memcpy(&(spydata->spy_stat[i]), wstats, 207 memcpy(&(spydata->spy_stat[i]), wstats,
208 sizeof(struct iw_quality)); 208 sizeof(struct iw_quality));
209 match = i; 209 match = i;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index d2efd29f434e..43239527a205 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,18 +73,12 @@ static struct ctl_table x25_table[] = {
73 { 0, }, 73 { 0, },
74}; 74};
75 75
76static struct ctl_path x25_path[] = {
77 { .procname = "net", },
78 { .procname = "x25", },
79 { }
80};
81
82void __init x25_register_sysctl(void) 76void __init x25_register_sysctl(void)
83{ 77{
84 x25_table_header = register_sysctl_paths(x25_path, x25_table); 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
85} 79}
86 80
87void x25_unregister_sysctl(void) 81void x25_unregister_sysctl(void)
88{ 82{
89 unregister_sysctl_table(x25_table_header); 83 unregister_net_sysctl_table(x25_table_header);
90} 84}
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862d1f46..a8a236338e61 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
58 if (!sock_owned_by_user(sk)) { 58 if (!sock_owned_by_user(sk)) {
59 queued = x25_process_rx_frame(sk, skb); 59 queued = x25_process_rx_frame(sk, skb);
60 } else { 60 } else {
61 queued = !sk_add_backlog(sk, skb); 61 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
62 } 62 }
63 bh_unlock_sock(sk); 63 bh_unlock_sock(sk);
64 sock_put(sk); 64 sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 36384a1fa9f2..66c638730c7a 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -231,7 +231,7 @@ int x25_create_facilities(unsigned char *buffer,
231 } 231 }
232 232
233 if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) { 233 if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
234 unsigned bytecount = (dte_facs->calling_len + 1) >> 1; 234 unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
235 *p++ = X25_FAC_CALLING_AE; 235 *p++ = X25_FAC_CALLING_AE;
236 *p++ = 1 + bytecount; 236 *p++ = 1 + bytecount;
237 *p++ = dte_facs->calling_len; 237 *p++ = dte_facs->calling_len;
@@ -240,7 +240,7 @@ int x25_create_facilities(unsigned char *buffer,
240 } 240 }
241 241
242 if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) { 242 if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
243 unsigned bytecount = (dte_facs->called_len % 2) ? 243 unsigned int bytecount = (dte_facs->called_len % 2) ?
244 dte_facs->called_len / 2 + 1 : 244 dte_facs->called_len / 2 + 1 :
245 dte_facs->called_len / 2; 245 dte_facs->called_len / 2;
246 *p++ = X25_FAC_CALLED_AE; 246 *p++ = X25_FAC_CALLED_AE;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 6d081674515f..ce90b8d92365 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -3,12 +3,17 @@
3# 3#
4config XFRM 4config XFRM
5 bool 5 bool
6 select CRYPTO
7 depends on NET 6 depends on NET
8 7
8config XFRM_ALGO
9 tristate
10 select XFRM
11 select CRYPTO
12
9config XFRM_USER 13config XFRM_USER
10 tristate "Transformation user configuration interface" 14 tristate "Transformation user configuration interface"
11 depends on INET && XFRM 15 depends on INET
16 select XFRM_ALGO
12 ---help--- 17 ---help---
13 Support for Transformation(XFRM) user configuration interface 18 Support for Transformation(XFRM) user configuration interface
14 like IPsec used by native Linux tools. 19 like IPsec used by native Linux tools.
@@ -48,13 +53,13 @@ config XFRM_STATISTICS
48 53
49config XFRM_IPCOMP 54config XFRM_IPCOMP
50 tristate 55 tristate
51 select XFRM 56 select XFRM_ALGO
52 select CRYPTO 57 select CRYPTO
53 select CRYPTO_DEFLATE 58 select CRYPTO_DEFLATE
54 59
55config NET_KEY 60config NET_KEY
56 tristate "PF_KEY sockets" 61 tristate "PF_KEY sockets"
57 select XFRM 62 select XFRM_ALGO
58 ---help--- 63 ---help---
59 PF_KEYv2 socket family, compatible to KAME ones. 64 PF_KEYv2 socket family, compatible to KAME ones.
60 They are required if you are going to use IPsec tools ported 65 They are required if you are going to use IPsec tools ported
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index aa429eefe919..c0e961983f17 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -3,8 +3,9 @@
3# 3#
4 4
5obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ 5obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
6 xfrm_input.o xfrm_output.o xfrm_algo.o \ 6 xfrm_input.o xfrm_output.o \
7 xfrm_sysctl.o xfrm_replay.o 7 xfrm_sysctl.o xfrm_replay.o
8obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 8obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
9obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
9obj-$(CONFIG_XFRM_USER) += xfrm_user.o 10obj-$(CONFIG_XFRM_USER) += xfrm_user.o
10obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o 11obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 791ab2e77f3f..4ce2d93162c1 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -15,9 +15,6 @@
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <net/xfrm.h> 17#include <net/xfrm.h>
18#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
19#include <net/ah.h>
20#endif
21#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) 18#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
22#include <net/esp.h> 19#include <net/esp.h>
23#endif 20#endif
@@ -752,3 +749,5 @@ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
752} 749}
753EXPORT_SYMBOL_GPL(pskb_put); 750EXPORT_SYMBOL_GPL(pskb_put);
754#endif 751#endif
752
753MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 7199d78b2aa1..716502ada53b 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -45,10 +45,10 @@ static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
45 return (h ^ (h >> 16)) & hmask; 45 return (h ^ (h >> 16)) & hmask;
46} 46}
47 47
48static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr, 48static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr,
49 const xfrm_address_t *saddr, 49 const xfrm_address_t *saddr,
50 unsigned short family, 50 unsigned short family,
51 unsigned int hmask) 51 unsigned int hmask)
52{ 52{
53 unsigned int h = family; 53 unsigned int h = family;
54 switch (family) { 54 switch (family) {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 47bacd8c0250..95a338c89f99 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -21,7 +21,7 @@
21 21
22static int xfrm_output2(struct sk_buff *skb); 22static int xfrm_output2(struct sk_buff *skb);
23 23
24static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) 24static int xfrm_skb_check_space(struct sk_buff *skb)
25{ 25{
26 struct dst_entry *dst = skb_dst(skb); 26 struct dst_entry *dst = skb_dst(skb);
27 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) 27 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
@@ -48,7 +48,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
48 goto resume; 48 goto resume;
49 49
50 do { 50 do {
51 err = xfrm_state_check_space(x, skb); 51 err = xfrm_skb_check_space(skb);
52 if (err) { 52 if (err) {
53 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); 53 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
54 goto error_nolock; 54 goto error_nolock;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7661576b6f45..ccfbd328a69d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,6 +26,7 @@
26#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/audit.h> 27#include <linux/audit.h>
28#include <net/dst.h> 28#include <net/dst.h>
29#include <net/flow.h>
29#include <net/xfrm.h> 30#include <net/xfrm.h>
30#include <net/ip.h> 31#include <net/ip.h>
31#ifdef CONFIG_XFRM_STATISTICS 32#ifdef CONFIG_XFRM_STATISTICS
@@ -56,7 +57,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *xdst);
56static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 57static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
57 int dir); 58 int dir);
58 59
59static inline int 60static inline bool
60__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 61__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
61{ 62{
62 const struct flowi4 *fl4 = &fl->u.ip4; 63 const struct flowi4 *fl4 = &fl->u.ip4;
@@ -69,7 +70,7 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
69 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); 70 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
70} 71}
71 72
72static inline int 73static inline bool
73__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 74__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
74{ 75{
75 const struct flowi6 *fl6 = &fl->u.ip6; 76 const struct flowi6 *fl6 = &fl->u.ip6;
@@ -82,8 +83,8 @@ __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
82 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); 83 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
83} 84}
84 85
85int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, 86bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
86 unsigned short family) 87 unsigned short family)
87{ 88{
88 switch (family) { 89 switch (family) {
89 case AF_INET: 90 case AF_INET:
@@ -91,7 +92,7 @@ int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
91 case AF_INET6: 92 case AF_INET6:
92 return __xfrm6_selector_match(sel, fl); 93 return __xfrm6_selector_match(sel, fl);
93 } 94 }
94 return 0; 95 return false;
95} 96}
96 97
97static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 98static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
@@ -877,7 +878,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
877 u8 type, u16 family, int dir) 878 u8 type, u16 family, int dir)
878{ 879{
879 const struct xfrm_selector *sel = &pol->selector; 880 const struct xfrm_selector *sel = &pol->selector;
880 int match, ret = -ESRCH; 881 int ret = -ESRCH;
882 bool match;
881 883
882 if (pol->family != family || 884 if (pol->family != family ||
883 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 885 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
@@ -1006,8 +1008,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1006 1008
1007 read_lock_bh(&xfrm_policy_lock); 1009 read_lock_bh(&xfrm_policy_lock);
1008 if ((pol = sk->sk_policy[dir]) != NULL) { 1010 if ((pol = sk->sk_policy[dir]) != NULL) {
1009 int match = xfrm_selector_match(&pol->selector, fl, 1011 bool match = xfrm_selector_match(&pol->selector, fl,
1010 sk->sk_family); 1012 sk->sk_family);
1011 int err = 0; 1013 int err = 0;
1012 1014
1013 if (match) { 1015 if (match) {
@@ -1919,6 +1921,9 @@ no_transform:
1919 } 1921 }
1920ok: 1922ok:
1921 xfrm_pols_put(pols, drop_pols); 1923 xfrm_pols_put(pols, drop_pols);
1924 if (dst && dst->xfrm &&
1925 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
1926 dst->flags |= DST_XFRM_TUNNEL;
1922 return dst; 1927 return dst;
1923 1928
1924nopol: 1929nopol:
@@ -2767,8 +2772,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2767#endif 2772#endif
2768 2773
2769#ifdef CONFIG_XFRM_MIGRATE 2774#ifdef CONFIG_XFRM_MIGRATE
2770static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 2775static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2771 const struct xfrm_selector *sel_tgt) 2776 const struct xfrm_selector *sel_tgt)
2772{ 2777{
2773 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 2778 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2774 if (sel_tgt->family == sel_cmp->family && 2779 if (sel_tgt->family == sel_cmp->family &&
@@ -2778,14 +2783,14 @@ static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2778 sel_cmp->family) == 0 && 2783 sel_cmp->family) == 0 &&
2779 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 2784 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2780 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 2785 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2781 return 1; 2786 return true;
2782 } 2787 }
2783 } else { 2788 } else {
2784 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 2789 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2785 return 1; 2790 return true;
2786 } 2791 }
2787 } 2792 }
2788 return 0; 2793 return false;
2789} 2794}
2790 2795
2791static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, 2796static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 39e02c54ed26..2f6d11d04a2b 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -167,7 +167,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
167 } 167 }
168 168
169 if (xfrm_aevent_is_on(xs_net(x))) 169 if (xfrm_aevent_is_on(xs_net(x)))
170 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 170 x->repl->notify(x, XFRM_REPLAY_UPDATE);
171} 171}
172 172
173static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) 173static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -279,7 +279,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
279 replay_esn->bmp[nr] |= (1U << bitnr); 279 replay_esn->bmp[nr] |= (1U << bitnr);
280 280
281 if (xfrm_aevent_is_on(xs_net(x))) 281 if (xfrm_aevent_is_on(xs_net(x)))
282 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 282 x->repl->notify(x, XFRM_REPLAY_UPDATE);
283} 283}
284 284
285static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) 285static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -473,7 +473,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
473 replay_esn->bmp[nr] |= (1U << bitnr); 473 replay_esn->bmp[nr] |= (1U << bitnr);
474 474
475 if (xfrm_aevent_is_on(xs_net(x))) 475 if (xfrm_aevent_is_on(xs_net(x)))
476 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 476 x->repl->notify(x, XFRM_REPLAY_UPDATE);
477} 477}
478 478
479static struct xfrm_replay xfrm_replay_legacy = { 479static struct xfrm_replay xfrm_replay_legacy = {
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05640bc9594b..380976f74c4c 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,7 +54,7 @@ int __net_init xfrm_sysctl_init(struct net *net)
54 table[2].data = &net->xfrm.sysctl_larval_drop; 54 table[2].data = &net->xfrm.sysctl_larval_drop;
55 table[3].data = &net->xfrm.sysctl_acq_expires; 55 table[3].data = &net->xfrm.sysctl_acq_expires;
56 56
57 net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table); 57 net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
58 if (!net->xfrm.sysctl_hdr) 58 if (!net->xfrm.sysctl_hdr)
59 goto out_register; 59 goto out_register;
60 return 0; 60 return 0;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 66b84fbf2746..44293b3fd6a1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
756{ 756{
757 copy_to_user_state(x, p); 757 copy_to_user_state(x, p);
758 758
759 if (x->coaddr) 759 if (x->coaddr &&
760 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 760 nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
761 goto nla_put_failure;
761 762
762 if (x->lastused) 763 if (x->lastused &&
763 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); 764 nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
765 goto nla_put_failure;
764 766
765 if (x->aead) 767 if (x->aead &&
766 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); 768 nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
767 if (x->aalg) { 769 goto nla_put_failure;
768 if (copy_to_user_auth(x->aalg, skb))
769 goto nla_put_failure;
770 770
771 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC, 771 if (x->aalg &&
772 xfrm_alg_auth_len(x->aalg), x->aalg); 772 (copy_to_user_auth(x->aalg, skb) ||
773 } 773 nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
774 if (x->ealg) 774 xfrm_alg_auth_len(x->aalg), x->aalg)))
775 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); 775 goto nla_put_failure;
776 if (x->calg)
777 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
778 776
779 if (x->encap) 777 if (x->ealg &&
780 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 778 nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
779 goto nla_put_failure;
781 780
782 if (x->tfcpad) 781 if (x->calg &&
783 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad); 782 nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
783 goto nla_put_failure;
784
785 if (x->encap &&
786 nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
787 goto nla_put_failure;
788
789 if (x->tfcpad &&
790 nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
791 goto nla_put_failure;
784 792
785 if (xfrm_mark_put(skb, &x->mark)) 793 if (xfrm_mark_put(skb, &x->mark))
786 goto nla_put_failure; 794 goto nla_put_failure;
787 795
788 if (x->replay_esn) 796 if (x->replay_esn &&
789 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, 797 nla_put(skb, XFRMA_REPLAY_ESN_VAL,
790 xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); 798 xfrm_replay_state_esn_len(x->replay_esn),
799 x->replay_esn))
800 goto nla_put_failure;
791 801
792 if (x->security && copy_sec_ctx(x->security, skb) < 0) 802 if (x->security && copy_sec_ctx(x->security, skb))
793 goto nla_put_failure; 803 goto nla_put_failure;
794 804
795 return 0; 805 return 0;
@@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
912 sph.spdhcnt = si.spdhcnt; 922 sph.spdhcnt = si.spdhcnt;
913 sph.spdhmcnt = si.spdhmcnt; 923 sph.spdhmcnt = si.spdhmcnt;
914 924
915 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); 925 if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
916 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); 926 nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
927 goto nla_put_failure;
917 928
918 return nlmsg_end(skb, nlh); 929 return nlmsg_end(skb, nlh);
919 930
@@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
967 sh.sadhmcnt = si.sadhmcnt; 978 sh.sadhmcnt = si.sadhmcnt;
968 sh.sadhcnt = si.sadhcnt; 979 sh.sadhcnt = si.sadhcnt;
969 980
970 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); 981 if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
971 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); 982 nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
983 goto nla_put_failure;
972 984
973 return nlmsg_end(skb, nlh); 985 return nlmsg_end(skb, nlh);
974 986
@@ -1690,21 +1702,27 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1690 id->reqid = x->props.reqid; 1702 id->reqid = x->props.reqid;
1691 id->flags = c->data.aevent; 1703 id->flags = c->data.aevent;
1692 1704
1693 if (x->replay_esn) 1705 if (x->replay_esn) {
1694 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, 1706 if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1695 xfrm_replay_state_esn_len(x->replay_esn), 1707 xfrm_replay_state_esn_len(x->replay_esn),
1696 x->replay_esn); 1708 x->replay_esn))
1697 else 1709 goto nla_put_failure;
1698 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); 1710 } else {
1699 1711 if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1700 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); 1712 &x->replay))
1713 goto nla_put_failure;
1714 }
1715 if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
1716 goto nla_put_failure;
1701 1717
1702 if (id->flags & XFRM_AE_RTHR) 1718 if ((id->flags & XFRM_AE_RTHR) &&
1703 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); 1719 nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
1720 goto nla_put_failure;
1704 1721
1705 if (id->flags & XFRM_AE_ETHR) 1722 if ((id->flags & XFRM_AE_ETHR) &&
1706 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1723 nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1707 x->replay_maxage * 10 / HZ); 1724 x->replay_maxage * 10 / HZ))
1725 goto nla_put_failure;
1708 1726
1709 if (xfrm_mark_put(skb, &x->mark)) 1727 if (xfrm_mark_put(skb, &x->mark))
1710 goto nla_put_failure; 1728 goto nla_put_failure;
@@ -2299,8 +2317,13 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2299 if (link->dump == NULL) 2317 if (link->dump == NULL)
2300 return -EINVAL; 2318 return -EINVAL;
2301 2319
2302 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, 2320 {
2303 link->dump, link->done, 0); 2321 struct netlink_dump_control c = {
2322 .dump = link->dump,
2323 .done = link->done,
2324 };
2325 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2326 }
2304 } 2327 }
2305 2328
2306 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, 2329 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
@@ -2830,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto,
2830 ur->proto = proto; 2853 ur->proto = proto;
2831 memcpy(&ur->sel, sel, sizeof(ur->sel)); 2854 memcpy(&ur->sel, sel, sizeof(ur->sel));
2832 2855
2833 if (addr) 2856 if (addr &&
2834 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); 2857 nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
2858 goto nla_put_failure;
2835 2859
2836 return nlmsg_end(skb, nlh); 2860 return nlmsg_end(skb, nlh);
2837 2861