Diffstat (limited to 'net')
-rw-r--r-- net/802/Kconfig | 3
-rw-r--r-- net/802/Makefile | 1
-rw-r--r-- net/802/mrp.c | 895
-rw-r--r-- net/8021q/Kconfig | 11
-rw-r--r-- net/8021q/Makefile | 1
-rw-r--r-- net/8021q/vlan.c | 42
-rw-r--r-- net/8021q/vlan.h | 16
-rw-r--r-- net/8021q/vlan_core.c | 25
-rw-r--r-- net/8021q/vlan_dev.c | 20
-rw-r--r-- net/8021q/vlan_mvrp.c | 72
-rw-r--r-- net/8021q/vlan_netlink.c | 2
-rw-r--r-- net/8021q/vlanproc.c | 2
-rw-r--r-- net/9p/Kconfig | 2
-rw-r--r-- net/9p/trans_virtio.c | 5
-rw-r--r-- net/Kconfig | 9
-rw-r--r-- net/Makefile | 2
-rw-r--r-- net/atm/atm_sysfs.c | 40
-rw-r--r-- net/atm/proc.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 13
-rw-r--r-- net/batman-adv/bat_algo.h | 2
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 14
-rw-r--r-- net/batman-adv/bitarray.c | 2
-rw-r--r-- net/batman-adv/bitarray.h | 2
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 135
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.h | 2
-rw-r--r-- net/batman-adv/debugfs.c | 23
-rw-r--r-- net/batman-adv/debugfs.h | 2
-rw-r--r-- net/batman-adv/distributed-arp-table.c | 23
-rw-r--r-- net/batman-adv/distributed-arp-table.h | 2
-rw-r--r-- net/batman-adv/gateway_client.c | 2
-rw-r--r-- net/batman-adv/gateway_client.h | 2
-rw-r--r-- net/batman-adv/gateway_common.c | 2
-rw-r--r-- net/batman-adv/gateway_common.h | 2
-rw-r--r-- net/batman-adv/hard-interface.c | 26
-rw-r--r-- net/batman-adv/hard-interface.h | 2
-rw-r--r-- net/batman-adv/hash.c | 2
-rw-r--r-- net/batman-adv/hash.h | 4
-rw-r--r-- net/batman-adv/icmp_socket.c | 2
-rw-r--r-- net/batman-adv/icmp_socket.h | 2
-rw-r--r-- net/batman-adv/main.c | 2
-rw-r--r-- net/batman-adv/main.h | 12
-rw-r--r-- net/batman-adv/originator.c | 28
-rw-r--r-- net/batman-adv/originator.h | 2
-rw-r--r-- net/batman-adv/packet.h | 2
-rw-r--r-- net/batman-adv/ring_buffer.c | 2
-rw-r--r-- net/batman-adv/ring_buffer.h | 2
-rw-r--r-- net/batman-adv/routing.c | 6
-rw-r--r-- net/batman-adv/routing.h | 2
-rw-r--r-- net/batman-adv/send.c | 9
-rw-r--r-- net/batman-adv/send.h | 2
-rw-r--r-- net/batman-adv/soft-interface.c | 50
-rw-r--r-- net/batman-adv/soft-interface.h | 2
-rw-r--r-- net/batman-adv/sysfs.c | 2
-rw-r--r-- net/batman-adv/sysfs.h | 2
-rw-r--r-- net/batman-adv/translation-table.c | 89
-rw-r--r-- net/batman-adv/translation-table.h | 2
-rw-r--r-- net/batman-adv/types.h | 571
-rw-r--r-- net/batman-adv/unicast.c | 4
-rw-r--r-- net/batman-adv/unicast.h | 2
-rw-r--r-- net/batman-adv/vis.c | 40
-rw-r--r-- net/batman-adv/vis.h | 2
-rw-r--r-- net/bluetooth/a2mp.c | 42
-rw-r--r-- net/bluetooth/af_bluetooth.c | 4
-rw-r--r-- net/bluetooth/amp.c | 25
-rw-r--r-- net/bluetooth/bnep/core.c | 1
-rw-r--r-- net/bluetooth/hci_conn.c | 6
-rw-r--r-- net/bluetooth/hci_core.c | 40
-rw-r--r-- net/bluetooth/hci_event.c | 66
-rw-r--r-- net/bluetooth/hci_sysfs.c | 22
-rw-r--r-- net/bluetooth/hidp/core.c | 2
-rw-r--r-- net/bluetooth/l2cap_core.c | 16
-rw-r--r-- net/bluetooth/mgmt.c | 489
-rw-r--r-- net/bluetooth/rfcomm/Kconfig | 1
-rw-r--r-- net/bluetooth/rfcomm/core.c | 4
-rw-r--r-- net/bluetooth/rfcomm/tty.c | 21
-rw-r--r-- net/bluetooth/sco.c | 20
-rw-r--r-- net/bluetooth/smp.c | 13
-rw-r--r-- net/bridge/Kconfig | 14
-rw-r--r-- net/bridge/Makefile | 2
-rw-r--r-- net/bridge/br_device.c | 19
-rw-r--r-- net/bridge/br_fdb.c | 258
-rw-r--r-- net/bridge/br_forward.c | 9
-rw-r--r-- net/bridge/br_if.c | 22
-rw-r--r-- net/bridge/br_input.c | 28
-rw-r--r-- net/bridge/br_mdb.c | 25
-rw-r--r-- net/bridge/br_multicast.c | 82
-rw-r--r-- net/bridge/br_netlink.c | 259
-rw-r--r-- net/bridge/br_notify.c | 2
-rw-r--r-- net/bridge/br_private.h | 189
-rw-r--r-- net/bridge/br_stp_bpdu.c | 2
-rw-r--r-- net/bridge/br_stp_if.c | 4
-rw-r--r-- net/bridge/br_sysfs_br.c | 21
-rw-r--r-- net/bridge/br_vlan.c | 415
-rw-r--r-- net/bridge/netfilter/ebt_ulog.c | 6
-rw-r--r-- net/bridge/netfilter/ebtables.c | 24
-rw-r--r-- net/caif/cfcnfg.c | 2
-rw-r--r-- net/can/Kconfig | 13
-rw-r--r-- net/can/bcm.c | 14
-rw-r--r-- net/can/gw.c | 72
-rw-r--r-- net/can/proc.c | 2
-rw-r--r-- net/can/raw.c | 8
-rw-r--r-- net/ceph/Kconfig | 4
-rw-r--r-- net/ceph/ceph_common.c | 3
-rw-r--r-- net/ceph/messenger.c | 130
-rw-r--r-- net/ceph/osd_client.c | 93
-rw-r--r-- net/ceph/osdmap.c | 47
-rw-r--r-- net/core/Makefile | 3
-rw-r--r-- net/core/datagram.c | 2
-rw-r--r-- net/core/dev.c | 1742
-rw-r--r-- net/core/dev_addr_lists.c | 74
-rw-r--r-- net/core/dev_ioctl.c | 576
-rw-r--r-- net/core/dst.c | 1
-rw-r--r-- net/core/ethtool.c | 46
-rw-r--r-- net/core/filter.c | 13
-rw-r--r-- net/core/flow.c | 2
-rw-r--r-- net/core/flow_dissector.c | 173
-rw-r--r-- net/core/neighbour.c | 20
-rw-r--r-- net/core/net-procfs.c | 412
-rw-r--r-- net/core/net-sysfs.c | 184
-rw-r--r-- net/core/net_namespace.c | 32
-rw-r--r-- net/core/netpoll.c | 721
-rw-r--r-- net/core/netprio_cgroup.c | 4
-rw-r--r-- net/core/pktgen.c | 205
-rw-r--r-- net/core/request_sock.c | 2
-rw-r--r-- net/core/rtnetlink.c | 214
-rw-r--r-- net/core/scm.c | 5
-rw-r--r-- net/core/skbuff.c | 157
-rw-r--r-- net/core/sock.c | 28
-rw-r--r-- net/core/sysctl_net_core.c | 14
-rw-r--r-- net/dccp/Kconfig | 4
-rw-r--r-- net/dccp/ccids/Kconfig | 5
-rw-r--r-- net/dccp/ipv4.c | 4
-rw-r--r-- net/dccp/ipv6.c | 3
-rw-r--r-- net/dccp/probe.c | 6
-rw-r--r-- net/decnet/Kconfig | 4
-rw-r--r-- net/decnet/af_decnet.c | 10
-rw-r--r-- net/decnet/dn_dev.c | 4
-rw-r--r-- net/decnet/dn_neigh.c | 7
-rw-r--r-- net/decnet/dn_nsp_out.c | 2
-rw-r--r-- net/decnet/dn_route.c | 7
-rw-r--r-- net/decnet/netfilter/Kconfig | 2
-rw-r--r-- net/dns_resolver/dns_key.c | 15
-rw-r--r-- net/dsa/dsa.c | 6
-rw-r--r-- net/dsa/slave.c | 14
-rw-r--r-- net/ethernet/eth.c | 43
-rw-r--r-- net/ieee802154/6lowpan.c | 94
-rw-r--r-- net/ieee802154/Kconfig | 3
-rw-r--r-- net/ieee802154/wpan-class.c | 5
-rw-r--r-- net/ipv4/Kconfig | 11
-rw-r--r-- net/ipv4/af_inet.c | 25
-rw-r--r-- net/ipv4/ah4.c | 21
-rw-r--r-- net/ipv4/arp.c | 35
-rw-r--r-- net/ipv4/datagram.c | 25
-rw-r--r-- net/ipv4/devinet.c | 217
-rw-r--r-- net/ipv4/esp4.c | 12
-rw-r--r-- net/ipv4/fib_frontend.c | 2
-rw-r--r-- net/ipv4/fib_trie.c | 18
-rw-r--r-- net/ipv4/gre.c | 118
-rw-r--r-- net/ipv4/igmp.c | 11
-rw-r--r-- net/ipv4/inet_connection_sock.c | 61
-rw-r--r-- net/ipv4/inet_fragment.c | 39
-rw-r--r-- net/ipv4/inet_hashtables.c | 28
-rw-r--r-- net/ipv4/ip_fragment.c | 52
-rw-r--r-- net/ipv4/ip_gre.c | 110
-rw-r--r-- net/ipv4/ip_input.c | 7
-rw-r--r-- net/ipv4/ip_output.c | 1
-rw-r--r-- net/ipv4/ip_sockglue.c | 2
-rw-r--r-- net/ipv4/ipcomp.c | 8
-rw-r--r-- net/ipv4/ipconfig.c | 10
-rw-r--r-- net/ipv4/ipip.c | 4
-rw-r--r-- net/ipv4/ipmr.c | 130
-rw-r--r-- net/ipv4/netfilter/Kconfig | 4
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 10
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 10
-rw-r--r-- net/ipv4/netfilter/ipt_REJECT.c | 1
-rw-r--r-- net/ipv4/netfilter/ipt_ULOG.c | 12
-rw-r--r-- net/ipv4/netfilter/iptable_nat.c | 15
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 92
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 14
-rw-r--r-- net/ipv4/ping.c | 5
-rw-r--r-- net/ipv4/proc.c | 17
-rw-r--r-- net/ipv4/protocol.c | 6
-rw-r--r-- net/ipv4/raw.c | 5
-rw-r--r-- net/ipv4/route.c | 67
-rw-r--r-- net/ipv4/syncookies.c | 7
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 38
-rw-r--r-- net/ipv4/tcp.c | 31
-rw-r--r-- net/ipv4/tcp_cong.c | 42
-rw-r--r-- net/ipv4/tcp_input.c | 47
-rw-r--r-- net/ipv4/tcp_ipv4.c | 42
-rw-r--r-- net/ipv4/tcp_minisocks.c | 4
-rw-r--r-- net/ipv4/tcp_output.c | 8
-rw-r--r-- net/ipv4/tcp_probe.c | 6
-rw-r--r-- net/ipv4/udp.c | 69
-rw-r--r-- net/ipv4/xfrm4_input.c | 2
-rw-r--r-- net/ipv4/xfrm4_mode_tunnel.c | 4
-rw-r--r-- net/ipv4/xfrm4_policy.c | 58
-rw-r--r-- net/ipv6/Kconfig | 26
-rw-r--r-- net/ipv6/Makefile | 4
-rw-r--r-- net/ipv6/addrconf.c | 58
-rw-r--r-- net/ipv6/af_inet6.c | 3
-rw-r--r-- net/ipv6/ah6.c | 14
-rw-r--r-- net/ipv6/anycast.c | 20
-rw-r--r-- net/ipv6/datagram.c | 35
-rw-r--r-- net/ipv6/esp6.c | 5
-rw-r--r-- net/ipv6/exthdrs.c | 3
-rw-r--r-- net/ipv6/icmp.c | 12
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 19
-rw-r--r-- net/ipv6/inet6_hashtables.c | 19
-rw-r--r-- net/ipv6/ip6_checksum.c | 97
-rw-r--r-- net/ipv6/ip6_flowlabel.c | 171
-rw-r--r-- net/ipv6/ip6_gre.c | 11
-rw-r--r-- net/ipv6/ip6_input.c | 35
-rw-r--r-- net/ipv6/ip6_offload.c | 1
-rw-r--r-- net/ipv6/ip6_output.c | 81
-rw-r--r-- net/ipv6/ip6_tunnel.c | 4
-rw-r--r-- net/ipv6/ip6mr.c | 139
-rw-r--r-- net/ipv6/ipv6_sockglue.c | 6
-rw-r--r-- net/ipv6/mcast.c | 91
-rw-r--r-- net/ipv6/ndisc.c | 379
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 10
-rw-r--r-- net/ipv6/netfilter/ip6t_NPT.c | 47
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 3
-rw-r--r-- net/ipv6/netfilter/ip6table_mangle.c | 5
-rw-r--r-- net/ipv6/netfilter/ip6table_nat.c | 15
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 98
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 24
-rw-r--r-- net/ipv6/proc.c | 16
-rw-r--r-- net/ipv6/raw.c | 10
-rw-r--r-- net/ipv6/reassembly.c | 28
-rw-r--r-- net/ipv6/route.c | 201
-rw-r--r-- net/ipv6/sit.c | 61
-rw-r--r-- net/ipv6/syncookies.c | 2
-rw-r--r-- net/ipv6/tcp_ipv6.c | 43
-rw-r--r-- net/ipv6/udp.c | 70
-rw-r--r-- net/ipv6/udp_offload.c | 3
-rw-r--r-- net/ipv6/xfrm6_mode_tunnel.c | 4
-rw-r--r-- net/ipv6/xfrm6_policy.c | 53
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 6
-rw-r--r-- net/irda/af_irda.c | 3
-rw-r--r-- net/irda/ircomm/Kconfig | 2
-rw-r--r-- net/irda/ircomm/ircomm_tty.c | 8
-rw-r--r-- net/irda/irnet/irnet_ppp.c | 117
-rw-r--r-- net/irda/timer.c | 2
-rw-r--r-- net/iucv/iucv.c | 7
-rw-r--r-- net/key/af_key.c | 52
-rw-r--r-- net/l2tp/Kconfig | 4
-rw-r--r-- net/l2tp/l2tp_core.c | 223
-rw-r--r-- net/l2tp/l2tp_core.h | 7
-rw-r--r-- net/l2tp/l2tp_ip.c | 16
-rw-r--r-- net/l2tp/l2tp_ip6.c | 10
-rw-r--r-- net/l2tp/l2tp_netlink.c | 1
-rw-r--r-- net/l2tp/l2tp_ppp.c | 11
-rw-r--r-- net/lapb/Kconfig | 3
-rw-r--r-- net/mac80211/Kconfig | 13
-rw-r--r-- net/mac80211/Makefile | 3
-rw-r--r-- net/mac80211/agg-rx.c | 14
-rw-r--r-- net/mac80211/agg-tx.c | 300
-rw-r--r-- net/mac80211/cfg.c | 285
-rw-r--r-- net/mac80211/chan.c | 164
-rw-r--r-- net/mac80211/debug.h | 10
-rw-r--r-- net/mac80211/debugfs.c | 6
-rw-r--r-- net/mac80211/debugfs_netdev.c | 5
-rw-r--r-- net/mac80211/debugfs_sta.c | 5
-rw-r--r-- net/mac80211/driver-ops.h | 99
-rw-r--r-- net/mac80211/ht.c | 115
-rw-r--r-- net/mac80211/ibss.c | 119
-rw-r--r-- net/mac80211/ieee80211_i.h | 244
-rw-r--r-- net/mac80211/iface.c | 219
-rw-r--r-- net/mac80211/key.c | 5
-rw-r--r-- net/mac80211/main.c | 173
-rw-r--r-- net/mac80211/mesh.c | 413
-rw-r--r-- net/mac80211/mesh.h | 145
-rw-r--r-- net/mac80211/mesh_hwmp.c | 125
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 95
-rw-r--r-- net/mac80211/mesh_plink.c | 410
-rw-r--r-- net/mac80211/mesh_ps.c | 598
-rw-r--r-- net/mac80211/mesh_sync.c | 47
-rw-r--r-- net/mac80211/mlme.c | 1195
-rw-r--r-- net/mac80211/offchannel.c | 67
-rw-r--r-- net/mac80211/pm.c | 76
-rw-r--r-- net/mac80211/rate.h | 2
-rw-r--r-- net/mac80211/rc80211_minstrel.c | 29
-rw-r--r-- net/mac80211/rc80211_minstrel.h | 2
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 181
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.h | 5
-rw-r--r-- net/mac80211/rc80211_minstrel_ht_debugfs.c | 112
-rw-r--r-- net/mac80211/rx.c | 178
-rw-r--r-- net/mac80211/scan.c | 136
-rw-r--r-- net/mac80211/sta_info.c | 109
-rw-r--r-- net/mac80211/sta_info.h | 67
-rw-r--r-- net/mac80211/status.c | 25
-rw-r--r-- net/mac80211/tkip.c | 10
-rw-r--r-- net/mac80211/trace.h | 104
-rw-r--r-- net/mac80211/tx.c | 189
-rw-r--r-- net/mac80211/util.c | 107
-rw-r--r-- net/mac80211/vht.c | 172
-rw-r--r-- net/mac80211/wme.c | 13
-rw-r--r-- net/mac80211/wpa.c | 5
-rw-r--r-- net/mac802154/Kconfig | 2
-rw-r--r-- net/mac802154/ieee802154_dev.c | 4
-rw-r--r-- net/mac802154/wpan.c | 11
-rw-r--r-- net/netfilter/Kconfig | 59
-rw-r--r-- net/netfilter/Makefile | 3
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 26
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ip.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipport.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportip.c | 4
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportnet.c | 4
-rw-r--r-- net/netfilter/ipset/ip_set_hash_net.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netiface.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netport.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_app.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 11
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 14
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto_sctp.c | 35
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_acct.c | 36
-rw-r--r-- net/netfilter/nf_conntrack_amanda.c | 5
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 192
-rw-r--r-- net/netfilter/nf_conntrack_ecache.c | 37
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 58
-rw-r--r-- net/netfilter/nf_conntrack_ftp.c | 10
-rw-r--r-- net/netfilter/nf_conntrack_h323_main.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_helper.c | 76
-rw-r--r-- net/netfilter/nf_conntrack_irc.c | 7
-rw-r--r-- net/netfilter/nf_conntrack_labels.c | 112
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 105
-rw-r--r-- net/netfilter/nf_conntrack_pptp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_proto.c | 92
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 43
-rw-r--r-- net/netfilter/nf_conntrack_proto_gre.c | 23
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 43
-rw-r--r-- net/netfilter/nf_conntrack_proto_udplite.c | 40
-rw-r--r-- net/netfilter/nf_conntrack_sane.c | 5
-rw-r--r-- net/netfilter/nf_conntrack_sip.c | 90
-rw-r--r-- net/netfilter/nf_conntrack_snmp.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 70
-rw-r--r-- net/netfilter/nf_conntrack_tftp.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_timeout.c | 23
-rw-r--r-- net/netfilter/nf_conntrack_timestamp.c | 39
-rw-r--r-- net/netfilter/nf_nat_amanda.c | 8
-rw-r--r-- net/netfilter/nf_nat_ftp.c | 5
-rw-r--r-- net/netfilter/nf_nat_irc.c | 8
-rw-r--r-- net/netfilter/nf_nat_sip.c | 72
-rw-r--r-- net/netfilter/nf_nat_tftp.c | 4
-rw-r--r-- net/netfilter/nfnetlink.c | 52
-rw-r--r-- net/netfilter/nfnetlink_log.c | 16
-rw-r--r-- net/netfilter/x_tables.c | 38
-rw-r--r-- net/netfilter/xt_CT.c | 229
-rw-r--r-- net/netfilter/xt_bpf.c | 73
-rw-r--r-- net/netfilter/xt_connlabel.c | 99
-rw-r--r-- net/netfilter/xt_hashlimit.c | 60
-rw-r--r-- net/netfilter/xt_recent.c | 45
-rw-r--r-- net/netlink/af_netlink.c | 12
-rw-r--r-- net/netrom/af_netrom.c | 12
-rw-r--r-- net/nfc/core.c | 54
-rw-r--r-- net/nfc/hci/command.c | 7
-rw-r--r-- net/nfc/hci/core.c | 57
-rw-r--r-- net/nfc/hci/hcp.c | 7
-rw-r--r-- net/nfc/llcp/commands.c | 2
-rw-r--r-- net/nfc/llcp/llcp.c | 49
-rw-r--r-- net/nfc/llcp/llcp.h | 1
-rw-r--r-- net/nfc/llcp/sock.c | 22
-rw-r--r-- net/nfc/nci/core.c | 2
-rw-r--r-- net/nfc/netlink.c | 1
-rw-r--r-- net/openvswitch/datapath.c | 5
-rw-r--r-- net/openvswitch/vport-internal_dev.c | 3
-rw-r--r-- net/openvswitch/vport-netdev.c | 16
-rw-r--r-- net/packet/af_packet.c | 14
-rw-r--r-- net/phonet/pn_dev.c | 8
-rw-r--r-- net/rds/Kconfig | 4
-rw-r--r-- net/rds/ib_cm.c | 11
-rw-r--r-- net/rds/ib_recv.c | 9
-rw-r--r-- net/rfkill/input.c | 8
-rw-r--r-- net/rose/af_rose.c | 19
-rw-r--r-- net/rxrpc/Kconfig | 2
-rw-r--r-- net/rxrpc/af_rxrpc.c | 13
-rw-r--r-- net/sched/act_api.c | 18
-rw-r--r-- net/sched/act_csum.c | 2
-rw-r--r-- net/sched/act_gact.c | 5
-rw-r--r-- net/sched/act_ipt.c | 8
-rw-r--r-- net/sched/act_mirred.c | 7
-rw-r--r-- net/sched/act_nat.c | 2
-rw-r--r-- net/sched/act_pedit.c | 8
-rw-r--r-- net/sched/act_police.c | 105
-rw-r--r-- net/sched/act_simple.c | 5
-rw-r--r-- net/sched/act_skbedit.c | 5
-rw-r--r-- net/sched/cls_api.c | 11
-rw-r--r-- net/sched/cls_basic.c | 13
-rw-r--r-- net/sched/cls_cgroup.c | 5
-rw-r--r-- net/sched/cls_flow.c | 4
-rw-r--r-- net/sched/cls_fw.c | 10
-rw-r--r-- net/sched/cls_route.c | 15
-rw-r--r-- net/sched/cls_rsvp.h | 4
-rw-r--r-- net/sched/cls_tcindex.c | 14
-rw-r--r-- net/sched/cls_u32.c | 13
-rw-r--r-- net/sched/sch_api.c | 10
-rw-r--r-- net/sched/sch_generic.c | 37
-rw-r--r-- net/sched/sch_htb.c | 82
-rw-r--r-- net/sched/sch_netem.c | 12
-rw-r--r-- net/sched/sch_tbf.c | 76
-rw-r--r-- net/sctp/Kconfig | 31
-rw-r--r-- net/sctp/associola.c | 5
-rw-r--r-- net/sctp/auth.c | 31
-rw-r--r-- net/sctp/endpointola.c | 6
-rw-r--r-- net/sctp/input.c | 3
-rw-r--r-- net/sctp/ipv6.c | 5
-rw-r--r-- net/sctp/outqueue.c | 16
-rw-r--r-- net/sctp/probe.c | 26
-rw-r--r-- net/sctp/protocol.c | 6
-rw-r--r-- net/sctp/sm_make_chunk.c | 33
-rw-r--r-- net/sctp/sm_sideeffect.c | 6
-rw-r--r-- net/sctp/sm_statefuns.c | 4
-rw-r--r-- net/sctp/socket.c | 2
-rw-r--r-- net/sctp/sysctl.c | 4
-rw-r--r-- net/sctp/transport.c | 20
-rw-r--r-- net/socket.c | 19
-rw-r--r-- net/sunrpc/Kconfig | 2
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 18
-rw-r--r-- net/sunrpc/auth_gss/gss_mech_switch.c | 4
-rw-r--r-- net/sunrpc/backchannel_rqst.c | 9
-rw-r--r-- net/sunrpc/bc_svc.c | 2
-rw-r--r-- net/sunrpc/cache.c | 4
-rw-r--r-- net/sunrpc/clnt.c | 61
-rw-r--r-- net/sunrpc/rpc_pipe.c | 9
-rw-r--r-- net/sunrpc/rpcb_clnt.c | 6
-rw-r--r-- net/sunrpc/sched.c | 117
-rw-r--r-- net/sunrpc/svc.c | 20
-rw-r--r-- net/sunrpc/svc_xprt.c | 11
-rw-r--r-- net/sunrpc/svcsock.c | 106
-rw-r--r-- net/sunrpc/xdr.c | 5
-rw-r--r-- net/sunrpc/xprt.c | 33
-rw-r--r-- net/sunrpc/xprtrdma/rpc_rdma.c | 4
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 10
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4
-rw-r--r-- net/sunrpc/xprtrdma/transport.c | 7
-rw-r--r-- net/sunrpc/xprtrdma/xprt_rdma.h | 6
-rw-r--r-- net/sunrpc/xprtsock.c | 35
-rw-r--r-- net/tipc/Kconfig | 4
-rw-r--r-- net/tipc/bcast.c | 1
-rw-r--r-- net/tipc/socket.c | 103
-rw-r--r-- net/unix/af_unix.c | 7
-rw-r--r-- net/vmw_vsock/Kconfig | 28
-rw-r--r-- net/vmw_vsock/Makefile | 7
-rw-r--r-- net/vmw_vsock/af_vsock.c | 2012
-rw-r--r-- net/vmw_vsock/af_vsock.h | 175
-rw-r--r-- net/vmw_vsock/vmci_transport.c | 2155
-rw-r--r-- net/vmw_vsock/vmci_transport.h | 139
-rw-r--r-- net/vmw_vsock/vmci_transport_notify.c | 680
-rw-r--r-- net/vmw_vsock/vmci_transport_notify.h | 83
-rw-r--r-- net/vmw_vsock/vmci_transport_notify_qstate.c | 438
-rw-r--r-- net/vmw_vsock/vsock_addr.c | 86
-rw-r--r-- net/vmw_vsock/vsock_addr.h | 32
-rw-r--r-- net/wanrouter/Kconfig | 27
-rw-r--r-- net/wanrouter/Makefile | 7
-rw-r--r-- net/wanrouter/patchlevel | 1
-rw-r--r-- net/wanrouter/wanmain.c | 782
-rw-r--r-- net/wanrouter/wanproc.c | 380
-rw-r--r-- net/wireless/ap.c | 62
-rw-r--r-- net/wireless/chan.c | 146
-rw-r--r-- net/wireless/core.c | 42
-rw-r--r-- net/wireless/core.h | 60
-rw-r--r-- net/wireless/ethtool.c | 4
-rw-r--r-- net/wireless/ibss.c | 4
-rw-r--r-- net/wireless/mesh.c | 8
-rw-r--r-- net/wireless/mlme.c | 162
-rw-r--r-- net/wireless/nl80211.c | 1148
-rw-r--r-- net/wireless/nl80211.h | 7
-rw-r--r-- net/wireless/rdev-ops.h | 12
-rw-r--r-- net/wireless/reg.c | 1149
-rw-r--r-- net/wireless/reg.h | 7
-rw-r--r-- net/wireless/scan.c | 632
-rw-r--r-- net/wireless/sme.c | 25
-rw-r--r-- net/wireless/sysfs.c | 6
-rw-r--r-- net/wireless/trace.h | 98
-rw-r--r-- net/wireless/util.c | 42
-rw-r--r-- net/wireless/wext-proc.c | 5
-rw-r--r-- net/x25/Kconfig | 3
-rw-r--r-- net/xfrm/Kconfig | 16
-rw-r--r-- net/xfrm/xfrm_algo.c | 68
-rw-r--r-- net/xfrm/xfrm_output.c | 6
-rw-r--r-- net/xfrm/xfrm_policy.c | 273
-rw-r--r-- net/xfrm/xfrm_proc.c | 7
-rw-r--r-- net/xfrm/xfrm_replay.c | 4
-rw-r--r-- net/xfrm/xfrm_state.c | 165
-rw-r--r-- net/xfrm/xfrm_user.c | 2
487 files changed, 24575 insertions(+), 10272 deletions(-)
diff --git a/net/802/Kconfig b/net/802/Kconfig
index be33d27c8e69..80d4bf78905d 100644
--- a/net/802/Kconfig
+++ b/net/802/Kconfig
@@ -5,3 +5,6 @@ config STP
 config GARP
 	tristate
 	select STP
+
+config MRP
+	tristate
diff --git a/net/802/Makefile b/net/802/Makefile
index a30d6e385aed..37e654d6615e 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
 obj-$(CONFIG_ATALK)	+= p8022.o psnap.o
 obj-$(CONFIG_STP)	+= stp.o
 obj-$(CONFIG_GARP)	+= garp.o
+obj-$(CONFIG_MRP)	+= mrp.o
diff --git a/net/802/mrp.c b/net/802/mrp.c
new file mode 100644
index 000000000000..a4cc3229952a
--- /dev/null
+++ b/net/802/mrp.c
@@ -0,0 +1,895 @@
+/*
+ *	IEEE 802.1Q Multiple Registration Protocol (MRP)
+ *
+ *	Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ *	Adapted from code in net/802/garp.c
+ *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	version 2 as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/mrp.h>
+#include <asm/unaligned.h>
+
+static unsigned int mrp_join_time __read_mostly = 200;
+module_param(mrp_join_time, uint, 0644);
+MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+MODULE_LICENSE("GPL");
+
+static const u8
+mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
+	[MRP_APPLICANT_VO] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
+	},
+	[MRP_APPLICANT_VP] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
+	},
+	[MRP_APPLICANT_VN] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
+	},
+	[MRP_APPLICANT_AN] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
+	},
+	[MRP_APPLICANT_AA] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
+	},
+	[MRP_APPLICANT_QA] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
+	},
+	[MRP_APPLICANT_LA] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
+	},
+	[MRP_APPLICANT_AO] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
+	},
+	[MRP_APPLICANT_QO] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
+	},
+	[MRP_APPLICANT_AP] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
+	},
+	[MRP_APPLICANT_QP] = {
+		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
+		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
+		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
+		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
+		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
+		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
+		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
+	},
+};
+
+static const u8
+mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
+	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
+	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
+	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
+	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
+	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
+	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
+	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
+};
+
+static void mrp_attrvalue_inc(void *value, u8 len)
+{
+	u8 *v = (u8 *)value;
+
+	/* Add 1 to the last byte. If it becomes zero,
+	 * go to the previous byte and repeat.
+	 */
+	while (len > 0 && !++v[--len])
+		;
+}
+
+static int mrp_attr_cmp(const struct mrp_attr *attr,
+			const void *value, u8 len, u8 type)
+{
+	if (attr->type != type)
+		return attr->type - type;
+	if (attr->len != len)
+		return attr->len - len;
+	return memcmp(attr->value, value, len);
+}
+
+static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
+					const void *value, u8 len, u8 type)
+{
+	struct rb_node *parent = app->mad.rb_node;
+	struct mrp_attr *attr;
+	int d;
+
+	while (parent) {
+		attr = rb_entry(parent, struct mrp_attr, node);
+		d = mrp_attr_cmp(attr, value, len, type);
+		if (d > 0)
+			parent = parent->rb_left;
+		else if (d < 0)
+			parent = parent->rb_right;
+		else
+			return attr;
+	}
+	return NULL;
+}
+
+static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
+					const void *value, u8 len, u8 type)
+{
+	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
+	struct mrp_attr *attr;
+	int d;
+
+	while (*p) {
+		parent = *p;
+		attr = rb_entry(parent, struct mrp_attr, node);
+		d = mrp_attr_cmp(attr, value, len, type);
+		if (d > 0)
+			p = &parent->rb_left;
+		else if (d < 0)
+			p = &parent->rb_right;
+		else {
+			/* The attribute already exists; re-use it. */
+			return attr;
+		}
+	}
+	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+	if (!attr)
+		return attr;
+	attr->state = MRP_APPLICANT_VO;
+	attr->type  = type;
+	attr->len   = len;
+	memcpy(attr->value, value, len);
+
+	rb_link_node(&attr->node, parent, p);
+	rb_insert_color(&attr->node, &app->mad);
+	return attr;
+}
+
+static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+{
+	rb_erase(&attr->node, &app->mad);
+	kfree(attr);
+}
+
+static int mrp_pdu_init(struct mrp_applicant *app)
+{
+	struct sk_buff *skb;
+	struct mrp_pdu_hdr *ph;
+
+	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+			GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb->dev = app->dev;
+	skb->protocol = app->app->pkttype.type;
+	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
+	ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
+	ph->version = app->app->version;
+
+	app->pdu = skb;
+	return 0;
+}
+
+static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
+{
+	__be16 *endmark;
+
+	if (skb_tailroom(app->pdu) < sizeof(*endmark))
+		return -1;
+	endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
+	put_unaligned(MRP_END_MARK, endmark);
+	return 0;
+}
+
+static void mrp_pdu_queue(struct mrp_applicant *app)
+{
+	if (!app->pdu)
+		return;
+
+	if (mrp_cb(app->pdu)->mh)
+		mrp_pdu_append_end_mark(app);
+	mrp_pdu_append_end_mark(app);
+
+	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
+			app->app->group_address, app->dev->dev_addr,
+			app->pdu->len);
+
+	skb_queue_tail(&app->queue, app->pdu);
+	app->pdu = NULL;
+}
+
+static void mrp_queue_xmit(struct mrp_applicant *app)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&app->queue)))
+		dev_queue_xmit(skb);
+}
+
+static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
+				  u8 attrtype, u8 attrlen)
+{
+	struct mrp_msg_hdr *mh;
+
+	if (mrp_cb(app->pdu)->mh) {
+		if (mrp_pdu_append_end_mark(app) < 0)
+			return -1;
+		mrp_cb(app->pdu)->mh = NULL;
+		mrp_cb(app->pdu)->vah = NULL;
+	}
+
+	if (skb_tailroom(app->pdu) < sizeof(*mh))
+		return -1;
+	mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
+	mh->attrtype = attrtype;
+	mh->attrlen = attrlen;
+	mrp_cb(app->pdu)->mh = mh;
+	return 0;
+}
+
+static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
+				      const void *firstattrvalue, u8 attrlen)
+{
+	struct mrp_vecattr_hdr *vah;
+
+	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
+		return -1;
+	vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
+						  sizeof(*vah) + attrlen);
+	put_unaligned(0, &vah->lenflags);
+	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
+	mrp_cb(app->pdu)->vah = vah;
+	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
+	return 0;
+}
+
+static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
+					const struct mrp_attr *attr,
+					enum mrp_vecattr_event vaevent)
+{
+	u16 len, pos;
+	u8 *vaevents;
+	int err;
+again:
+	if (!app->pdu) {
+		err = mrp_pdu_init(app);
+		if (err < 0)
+			return err;
+	}
+
+	/* If there is no Message header in the PDU, or the Message header is
+	 * for a different attribute type, add an EndMark (if necessary) and a
+	 * new Message header to the PDU.
+	 */
+	if (!mrp_cb(app->pdu)->mh ||
+	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
+	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
+		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
+			goto queue;
+	}
+
+	/* If there is no VectorAttribute header for this Message in the PDU,
+	 * or this attribute's value does not sequentially follow the previous
+	 * attribute's value, add a new VectorAttribute header to the PDU.
+	 */
+	if (!mrp_cb(app->pdu)->vah ||
+	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
+		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
+			goto queue;
+	}
+
+	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
+	pos = len % 3;
+
+	/* Events are packed into Vectors in the PDU, three to a byte. Add a
+	 * byte to the end of the Vector if necessary.
+	 */
+	if (!pos) {
+		if (skb_tailroom(app->pdu) < sizeof(u8))
+			goto queue;
+		vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
+	} else {
+		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
+	}
+
+	switch (pos) {
+	case 0:
+		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
+				       __MRP_VECATTR_EVENT_MAX);
+		break;
+	case 1:
+		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
+		break;
+	case 2:
+		*vaevents += vaevent;
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	/* Increment the length of the VectorAttribute in the PDU, as well as
+	 * the value of the next attribute that would continue its Vector.
+	 */
+	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
+	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
+
+	return 0;
+
+queue:
+	mrp_pdu_queue(app);
+	goto again;
+}
+
+static void mrp_attr_event(struct mrp_applicant *app,
+			   struct mrp_attr *attr, enum mrp_event event)
+{
+	enum mrp_applicant_state state;
+
+	state = mrp_applicant_state_table[attr->state][event];
+	if (state == MRP_APPLICANT_INVALID) {
+		WARN_ON(1);
+		return;
+	}
+
+	if (event == MRP_EVENT_TX) {
+		/* When appending the attribute fails, don't update its state
+		 * in order to retry at the next TX event.
+		 */
+
+		switch (mrp_tx_action_table[attr->state]) {
+		case MRP_TX_ACTION_NONE:
+		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
+		case MRP_TX_ACTION_S_IN_OPTIONAL:
+			break;
+		case MRP_TX_ACTION_S_NEW:
+			if (mrp_pdu_append_vecattr_event(
+				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
+				return;
+			break;
+		case MRP_TX_ACTION_S_JOIN_IN:
+			if (mrp_pdu_append_vecattr_event(
+				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
+				return;
+			break;
+		case MRP_TX_ACTION_S_LV:
+			if (mrp_pdu_append_vecattr_event(
+				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
+				return;
+			/* As a pure applicant, sending a leave message
+			 * implies that the attribute was unregistered and
+			 * can be destroyed.
+			 */
+			mrp_attr_destroy(app, attr);
+			return;
+		default:
+			WARN_ON(1);
+		}
+	}
+
+	attr->state = state;
+}
+
+int mrp_request_join(const struct net_device *dev,
+		     const struct mrp_application *appl,
+		     const void *value, u8 len, u8 type)
+{
+	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+	struct mrp_applicant *app = rtnl_dereference(
+		port->applicants[appl->type]);
+	struct mrp_attr *attr;
+
+	if (sizeof(struct mrp_skb_cb) + len >
+	    FIELD_SIZEOF(struct sk_buff, cb))
+		return -ENOMEM;
+
+	spin_lock_bh(&app->lock);
+	attr = mrp_attr_create(app, value, len, type);
+	if (!attr) {
+		spin_unlock_bh(&app->lock);
+		return -ENOMEM;
+	}
+	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
+	spin_unlock_bh(&app->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_request_join);
+
+void mrp_request_leave(const struct net_device *dev,
+		       const struct mrp_application *appl,
+		       const void *value, u8 len, u8 type)
+{
+	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+	struct mrp_applicant *app = rtnl_dereference(
+		port->applicants[appl->type]);
+	struct mrp_attr *attr;
+
+	if (sizeof(struct mrp_skb_cb) + len >
+	    FIELD_SIZEOF(struct sk_buff, cb))
+		return;
+
+	spin_lock_bh(&app->lock);
+	attr = mrp_attr_lookup(app, value, len, type);
+	if (!attr) {
+		spin_unlock_bh(&app->lock);
+		return;
+	}
+	mrp_attr_event(app, attr, MRP_EVENT_LV);
+	spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(mrp_request_leave);
+
+static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_event(app, attr, event);
+	}
+}
+
+static void mrp_join_timer_arm(struct mrp_applicant *app)
+{
+	unsigned long delay;
+
+	delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
+	mod_timer(&app->join_timer, jiffies + delay);
+}
+
+static void mrp_join_timer(unsigned long data)
+{
+	struct mrp_applicant *app = (struct mrp_applicant *)data;
+
+	spin_lock(&app->lock);
+	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_pdu_queue(app);
+	spin_unlock(&app->lock);
+
+	mrp_queue_xmit(app);
+	mrp_join_timer_arm(app);
+}
+
+static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+{
+	__be16 endmark;
+
+	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
+		return -1;
+	if (endmark == MRP_END_MARK) {
+		*offset += sizeof(endmark);
+		return -1;
+	}
+	return 0;
+}
+
+static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
+					struct sk_buff *skb,
+					enum mrp_vecattr_event vaevent)
+{
+	struct mrp_attr *attr;
+	enum mrp_event event;
+
+	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
+			       mrp_cb(skb)->mh->attrlen,
+			       mrp_cb(skb)->mh->attrtype);
+	if (attr == NULL)
+		return;
+
+	switch (vaevent) {
+	case MRP_VECATTR_EVENT_NEW:
+		event = MRP_EVENT_R_NEW;
+		break;
+	case MRP_VECATTR_EVENT_JOIN_IN:
+		event = MRP_EVENT_R_JOIN_IN;
+		break;
+	case MRP_VECATTR_EVENT_IN:
+		event = MRP_EVENT_R_IN;
+		break;
+	case MRP_VECATTR_EVENT_JOIN_MT:
+		event = MRP_EVENT_R_JOIN_MT;
+		break;
+	case MRP_VECATTR_EVENT_MT:
+		event = MRP_EVENT_R_MT;
+		break;
+	case MRP_VECATTR_EVENT_LV:
+		event = MRP_EVENT_R_LV;
+		break;
+	default:
+		return;
+	}
+
+	mrp_attr_event(app, attr, event);
+}
+
+static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
+				 struct sk_buff *skb, int *offset)
+{
+	struct mrp_vecattr_hdr _vah;
+	u16 valen;
+	u8 vaevents, vaevent;
+
+	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
+					      &_vah);
+	if (!mrp_cb(skb)->vah)
+		return -1;
+	*offset += sizeof(_vah);
+
+	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+	    MRP_VECATTR_HDR_FLAG_LA)
+		mrp_mad_event(app, MRP_EVENT_R_LA);
+	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+			    MRP_VECATTR_HDR_LEN_MASK);
+
+	/* The VectorAttribute structure in a PDU carries event information
+	 * about one or more attributes having consecutive values. Only the
+	 * value for the first attribute is contained in the structure. So
+	 * we make a copy of that value, and then increment it each time we
+	 * advance to the next event in its Vector.
+	 */
+	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
+	    FIELD_SIZEOF(struct sk_buff, cb))
+		return -1;
+	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
+			  mrp_cb(skb)->mh->attrlen) < 0)
+		return -1;
+	*offset += mrp_cb(skb)->mh->attrlen;
+
+	/* In a VectorAttribute, the Vector contains events which are packed
+	 * three to a byte. We process one byte of the Vector at a time.
+	 */
+	while (valen > 0) {
+		if (skb_copy_bits(skb, *offset, &vaevents,
+				  sizeof(vaevents)) < 0)
+			return -1;
+		*offset += sizeof(vaevents);
+
+		/* Extract and process the first event. */
+		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
+				      __MRP_VECATTR_EVENT_MAX);
+		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
+			/* The byte is malformed; stop processing. */
+			return -1;
+		}
+		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+		/* If present, extract and process the second event. */
+		if (!--valen)
+			break;
+		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+				  mrp_cb(skb)->mh->attrlen);
+		vaevents %= (__MRP_VECATTR_EVENT_MAX *
+			     __MRP_VECATTR_EVENT_MAX);
+		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
+		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+		/* If present, extract and process the third event. */
+		if (!--valen)
+			break;
+		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+				  mrp_cb(skb)->mh->attrlen);
+		vaevents %= __MRP_VECATTR_EVENT_MAX;
+		vaevent = vaevents;
+		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+	}
+	return 0;
+}
+
+static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
+			     int *offset)
+{
+	struct mrp_msg_hdr _mh;
+
+	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
+	if (!mrp_cb(skb)->mh)
+		return -1;
+	*offset += sizeof(_mh);
+
+	if (mrp_cb(skb)->mh->attrtype == 0 ||
+	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
+	    mrp_cb(skb)->mh->attrlen == 0)
+		return -1;
+
+	while (skb->len > *offset) {
+		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
+			break;
+		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
+			return -1;
+	}
+	return 0;
+}
+
+static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
+		   struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct mrp_application *appl = container_of(pt, struct mrp_application,
+						    pkttype);
+	struct mrp_port *port;
+	struct mrp_applicant *app;
+	struct mrp_pdu_hdr _ph;
+	const struct mrp_pdu_hdr *ph;
+	int offset = skb_network_offset(skb);
+
+	/* If the interface is in promiscuous mode, drop the packet if
+	 * it was unicast to another host.
+	 */
+	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
+		goto out;
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		goto out;
+	port = rcu_dereference(dev->mrp_port);
+	if (unlikely(!port))
+		goto out;
+	app = rcu_dereference(port->applicants[appl->type]);
+	if (unlikely(!app))
+		goto out;
+
+	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
+	if (!ph)
+		goto out;
+	offset += sizeof(_ph);
+
+	if (ph->version != app->app->version)
+		goto out;
+
+	spin_lock(&app->lock);
+	while (skb->len > offset) {
+		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
+			break;
+		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
+			break;
+	}
+	spin_unlock(&app->lock);
+out:
+	kfree_skb(skb);
+	return 0;
+}
+
+static int mrp_init_port(struct net_device *dev)
+{
+	struct mrp_port *port;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+	rcu_assign_pointer(dev->mrp_port, port);
+	return 0;
+}
+
+static void mrp_release_port(struct net_device *dev)
+{
+	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+	unsigned int i;
+
+	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
+		if (rtnl_dereference(port->applicants[i]))
+			return;
+	}
+	RCU_INIT_POINTER(dev->mrp_port, NULL);
+	kfree_rcu(port, rcu);
+}
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+	struct mrp_applicant *app;
+	int err;
+
+	ASSERT_RTNL();
+
+	if (!rtnl_dereference(dev->mrp_port)) {
+		err = mrp_init_port(dev);
+		if (err < 0)
+			goto err1;
+	}
+
+	err = -ENOMEM;
+	app = kzalloc(sizeof(*app), GFP_KERNEL);
+	if (!app)
+		goto err2;
+
+	err = dev_mc_add(dev, appl->group_address);
+	if (err < 0)
+		goto err3;
+
+	app->dev = dev;
+	app->app = appl;
+	app->mad = RB_ROOT;
+	spin_lock_init(&app->lock);
+	skb_queue_head_init(&app->queue);
+	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+	setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+	mrp_join_timer_arm(app);
+	return 0;
+
+err3:
+	kfree(app);
+err2:
+	mrp_release_port(dev);
+err1:
+	return err;
+}
+EXPORT_SYMBOL_GPL(mrp_init_applicant);
+
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+	struct mrp_applicant *app = rtnl_dereference(
+		port->applicants[appl->type]);
+
+	ASSERT_RTNL();
+
+	RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
+	/* Delete timer and generate a final TX event to flush out
+	 * all pending messages before the applicant is gone.
+	 */
+	del_timer_sync(&app->join_timer);
+	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_pdu_queue(app);
+	mrp_queue_xmit(app);
+
+	dev_mc_del(dev, appl->group_address);
+	kfree_rcu(app, rcu);
+	mrp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
+
+int mrp_register_application(struct mrp_application *appl)
+{
+	appl->pkttype.func = mrp_rcv;
+	dev_add_pack(&appl->pkttype);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_register_application);
+
+void mrp_unregister_application(struct mrp_application *appl)
+{
+	dev_remove_pack(&appl->pkttype);
+}
+EXPORT_SYMBOL_GPL(mrp_unregister_application);
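Example (illustrative, not part of the patch). Two encoding details in mrp.c above are easy to miss: vector events are packed three to a byte in base __MRP_VECATTR_EVENT_MAX (6 in <net/mrp.h>), and the "next" attribute value in a vector is produced by a big-endian multi-byte increment with carry. A standalone userspace C sketch of the same arithmetic, assuming that constant; helper names are invented and merely mirror the kernel functions:

	#include <stdio.h>
	#include <stdint.h>

	#define EVENT_MAX 6	/* mirrors __MRP_VECATTR_EVENT_MAX */

	/* Pack three events into one byte, as mrp_pdu_append_vecattr_event()
	 * does across three calls (pos == 0, 1, 2).
	 */
	static uint8_t pack(uint8_t e0, uint8_t e1, uint8_t e2)
	{
		return e0 * (EVENT_MAX * EVENT_MAX) + e1 * EVENT_MAX + e2;
	}

	/* Multi-byte increment with carry, as mrp_attrvalue_inc() does. */
	static void value_inc(uint8_t *v, int len)
	{
		while (len > 0 && !++v[--len])
			;
	}

	int main(void)
	{
		uint8_t vid[2] = { 0x0f, 0xff };	/* big-endian VLAN ID 4095 */
		uint8_t b = pack(1, 1, 1);		/* three JoinIn events -> 0x2b */

		value_inc(vid, sizeof(vid));		/* carries: 0x0fff -> 0x1000 */
		printf("packed byte 0x%02x, next value %02x%02x\n", b, vid[0], vid[1]);
		return 0;
	}

The same packing explains the parser: mrp_pdu_parse_vecattr() above recovers the three events per byte by dividing by 36, then 6, then taking the remainder.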
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index fa073a54963e..8f7517df41a5 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -27,3 +27,14 @@ config VLAN_8021Q_GVRP
 	  automatic propagation of registered VLANs to switches.
 
 	  If unsure, say N.
+
+config VLAN_8021Q_MVRP
+	bool "MVRP (Multiple VLAN Registration Protocol) support"
+	depends on VLAN_8021Q
+	select MRP
+	help
+	  Select this to enable MVRP end-system support. MVRP is used for
+	  automatic propagation of registered VLANs to switches; it
+	  supersedes GVRP and is not backwards-compatible.
+
+	  If unsure, say N.
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 9f4f174ead1c..7bc8db08d7ef 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q.o
 
 8021q-y				:= vlan.o vlan_dev.o vlan_netlink.o
 8021q-$(CONFIG_VLAN_8021Q_GVRP)	+= vlan_gvrp.o
+8021q-$(CONFIG_VLAN_8021Q_MVRP)	+= vlan_mvrp.o
 8021q-$(CONFIG_PROC_FS)		+= vlanproc.o
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a292e8050ef2..a18714469bf7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -95,6 +95,8 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
 	grp->nr_vlan_devs--;
 
+	if (vlan->flags & VLAN_FLAG_MVRP)
+		vlan_mvrp_request_leave(dev);
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
 
@@ -105,8 +107,12 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	 */
 	unregister_netdevice_queue(dev, head);
 
-	if (grp->nr_vlan_devs == 0)
+	netdev_upper_dev_unlink(real_dev, dev);
+
+	if (grp->nr_vlan_devs == 0) {
+		vlan_mvrp_uninit_applicant(real_dev);
 		vlan_gvrp_uninit_applicant(real_dev);
+	}
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
@@ -115,19 +121,12 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 {
 	const char *name = real_dev->name;
-	const struct net_device_ops *ops = real_dev->netdev_ops;
 
 	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
 		pr_info("VLANs not supported on %s\n", name);
 		return -EOPNOTSUPP;
 	}
 
-	if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-	    (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
-		pr_info("Device %s has buggy VLAN hw accel\n", name);
-		return -EOPNOTSUPP;
-	}
-
 	if (vlan_find_dev(real_dev, vlan_id) != NULL)
 		return -EEXIST;
 
@@ -156,15 +155,22 @@ int register_vlan_dev(struct net_device *dev)
 		err = vlan_gvrp_init_applicant(real_dev);
 		if (err < 0)
 			goto out_vid_del;
+		err = vlan_mvrp_init_applicant(real_dev);
+		if (err < 0)
+			goto out_uninit_gvrp;
 	}
 
 	err = vlan_group_prealloc_vid(grp, vlan_id);
 	if (err < 0)
-		goto out_uninit_applicant;
+		goto out_uninit_mvrp;
+
+	err = netdev_upper_dev_link(real_dev, dev);
+	if (err)
+		goto out_uninit_mvrp;
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto out_uninit_applicant;
+		goto out_upper_dev_unlink;
 
 	/* Account for reference in struct vlan_dev_priv */
 	dev_hold(real_dev);
@@ -180,7 +186,12 @@ int register_vlan_dev(struct net_device *dev)
 
 	return 0;
 
-out_uninit_applicant:
+out_upper_dev_unlink:
+	netdev_upper_dev_unlink(real_dev, dev);
+out_uninit_mvrp:
+	if (grp->nr_vlan_devs == 0)
+		vlan_mvrp_uninit_applicant(real_dev);
+out_uninit_gvrp:
 	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
@@ -654,13 +665,19 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err3;
 
-	err = vlan_netlink_init();
+	err = vlan_mvrp_init();
 	if (err < 0)
 		goto err4;
 
+	err = vlan_netlink_init();
+	if (err < 0)
+		goto err5;
+
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
+err5:
+	vlan_mvrp_uninit();
 err4:
 	vlan_gvrp_uninit();
 err3:
@@ -681,6 +698,7 @@ static void __exit vlan_cleanup_module(void)
 	unregister_pernet_subsys(&vlan_net_ops);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
+	vlan_mvrp_uninit();
 	vlan_gvrp_uninit();
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index a4886d94c40c..670f1e8cfc0f 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -171,6 +171,22 @@ static inline int vlan_gvrp_init(void) { return 0; }
 static inline void vlan_gvrp_uninit(void) {}
 #endif
 
+#ifdef CONFIG_VLAN_8021Q_MVRP
+extern int vlan_mvrp_request_join(const struct net_device *dev);
+extern void vlan_mvrp_request_leave(const struct net_device *dev);
+extern int vlan_mvrp_init_applicant(struct net_device *dev);
+extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_mvrp_init(void);
+extern void vlan_mvrp_uninit(void);
+#else
+static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_mvrp_init(void) { return 0; }
+static inline void vlan_mvrp_uninit(void) {}
+#endif
+
 extern const char vlan_fullname[];
 extern const char vlan_version[];
 extern int vlan_netlink_init(void);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 65e06abe023f..f3b6f515eba6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -60,21 +60,25 @@ bool vlan_do_receive(struct sk_buff **skbp)
 	return true;
 }
 
-/* Must be invoked with rcu_read_lock or with RTNL. */
-struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+/* Must be invoked with rcu_read_lock. */
+struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 					u16 vlan_id)
 {
-	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
+	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
 
 	if (vlan_info) {
 		return vlan_group_get_device(&vlan_info->grp, vlan_id);
 	} else {
 		/*
-		 * Bonding slaves do not have grp assigned to themselves.
-		 * Grp is assigned to bonding master instead.
+		 * Lower devices of master uppers (bonding, team) do not have
+		 * grp assigned to themselves. Grp is assigned to upper device
+		 * instead.
 		 */
-		if (netif_is_bond_slave(real_dev))
-			return __vlan_find_dev_deep(real_dev->master, vlan_id);
+		struct net_device *upper_dev;
+
+		upper_dev = netdev_master_upper_dev_get_rcu(dev);
+		if (upper_dev)
+			return __vlan_find_dev_deep(upper_dev, vlan_id);
 	}
 
 	return NULL;
@@ -140,6 +144,7 @@ err_free:
 	kfree_skb(skb);
 	return NULL;
 }
+EXPORT_SYMBOL(vlan_untag);
 
 
 /*
@@ -220,8 +225,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	if (!vid_info)
 		return -ENOMEM;
 
-	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
-	    ops->ndo_vlan_rx_add_vid) {
+	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
 		err = ops->ndo_vlan_rx_add_vid(dev, vid);
 		if (err) {
 			kfree(vid_info);
@@ -278,8 +282,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 	unsigned short vid = vid_info->vid;
 	int err;
 
-	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
-	    ops->ndo_vlan_rx_kill_vid) {
+	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
 		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
 		if (err) {
 			pr_warn("failed to kill vid %d for device %s\n",
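Note the pattern in the two hunks above: together with the removed "buggy VLAN hw accel" check in vlan.c, the ndo hooks are now called unconditionally whenever NETIF_F_HW_VLAN_FILTER is advertised. The contract thus moves to drivers: a device that sets the feature bit must implement both callbacks. A hedged sketch of the driver side (hypothetical foo driver; uses this tree's pre-802.1ad callback signatures, int (*)(struct net_device *, unsigned short)):

	/* Hypothetical driver: advertising the filter feature now implies
	 * both hooks exist; there is no "buggy hw accel" fallback any more.
	 */
	static int foo_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
	{
		/* program vid into the hardware VLAN filter table */
		return 0;
	}

	static int foo_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
	{
		/* remove vid from the hardware VLAN filter table */
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
		/* ... remaining ops ... */
	};

	/* in foo_probe(): dev->features |= NETIF_F_HW_VLAN_FILTER; */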
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a6d31a082b9..19cf81bf9f69 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -261,7 +261,7 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 	u32 old_flags = vlan->flags;
 
 	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-		     VLAN_FLAG_LOOSE_BINDING))
+		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
 		return -EINVAL;
 
 	vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -272,6 +272,13 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 		else
 			vlan_gvrp_request_leave(dev);
 	}
+
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+		if (vlan->flags & VLAN_FLAG_MVRP)
+			vlan_mvrp_request_join(dev);
+		else
+			vlan_mvrp_request_leave(dev);
+	}
 	return 0;
 }
 
@@ -312,6 +319,9 @@ static int vlan_dev_open(struct net_device *dev)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_join(dev);
 
+	if (vlan->flags & VLAN_FLAG_MVRP)
+		vlan_mvrp_request_join(dev);
+
 	if (netif_carrier_ok(real_dev))
 		netif_carrier_on(dev);
 	return 0;
@@ -640,9 +650,9 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
 static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 				     struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, vlan_fullname);
-	strcpy(info->version, vlan_version);
-	strcpy(info->fw_version, "N/A");
+	strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+	strlcpy(info->version, vlan_version, sizeof(info->version));
+	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 }
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
@@ -723,7 +733,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
723 733
724 vlan->netpoll = NULL; 734 vlan->netpoll = NULL;
725 735
726 __netpoll_free_rcu(netpoll); 736 __netpoll_free_async(netpoll);
727} 737}
728#endif /* CONFIG_NET_POLL_CONTROLLER */ 738#endif /* CONFIG_NET_POLL_CONTROLLER */
729 739
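Two independent fixes above: __netpoll_free_rcu() is renamed to
__netpoll_free_async() (the netpoll is now torn down asynchronously), and the
unbounded strcpy() calls into struct ethtool_drvinfo become strlcpy() bounded
by the destination arrays. A minimal sketch of the bounded pattern, with a
hypothetical driver "foo":

	static void foo_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
	{
		/* every field is a fixed-size char array; strlcpy()
		 * NUL-terminates and never writes past sizeof()
		 */
		strlcpy(info->driver, "foo", sizeof(info->driver));
		strlcpy(info->version, "1.0", sizeof(info->version));
		strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	}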
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
new file mode 100644
index 000000000000..d9ec1d5964aa
--- /dev/null
+++ b/net/8021q/vlan_mvrp.c
@@ -0,0 +1,72 @@
+/*
+ * IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
+ *
+ * Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Adapted from code in net/8021q/vlan_gvrp.c
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/mrp.h>
+#include "vlan.h"
+
+#define MRP_MVRP_ADDRESS	{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum mvrp_attributes {
+	MVRP_ATTR_INVALID,
+	MVRP_ATTR_VID,
+	__MVRP_ATTR_MAX
+};
+#define MVRP_ATTR_MAX	(__MVRP_ATTR_MAX - 1)
+
+static struct mrp_application vlan_mrp_app __read_mostly = {
+	.type		= MRP_APPLICATION_MVRP,
+	.maxattr	= MVRP_ATTR_MAX,
+	.pkttype.type	= htons(ETH_P_MVRP),
+	.group_address	= MRP_MVRP_ADDRESS,
+	.version	= 0,
+};
+
+int vlan_mvrp_request_join(const struct net_device *dev)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	__be16 vlan_id = htons(vlan->vlan_id);
+
+	return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
+				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+void vlan_mvrp_request_leave(const struct net_device *dev)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	__be16 vlan_id = htons(vlan->vlan_id);
+
+	mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
+			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+int vlan_mvrp_init_applicant(struct net_device *dev)
+{
+	return mrp_init_applicant(dev, &vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit_applicant(struct net_device *dev)
+{
+	mrp_uninit_applicant(dev, &vlan_mrp_app);
+}
+
+int __init vlan_mvrp_init(void)
+{
+	return mrp_register_application(&vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit(void)
+{
+	mrp_unregister_application(&vlan_mrp_app);
+}
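The new file plugs VLANs into the generic MRP layer (net/802/mrp.c in the
diffstat above): each VLAN device registers its VID as the single MVRP
attribute on the underlying real device. The callers in vlan_dev.c and
vlan_netlink.c compile whether or not CONFIG_VLAN_8021Q_MVRP is enabled, so
vlan.h presumably carries the usual inline-stub fallback; a sketch of that
assumed pattern:

	#ifdef CONFIG_VLAN_8021Q_MVRP
	int vlan_mvrp_request_join(const struct net_device *dev);
	void vlan_mvrp_request_leave(const struct net_device *dev);
	#else
	static inline int vlan_mvrp_request_join(const struct net_device *dev)
	{
		return 0;	/* MVRP compiled out: joining is a no-op */
	}
	static inline void vlan_mvrp_request_leave(const struct net_device *dev)
	{
	}
	#endif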
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 708c80ea1874..1789658b7cd7 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -62,7 +62,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
 		if ((flags->flags & flags->mask) &
 		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-		      VLAN_FLAG_LOOSE_BINDING))
+		      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
 			return -EINVAL;
 	}
 
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 4de77ea5fa37..dc526ec965e4 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -131,7 +131,7 @@ void vlan_proc_cleanup(struct net *net)
 	remove_proc_entry(name_conf, vn->proc_vlan_dir);
 
 	if (vn->proc_vlan_dir)
-		proc_net_remove(net, name_root);
+		remove_proc_entry(name_root, net->proc_net);
 
 	/* Dynamically added entries should be cleaned up as their vlan_device
 	 * is removed, so we should not have to take care of it here...
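This is part of the tree-wide retirement of the proc_net_* wrappers (also
visible in net/atm and net/ax25 below): entries are created and removed
directly on net->proc_net. A sketch of the replacement pairing for a
hypothetical per-net entry "foo" with file operations foo_fops:

	static int __net_init foo_net_init(struct net *net)
	{
		/* proc_create() on net->proc_net replaces proc_net_fops_create() */
		if (!proc_create("foo", S_IRUGO, net->proc_net, &foo_fops))
			return -ENOMEM;
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* remove_proc_entry() on net->proc_net replaces proc_net_remove() */
		remove_proc_entry("foo", net->proc_net);
	}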
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index d9ea09b11cf8..a75174a33723 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@ config NET_9P_VIRTIO
 	  guest partitions and a host partition.
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 35b8911b1c8e..de2e950a0a7a 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -39,6 +39,7 @@
 #include <linux/inet.h>
 #include <linux/idr.h>
 #include <linux/file.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
@@ -86,7 +87,7 @@ struct virtio_chan {
 	/* This is global limit. Since we don't have a global structure,
 	 * will be placing it in each channel.
 	 */
-	int p9_max_pages;
+	unsigned long p9_max_pages;
 	/* Scatterlist: can be too big for stack. */
 	struct scatterlist sg[VIRTQUEUE_NUM];
 
@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 	int count = nr_pages;
 	while (nr_pages) {
 		s = rest_of_page(data);
-		pages[index++] = virt_to_page(data);
+		pages[index++] = kmap_to_page(data);
 		data += s;
 		nr_pages--;
 	}
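The zero-copy path can be handed a kmap()ed highmem address, for which
virt_to_page() computes a bogus page; kmap_to_page() (from the newly included
<linux/highmem.h>) resolves both the kmap range and ordinary lowmem pointers.
The widening of p9_max_pages to unsigned long matches the page-count
arithmetic it stores. A compressed sketch of the distinction:

	#include <linux/highmem.h>

	/* virt_to_page(addr) is only valid for the linear mapping;
	 * kmap_to_page(addr) additionally resolves addresses returned
	 * by kmap() on highmem pages.
	 */
	static struct page *p9_buf_to_page(void *addr)
	{
		return kmap_to_page(addr);
	}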
diff --git a/net/Kconfig b/net/Kconfig
index 30b48f523135..6f676ab885be 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -90,7 +90,6 @@ config NETWORK_SECMARK
 
 config NETWORK_PHY_TIMESTAMPING
 	bool "Timestamping in PHY devices"
-	depends on EXPERIMENTAL
 	help
 	  This allows timestamping of network packets by PHYs with
 	  hardware timestamping capabilities. This option adds some
@@ -209,7 +208,6 @@ source "net/ipx/Kconfig"
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
-source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
 source "net/ieee802154/Kconfig"
 source "net/mac802154/Kconfig"
@@ -218,6 +216,7 @@ source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
 source "net/openvswitch/Kconfig"
+source "net/vmw_vsock/Kconfig"
 
 config RPS
 	boolean
@@ -232,7 +231,7 @@ config RFS_ACCEL
 
 config XPS
 	boolean
-	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+	depends on SMP && USE_GENERIC_SMP_HELPERS
 	default y
 
 config NETPRIO_CGROUP
@@ -278,7 +277,7 @@ config NET_PKTGEN
 
 config NET_TCPPROBE
 	tristate "TCP connection probing"
-	depends on INET && EXPERIMENTAL && PROC_FS && KPROBES
+	depends on INET && PROC_FS && KPROBES
 	---help---
 	  This module allows for capturing the changes to TCP connection
 	  state in response to incoming packets. It is used for debugging
@@ -295,7 +294,7 @@ config NET_TCPPROBE
 
 config NET_DROP_MONITOR
 	tristate "Network packet drop alerting service"
-	depends on INET && EXPERIMENTAL && TRACEPOINTS
+	depends on INET && TRACEPOINTS
 	---help---
 	  This feature provides an alerting service to userspace in the
 	  event that packets are discarded in the network stack. Alerts
diff --git a/net/Makefile b/net/Makefile
index 4f4ee083064c..091e7b04f301 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_BRIDGE) += bridge/
 obj-$(CONFIG_NET_DSA)		+= dsa/
 obj-$(CONFIG_IPX)		+= ipx/
 obj-$(CONFIG_ATALK)		+= appletalk/
-obj-$(CONFIG_WAN_ROUTER)	+= wanrouter/
 obj-$(CONFIG_X25)		+= x25/
 obj-$(CONFIG_LAPB)		+= lapb/
 obj-$(CONFIG_NETROM)		+= netrom/
@@ -70,3 +69,4 @@ obj-$(CONFIG_CEPH_LIB) += ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
 obj-$(CONFIG_NFC)		+= nfc/
 obj-$(CONFIG_OPENVSWITCH)	+= openvswitch/
+obj-$(CONFIG_VSOCKETS)	+= vmw_vsock/
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f49da5814bc3..350bf62b2ae3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -14,49 +14,45 @@ static ssize_t show_type(struct device *cdev,
 			 struct device_attribute *attr, char *buf)
 {
 	struct atm_dev *adev = to_atm_dev(cdev);
-	return sprintf(buf, "%s\n", adev->type);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
 }
 
 static ssize_t show_address(struct device *cdev,
 			    struct device_attribute *attr, char *buf)
 {
-	char *pos = buf;
 	struct atm_dev *adev = to_atm_dev(cdev);
-	int i;
-
-	for (i = 0; i < (ESI_LEN - 1); i++)
-		pos += sprintf(pos, "%02x:", adev->esi[i]);
-	pos += sprintf(pos, "%02x\n", adev->esi[i]);
 
-	return pos - buf;
+	return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
 }
 
 static ssize_t show_atmaddress(struct device *cdev,
 			       struct device_attribute *attr, char *buf)
 {
 	unsigned long flags;
-	char *pos = buf;
 	struct atm_dev *adev = to_atm_dev(cdev);
 	struct atm_dev_addr *aaddr;
 	int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
-	int i, j;
+	int i, j, count = 0;
 
 	spin_lock_irqsave(&adev->lock, flags);
 	list_for_each_entry(aaddr, &adev->local, entry) {
 		for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
 			if (j == *fmt) {
-				pos += sprintf(pos, ".");
+				count += scnprintf(buf + count,
+						   PAGE_SIZE - count, ".");
 				++fmt;
 				j = 0;
 			}
-			pos += sprintf(pos, "%02x",
-				       aaddr->addr.sas_addr.prv[i]);
+			count += scnprintf(buf + count,
+					   PAGE_SIZE - count, "%02x",
+					   aaddr->addr.sas_addr.prv[i]);
 		}
-		pos += sprintf(pos, "\n");
+		count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
 	}
 	spin_unlock_irqrestore(&adev->lock, flags);
 
-	return pos - buf;
+	return count;
 }
 
 static ssize_t show_atmindex(struct device *cdev,
@@ -64,25 +60,21 @@ static ssize_t show_atmindex(struct device *cdev,
 {
 	struct atm_dev *adev = to_atm_dev(cdev);
 
-	return sprintf(buf, "%d\n", adev->number);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
 }
 
 static ssize_t show_carrier(struct device *cdev,
 			    struct device_attribute *attr, char *buf)
 {
-	char *pos = buf;
 	struct atm_dev *adev = to_atm_dev(cdev);
 
-	pos += sprintf(pos, "%d\n",
-		       adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
-
-	return pos - buf;
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
 }
 
 static ssize_t show_link_rate(struct device *cdev,
 			      struct device_attribute *attr, char *buf)
 {
-	char *pos = buf;
 	struct atm_dev *adev = to_atm_dev(cdev);
 	int link_rate;
 
@@ -100,9 +92,7 @@ static ssize_t show_link_rate(struct device *cdev,
 	default:
 		link_rate = adev->link_rate * 8 * 53;
 	}
-	pos += sprintf(pos, "%d\n", link_rate);
-
-	return pos - buf;
+	return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
 }
 
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
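All of the atm sysfs show() handlers now use scnprintf() bounded by PAGE_SIZE
(the size of a sysfs buffer) instead of open-coded sprintf() pointer
arithmetic, and the hand-rolled ESI hex loop collapses into the %pM
MAC-address format extension. A minimal sketch of the pattern for a
hypothetical attribute:

	static ssize_t show_mac(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* scnprintf() returns the bytes actually written, never
		 * more than PAGE_SIZE - 1; %pM prints aa:bb:cc:dd:ee:ff
		 */
		return scnprintf(buf, PAGE_SIZE, "%pM\n", mac);
	}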
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 0d020de8d233..b4e75340b162 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -460,7 +460,7 @@ static void atm_proc_dirs_remove(void)
 		if (e->dirent)
 			remove_proc_entry(e->name, atm_proc_root);
 	}
-	proc_net_remove(&init_net, "atm");
+	remove_proc_entry("atm", init_net.proc_net);
 }
 
 int __init atm_proc_init(void)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 779095ded689..69a06c47b648 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1992,9 +1992,10 @@ static int __init ax25_init(void)
 	dev_add_pack(&ax25_packet_type);
 	register_netdevice_notifier(&ax25_dev_notifier);
 
-	proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
-	proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
-	proc_net_fops_create(&init_net, "ax25_calls", S_IRUGO, &ax25_uid_fops);
+	proc_create("ax25_route", S_IRUGO, init_net.proc_net,
+		    &ax25_route_fops);
+	proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
+	proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
 out:
 	return rc;
 }
@@ -2008,9 +2009,9 @@ MODULE_ALIAS_NETPROTO(PF_AX25);
 
 static void __exit ax25_exit(void)
 {
-	proc_net_remove(&init_net, "ax25_route");
-	proc_net_remove(&init_net, "ax25");
-	proc_net_remove(&init_net, "ax25_calls");
+	remove_proc_entry("ax25_route", init_net.proc_net);
+	remove_proc_entry("ax25", init_net.proc_net);
+	remove_proc_entry("ax25_calls", init_net.proc_net);
 
 	unregister_netdevice_notifier(&ax25_dev_notifier);
 
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index a0ba3bff9b36..a4808c29ea3d 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 9f3925a85aab..72fe1bbf7721 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -123,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
 	unsigned int msecs;
 
 	msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
-	msecs += (random32() % 2 * BATADV_JITTER);
+	msecs += prandom_u32() % (2 * BATADV_JITTER);
 
 	return jiffies + msecs_to_jiffies(msecs);
 }
@@ -131,7 +131,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
 /* when do we schedule a ogm packet to be sent */
 static unsigned long batadv_iv_ogm_fwd_send_time(void)
 {
-	return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
+	return jiffies + msecs_to_jiffies(prandom_u32() % (BATADV_JITTER / 2));
 }
 
 /* apply hop penalty for a normal link */
@@ -183,7 +183,6 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
 					 batadv_ogm_packet->tt_num_changes)) {
-
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet
 		 */
@@ -261,7 +260,6 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 	 */
 	if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
 	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
-
 		/* FIXME: what about aggregated packets ? */
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
@@ -325,7 +323,6 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
 	if (time_before(send_time, forw_packet->send_time) &&
 	    time_after_eq(aggregation_end_time, forw_packet->send_time) &&
 	    (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
-
 		/* check aggregation compatibility
 		 * -> direct link packets are broadcasted on
 		 *    their interface only
@@ -815,7 +812,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(tmp_neigh_node, node,
 				 &orig_neigh_node->neigh_list, list) {
-
 		if (!batadv_compare_eth(tmp_neigh_node->addr,
 					orig_neigh_node->orig))
 			continue;
@@ -949,7 +945,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(tmp_neigh_node, node,
 				 &orig_node->neigh_list, list) {
-
 		is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
 						orig_node->last_real_seqno,
 						seqno);
@@ -1033,7 +1028,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 		is_single_hop_neigh = true;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+		   "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
 		   ethhdr->h_source, if_incoming->net_dev->name,
 		   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
 		   batadv_ogm_packet->prev_sender,
@@ -1223,7 +1218,6 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
-
 		/* mark direct link on incoming interface */
 		batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
 				      is_single_hop_neigh,
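Besides the random32() -> prandom_u32() rename, the first hunk in this file
fixes an operator-precedence bug: % and * have equal precedence and associate
left to right, so the old expression jittered by either 0 or BATADV_JITTER
instead of uniformly over the intended range. An illustration (JITTER stands
in for BATADV_JITTER):

	#define JITTER 20

	static u32 jitter_old(u32 r)
	{
		return r % 2 * JITTER;		/* == (r % 2) * JITTER: only 0 or 20 */
	}

	static u32 jitter_new(u32 r)
	{
		return r % (2 * JITTER);	/* uniform over [0, 2 * JITTER) */
	}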
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 5453b17d8df2..973982414d58 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index cebaae7e148b..a81b9322e382 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5aebe9327d68..30f46526cbbd 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -34,13 +34,14 @@
 static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 
 static void batadv_bla_periodic_work(struct work_struct *work);
-static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
-				     struct batadv_backbone_gw *backbone_gw);
+static void
+batadv_bla_send_announce(struct batadv_priv *bat_priv,
+			 struct batadv_bla_backbone_gw *backbone_gw);
 
 /* return the index of the claim */
 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 {
-	struct batadv_claim *claim = (struct batadv_claim *)data;
+	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
 	uint32_t hash = 0;
 
 	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
@@ -57,7 +58,7 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 static inline uint32_t batadv_choose_backbone_gw(const void *data,
 						 uint32_t size)
 {
-	struct batadv_claim *claim = (struct batadv_claim *)data;
+	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
 	uint32_t hash = 0;
 
 	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
@@ -75,9 +76,9 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
 static int batadv_compare_backbone_gw(const struct hlist_node *node,
 				      const void *data2)
 {
-	const void *data1 = container_of(node, struct batadv_backbone_gw,
-					 hash_entry);
-	const struct batadv_backbone_gw *gw1 = data1, *gw2 = data2;
+	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
+					 hash_entry);
+	const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
 
 	if (!batadv_compare_eth(gw1->orig, gw2->orig))
 		return 0;
@@ -92,9 +93,9 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
 static int batadv_compare_claim(const struct hlist_node *node,
 				const void *data2)
 {
-	const void *data1 = container_of(node, struct batadv_claim,
-					 hash_entry);
-	const struct batadv_claim *cl1 = data1, *cl2 = data2;
+	const void *data1 = container_of(node, struct batadv_bla_claim,
+					 hash_entry);
+	const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
 
 	if (!batadv_compare_eth(cl1->addr, cl2->addr))
 		return 0;
@@ -106,7 +107,8 @@ static int batadv_compare_claim(const struct hlist_node *node,
 }
 
 /* free a backbone gw */
-static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
+static void
+batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 {
 	if (atomic_dec_and_test(&backbone_gw->refcount))
 		kfree_rcu(backbone_gw, rcu);
@@ -115,16 +117,16 @@ static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
 /* finally deinitialize the claim */
 static void batadv_claim_free_rcu(struct rcu_head *rcu)
 {
-	struct batadv_claim *claim;
+	struct batadv_bla_claim *claim;
 
-	claim = container_of(rcu, struct batadv_claim, rcu);
+	claim = container_of(rcu, struct batadv_bla_claim, rcu);
 
 	batadv_backbone_gw_free_ref(claim->backbone_gw);
 	kfree(claim);
 }
 
 /* free a claim, call claim_free_rcu if its the last reference */
-static void batadv_claim_free_ref(struct batadv_claim *claim)
+static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
 	if (atomic_dec_and_test(&claim->refcount))
 		call_rcu(&claim->rcu, batadv_claim_free_rcu);
@@ -136,14 +138,15 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
  * looks for a claim in the hash, and returns it if found
  * or NULL otherwise.
  */
-static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
-						   struct batadv_claim *data)
+static struct batadv_bla_claim
+*batadv_claim_hash_find(struct batadv_priv *bat_priv,
+			struct batadv_bla_claim *data)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct batadv_claim *claim;
-	struct batadv_claim *claim_tmp = NULL;
+	struct batadv_bla_claim *claim;
+	struct batadv_bla_claim *claim_tmp = NULL;
 	int index;
 
 	if (!hash)
@@ -176,15 +179,15 @@ static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
  *
  * Returns claim if found or NULL otherwise.
  */
-static struct batadv_backbone_gw *
+static struct batadv_bla_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 			  uint8_t *addr, short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct batadv_backbone_gw search_entry, *backbone_gw;
-	struct batadv_backbone_gw *backbone_gw_tmp = NULL;
+	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
 	int index;
 
 	if (!hash)
@@ -215,12 +218,12 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 
 /* delete all claims for a backbone */
 static void
-batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
+batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
 	struct batadv_hashtable *hash;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct batadv_claim *claim;
+	struct batadv_bla_claim *claim;
 	int i;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 
@@ -235,7 +238,6 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
 		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(claim, node, node_tmp,
 					  head, hash_entry) {
-
 			if (claim->backbone_gw != backbone_gw)
 				continue;
 
@@ -338,7 +340,6 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 			   "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
 			   ethhdr->h_source, ethhdr->h_dest, vid);
 		break;
-
 	}
 
 	if (vid != -1)
@@ -366,11 +367,11 @@ out:
  * searches for the backbone gw or creates a new one if it could not
  * be found.
  */
-static struct batadv_backbone_gw *
+static struct batadv_bla_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 			   short vid, bool own_backbone)
 {
-	struct batadv_backbone_gw *entry;
+	struct batadv_bla_backbone_gw *entry;
 	struct batadv_orig_node *orig_node;
 	int hash_added;
 
@@ -437,7 +438,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 				  struct batadv_hard_iface *primary_if,
 				  short vid)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 
 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
 						 primary_if->net_dev->dev_addr,
@@ -462,8 +463,8 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
-	struct batadv_claim *claim;
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_claim *claim;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	int i;
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
@@ -502,7 +503,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
  * After the request, it will repeat all of his own claims and finally
  * send an announcement claim with which we can check again.
  */
-static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
+static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 {
 	/* first, remove all old entries */
 	batadv_bla_del_backbone_claims(backbone_gw);
@@ -528,7 +529,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
  * places.
  */
 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
-				     struct batadv_backbone_gw *backbone_gw)
+				     struct batadv_bla_backbone_gw *backbone_gw)
 {
 	uint8_t mac[ETH_ALEN];
 	__be16 crc;
@@ -539,7 +540,6 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 
 	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
 			      BATADV_CLAIM_TYPE_ANNOUNCE);
-
 }
 
 /**
@@ -551,10 +551,10 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
  */
 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 				 const uint8_t *mac, const short vid,
-				 struct batadv_backbone_gw *backbone_gw)
+				 struct batadv_bla_backbone_gw *backbone_gw)
 {
-	struct batadv_claim *claim;
-	struct batadv_claim search_claim;
+	struct batadv_bla_claim *claim;
+	struct batadv_bla_claim search_claim;
 	int hash_added;
 
 	memcpy(search_claim.addr, mac, ETH_ALEN);
@@ -598,7 +598,6 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 
 		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
 		batadv_backbone_gw_free_ref(claim->backbone_gw);
-
 	}
 	/* set (new) backbone gw */
 	atomic_inc(&backbone_gw->refcount);
@@ -617,7 +616,7 @@ claim_free_ref:
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 				 const uint8_t *mac, const short vid)
 {
-	struct batadv_claim search_claim, *claim;
+	struct batadv_bla_claim search_claim, *claim;
 
 	memcpy(search_claim.addr, mac, ETH_ALEN);
 	search_claim.vid = vid;
@@ -643,7 +642,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
 				  uint8_t *an_addr, uint8_t *backbone_addr,
 				  short vid)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	uint16_t crc;
 
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
@@ -661,12 +660,12 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
 	crc = ntohs(*((__be16 *)(&an_addr[4])));
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
-		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
 		   vid, backbone_gw->orig, crc);
 
 	if (backbone_gw->crc != crc) {
 		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
-			   "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
 			   backbone_gw->orig, backbone_gw->vid,
 			   backbone_gw->crc, crc);
 
@@ -715,7 +714,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 				 uint8_t *backbone_addr,
 				 uint8_t *claim_addr, short vid)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 
 	/* unclaim in any case if it is our own */
 	if (primary_if && batadv_compare_eth(backbone_addr,
@@ -744,7 +743,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 			       uint8_t *backbone_addr, uint8_t *claim_addr,
 			       short vid)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 
 	/* register the gateway if not yet available, and add the claim. */
 
@@ -835,7 +834,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 	/* if our mesh friends mac is bigger, use it for ourselves. */
 	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
-			   "taking other backbones claim group: %04x\n",
+			   "taking other backbones claim group: %#.4x\n",
 			   ntohs(bla_dst->group));
 		bla_dst_own->group = bla_dst->group;
 	}
@@ -958,7 +957,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
  */
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
@@ -1013,7 +1012,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
 				    int now)
 {
-	struct batadv_claim *claim;
+	struct batadv_bla_claim *claim;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
@@ -1062,7 +1061,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
 				    struct batadv_hard_iface *oldif)
 {
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
@@ -1104,16 +1103,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 	}
 }
 
-
-
-/* (re)start the timer */
-static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
-			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
-}
-
 /* periodic work to do:
  * * purge structures when they are too old
  * * send announcements
@@ -1125,7 +1114,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 	struct batadv_priv_bla *priv_bla;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hashtable *hash;
 	struct batadv_hard_iface *primary_if;
 	int i;
@@ -1184,7 +1173,8 @@ out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
 
-	batadv_bla_start_timer(bat_priv);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
+			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }
 
 /* The hash for claim and backbone hash receive the same key because they
@@ -1242,7 +1232,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
 
-	batadv_bla_start_timer(bat_priv);
+	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
+			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 	return 0;
 }
 
@@ -1330,7 +1323,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
 	struct hlist_node *node;
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	int i;
 
 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@ -1371,7 +1364,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 {
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	short vid = -1;
 
 	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@ -1442,7 +1435,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
 		  bool is_bcast)
 {
 	struct ethhdr *ethhdr;
-	struct batadv_claim search_claim, *claim = NULL;
+	struct batadv_bla_claim search_claim, *claim = NULL;
 	struct batadv_hard_iface *primary_if;
 	int ret;
 
@@ -1536,7 +1529,7 @@ out:
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 {
 	struct ethhdr *ethhdr;
-	struct batadv_claim search_claim, *claim = NULL;
+	struct batadv_bla_claim search_claim, *claim = NULL;
 	struct batadv_hard_iface *primary_if;
 	int ret = 0;
 
@@ -1612,7 +1605,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
-	struct batadv_claim *claim;
+	struct batadv_bla_claim *claim;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -1626,10 +1619,10 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
-		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
 		   net_dev->name, primary_addr,
 		   ntohs(bat_priv->bla.claim_dest.group));
-	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
+	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-6s)\n",
 		   "Client", "VID", "Originator", "CRC");
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1638,7 +1631,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
 			is_own = batadv_compare_eth(claim->backbone_gw->orig,
 						    primary_addr);
-			seq_printf(seq, "   * %pM on % 5d by %pM [%c] (%04x)\n",
+			seq_printf(seq, "   * %pM on % 5d by %pM [%c] (%#.4x)\n",
 				   claim->addr, claim->vid,
 				   claim->backbone_gw->orig,
 				   (is_own ? 'x' : ' '),
@@ -1657,7 +1650,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
-	struct batadv_backbone_gw *backbone_gw;
+	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -1672,10 +1665,10 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
-		   "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
 		   net_dev->name, primary_addr,
 		   ntohs(bat_priv->bla.claim_dest.group));
-	seq_printf(seq, "   %-17s    %-5s    %-9s (%-4s)\n",
+	seq_printf(seq, "   %-17s    %-5s    %-9s (%-6s)\n",
 		   "Originator", "VID", "last seen", "CRC");
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1693,7 +1686,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 				continue;
 
 			seq_printf(seq,
-				   "   * %pM on % 5d % 4i.%03is (%04x)\n",
+				   "   * %pM on % 5d % 4i.%03is (%#.4x)\n",
 				   backbone_gw->orig, backbone_gw->vid,
 				   secs, msecs, backbone_gw->crc);
 		}
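Folding batadv_bla_start_timer() away means INIT_DELAYED_WORK() now runs
exactly once, in batadv_bla_init(); the old helper re-initialized the live
work item from its own handler on every rearm. The periodic work now simply
requeues itself, the usual self-rearming pattern:

	/* Sketch of the self-rearming delayed work now used here
	 * (hypothetical "foo" names, queued on the system workqueue).
	 */
	static void foo_periodic_work(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);

		/* ... periodic duties ... */

		queue_delayed_work(system_wq, dwork, msecs_to_jiffies(10000));
	}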
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 196d9a0254bc..dea2fbc5d98d 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 6f58ddd53bff..6ae86516db4d 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -40,13 +40,14 @@ static struct dentry *batadv_debugfs;
 
 static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
 
-static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
-				  size_t idx)
+static char *batadv_log_char_addr(struct batadv_priv_debug_log *debug_log,
+				  size_t idx)
 {
 	return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
 }
 
-static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
+static void batadv_emit_log_char(struct batadv_priv_debug_log *debug_log,
+				 char c)
 {
 	char *char_addr;
 
@@ -59,7 +60,7 @@ static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
 }
 
 __printf(2, 3)
-static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
-			     const char *fmt, ...)
+static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
+			     const char *fmt, ...)
 {
 	va_list args;
@@ -114,7 +115,7 @@ static int batadv_log_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int batadv_log_empty(struct batadv_debug_log *debug_log)
+static int batadv_log_empty(struct batadv_priv_debug_log *debug_log)
 {
 	return !(debug_log->log_start - debug_log->log_end);
 }
@@ -123,7 +124,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
 			       size_t count, loff_t *ppos)
 {
 	struct batadv_priv *bat_priv = file->private_data;
-	struct batadv_debug_log *debug_log = bat_priv->debug_log;
+	struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
 	int error, i = 0;
 	char *char_addr;
 	char c;
@@ -164,7 +165,6 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
 
 		buf++;
 		i++;
-
 	}
 
 	spin_unlock_bh(&debug_log->lock);
@@ -178,7 +178,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
 static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
 {
 	struct batadv_priv *bat_priv = file->private_data;
-	struct batadv_debug_log *debug_log = bat_priv->debug_log;
+	struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
 
 	poll_wait(file, &debug_log->queue_wait, wait);
 
@@ -230,7 +230,6 @@ static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 #else /* CONFIG_BATMAN_ADV_DEBUG */
 static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 {
-	bat_priv->debug_log = NULL;
 	return 0;
 }
 
@@ -397,10 +396,8 @@ err:
 
 void batadv_debugfs_destroy(void)
 {
-	if (batadv_debugfs) {
-		debugfs_remove_recursive(batadv_debugfs);
-		batadv_debugfs = NULL;
-	}
+	debugfs_remove_recursive(batadv_debugfs);
+	batadv_debugfs = NULL;
 }
 
 int batadv_debugfs_add_meshif(struct net_device *dev)
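debugfs_remove_recursive(), like debugfs_remove(), ignores NULL or
error-pointer dentries, so the wrapper's NULL check was dead code; and since
bat_priv comes from zeroed netdev private data, the !CONFIG_BATMAN_ADV_DEBUG
stub no longer needs to clear debug_log explicitly. Sketch of the NULL-safe
teardown idiom:

	static struct dentry *foo_debugfs_dir;	/* hypothetical */

	static void foo_debugfs_exit(void)
	{
		/* safe even if foo_debugfs_dir was never created */
		debugfs_remove_recursive(foo_debugfs_dir);
		foo_debugfs_dir = NULL;
	}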
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 3319e1f21f55..f8c3849edff4 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e1d89d2b1c1..761a59002e34 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
440 /* this is an hash collision with the temporary selected node. Choose 440 /* this is an hash collision with the temporary selected node. Choose
441 * the one with the lowest address 441 * the one with the lowest address
442 */ 442 */
443 if ((tmp_max == max) && 443 if ((tmp_max == max) && max_orig_node &&
444 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) 444 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
445 goto out; 445 goto out;
446 446
@@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
738 struct arphdr *arphdr; 738 struct arphdr *arphdr;
739 struct ethhdr *ethhdr; 739 struct ethhdr *ethhdr;
740 __be32 ip_src, ip_dst; 740 __be32 ip_src, ip_dst;
741 uint8_t *hw_src, *hw_dst;
741 uint16_t type = 0; 742 uint16_t type = 0;
742 743
743 /* pull the ethernet header */ 744 /* pull the ethernet header */
@@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
	ip_src = batadv_arp_ip_src(skb, hdr_size);
	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
-	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+	    ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+	    ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
		goto out;
 
+	hw_src = batadv_arp_hw_src(skb, hdr_size);
+	if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+		goto out;
+
+	/* we don't care about the destination MAC address in ARP requests */
+	if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+		hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+		if (is_zero_ether_addr(hw_dst) ||
+		    is_multicast_ether_addr(hw_dst))
+			goto out;
+	}
+
	type = ntohs(arphdr->ar_op);
 out:
	return type;
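The widened checks in batadv_arp_get_type() drop ARP packets whose sender or
target addresses could never belong to a real client and would only pollute
the distributed ARP table. A userspace C sketch of the same address classes,
with the kernel helpers (ipv4_is_*(), is_zero/multicast_ether_addr())
re-implemented here purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static bool ipv4_is_loopback(uint32_t a)  { return (ntohl(a) >> 24) == 127; }
static bool ipv4_is_multicast(uint32_t a) { return (ntohl(a) >> 28) == 0xe; }
static bool ipv4_is_zeronet(uint32_t a)   { return (ntohl(a) >> 24) == 0; }
static bool ipv4_is_lbcast(uint32_t a)    { return a == 0xffffffff; }

static bool eth_is_zero(const uint8_t *m)
{
	static const uint8_t zero[6];

	return memcmp(m, zero, 6) == 0;
}

static bool eth_is_multicast(const uint8_t *m)
{
	return m[0] & 0x01;	/* group bit */
}

/* true if a DAT-like cache should ignore this sender/target pair */
static bool arp_addrs_invalid(uint32_t ip, const uint8_t *hw)
{
	return ipv4_is_loopback(ip) || ipv4_is_multicast(ip) ||
	       ipv4_is_zeronet(ip) || ipv4_is_lbcast(ip) ||
	       eth_is_zero(hw) || eth_is_multicast(hw);
}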
@@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
	 */
	ret = !batadv_is_my_client(bat_priv, hw_dst);
 out:
+	if (ret)
+		kfree_skb(skb);
	/* if ret == false -> packet has to be delivered to the interface */
	return ret;
 }
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index d060c033e7de..125c8c6fcfad 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index dd07c7e3654f..074107f2cfaa 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index f0d129e323c8..039902dca4a6 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 9001208d1752..84bb2b18d711 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 13697f6e7113..509b2bf8c2f4 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f1d37cd81815..368219e026a9 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -457,6 +457,24 @@ out:
	batadv_hardif_free_ref(primary_if);
 }
 
+/**
+ * batadv_hardif_remove_interface_finish - cleans up the remains of a hardif
+ * @work: work queue item
+ *
+ * Free the parts of the hard interface which can not be removed under
+ * rtnl lock (to prevent deadlock situations).
+ */
+static void batadv_hardif_remove_interface_finish(struct work_struct *work)
+{
+	struct batadv_hard_iface *hard_iface;
+
+	hard_iface = container_of(work, struct batadv_hard_iface,
+				  cleanup_work);
+
+	batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
+	batadv_hardif_free_ref(hard_iface);
+}
+
 static struct batadv_hard_iface *
 batadv_hardif_add_interface(struct net_device *net_dev)
 {
@@ -484,6 +502,9 @@ batadv_hardif_add_interface(struct net_device *net_dev)
	hard_iface->soft_iface = NULL;
	hard_iface->if_status = BATADV_IF_NOT_IN_USE;
	INIT_LIST_HEAD(&hard_iface->list);
+	INIT_WORK(&hard_iface->cleanup_work,
+		  batadv_hardif_remove_interface_finish);
+
	/* extra reference for return */
	atomic_set(&hard_iface->refcount, 2);
 
@@ -518,8 +539,7 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
		return;
 
	hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
-	batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
-	batadv_hardif_free_ref(hard_iface);
+	queue_work(batadv_event_workqueue, &hard_iface->cleanup_work);
 }
 
 void batadv_hardif_remove_interfaces(void)
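The three hard-interface.c hunks above implement one pattern: as the new
kernel-doc says, parts of the teardown (here the sysfs/kobject removal) cannot
run under rtnl_lock without risking a deadlock, so removal only marks the
interface and queues the remaining cleanup onto a workqueue. A condensed
sketch of the pattern with simplified, hypothetical types:

#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_iface {
	struct kobject *sysfs_obj;
	struct work_struct cleanup_work;
};

static void my_iface_cleanup_finish(struct work_struct *work)
{
	struct my_iface *iface = container_of(work, struct my_iface,
					      cleanup_work);

	kobject_put(iface->sysfs_obj);	/* safe: rtnl is not held here */
	kfree(iface);
}

/* called with rtnl_lock held, e.g. from a netdev notifier */
static void my_iface_remove(struct my_iface *iface)
{
	INIT_WORK(&iface->cleanup_work, my_iface_cleanup_finish);
	schedule_work(&iface->cleanup_work);
}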
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 3732366e7445..308437d52e22 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 15a849c2d414..7198dafd3bf3 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index e05333905afd..1b4da72f2093 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -89,7 +89,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
  *
  * Returns the new hash value.
  */
-static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data,
+static inline uint32_t batadv_hash_bytes(uint32_t hash, const void *data,
					 uint32_t size)
 {
	const unsigned char *key = data;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 87ca8095b011..0ba6c899b2d3 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 29443a1dbb5c..1fcca37b6223 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index f65a222b7b83..21fe6987733b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2f85577086a7..ced08b936a96 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.5.0"
+#define BATADV_SOURCE_VERSION "2013.1.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -41,9 +41,11 @@
  * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
  */
 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 600000 /* in milliseconds */
 #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
+#define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
 #define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
@@ -276,9 +278,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
				      size_t count)
 {
-	int cpu = get_cpu();
-	per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
-	put_cpu();
+	this_cpu_add(bat_priv->bat_counters[idx], count);
 }
 
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
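The batadv_add_counter() hunk replaces the explicit get_cpu()/per_cpu_ptr()/
put_cpu() triple with this_cpu_add(), which performs the preemption-safe
per-CPU update in a single operation. A sketch of the two styles, assuming a
counter array allocated with __alloc_percpu() the way bat_counters is:

#include <linux/percpu.h>
#include <linux/types.h>

static uint64_t __percpu *counters;	/* e.g. from __alloc_percpu() */

static void counter_add_old(size_t idx, size_t count)
{
	int cpu = get_cpu();			/* disables preemption */

	per_cpu_ptr(counters, cpu)[idx] += count;
	put_cpu();				/* re-enables preemption */
}

static void counter_add_new(size_t idx, size_t count)
{
	this_cpu_add(counters[idx], count);	/* one preempt-safe op */
}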
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 8c32cf1c2dec..457ea445217c 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -29,14 +29,10 @@
 #include "soft-interface.h"
 #include "bridge_loop_avoidance.h"
 
-static void batadv_purge_orig(struct work_struct *work);
+/* hash class keys */
+static struct lock_class_key batadv_orig_hash_lock_class_key;
 
-static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
-	queue_delayed_work(batadv_event_workqueue,
-			   &bat_priv->orig_work, msecs_to_jiffies(1000));
-}
+static void batadv_purge_orig(struct work_struct *work);
 
 /* returns 1 if they are the same originator */
 static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
@@ -57,7 +53,14 @@ int batadv_originator_init(struct batadv_priv *bat_priv)
	if (!bat_priv->orig_hash)
		goto err;
 
-	batadv_start_purge_timer(bat_priv);
+	batadv_hash_set_lock_class(bat_priv->orig_hash,
+				   &batadv_orig_hash_lock_class_key);
+
+	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
+	queue_delayed_work(batadv_event_workqueue,
+			   &bat_priv->orig_work,
+			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
+
	return 0;
 
 err:
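batadv_hash_set_lock_class() gives the orig hash table its own lockdep class,
so that taking bucket locks of two different batman-adv hash tables in a
nested fashion no longer looks like recursive locking to lockdep. A helper of
this kind plausibly just tags every per-bucket spinlock, roughly:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct bucket_table {			/* simplified stand-in */
	spinlock_t *locks;
	uint32_t size;
};

static void table_set_lock_class(struct bucket_table *t,
				 struct lock_class_key *key)
{
	uint32_t i;

	for (i = 0; i < t->size; i++)
		lockdep_set_class(&t->locks[i], key);
}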
@@ -178,7 +181,6 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
-
			hlist_del_rcu(node);
			batadv_orig_node_free_ref(orig_node);
		}
@@ -285,7 +287,6 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
-
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;
 
@@ -293,7 +294,6 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
-
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
@@ -393,7 +393,9 @@ static void batadv_purge_orig(struct work_struct *work)
	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
-	batadv_start_purge_timer(bat_priv);
+	queue_delayed_work(batadv_event_workqueue,
+			   &bat_priv->orig_work,
+			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 }
 
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 9778e656dec7..286bf743e76a 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index cb6405bf755c..ed0aa89bbf8b 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index c8f61e395b74..ccab0bbdbb59 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index fda8c17df273..3f92ae248e83 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1aa1722d0187..60ba03fc8390 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -80,7 +80,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 
	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
-
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Adding route towards: %pM (via %pM)\n",
			   orig_node->orig, neigh_node->addr);
@@ -172,7 +171,6 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
-
		if (tmp_neigh_node == neigh_node)
			continue;
 
@@ -836,7 +834,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
	if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
	    batadv_frag_can_reassemble(skb,
				       neigh_node->if_incoming->net_dev->mtu)) {
-
		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
 
		if (ret == NET_RX_DROP)
@@ -1103,7 +1100,6 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
 
	/* packet for me */
	if (batadv_is_my_mac(unicast_packet->dest)) {
-
		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
 
		if (ret == NET_RX_DROP)
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 9262279ea667..99eeafaba407 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 4425af9dad40..80ca65fc89a1 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -155,8 +155,6 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
	/* start timer for this packet */
-	INIT_DELAYED_WORK(&forw_packet->delayed_work,
-			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
 }
@@ -210,6 +208,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;
 
+	INIT_DELAYED_WORK(&forw_packet->delayed_work,
+			  batadv_send_outstanding_bcast_packet);
+
	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;
 
@@ -330,7 +331,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
-
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
@@ -357,7 +357,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
-
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 0078dece1abc..38e662f619ac 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6b548fde8e04..2711e870f557 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -124,7 +124,6 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
		batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
	}
 
-	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	return 0;
 }
 
@@ -181,7 +180,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
		goto dropped;
 
	/* Register the client MAC in the transtable */
-	batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+	if (!is_multicast_ether_addr(ethhdr->h_source))
+		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
@@ -449,6 +449,30 @@ static void batadv_interface_setup(struct net_device *dev)
	memset(priv, 0, sizeof(*priv));
 }
 
+/**
+ * batadv_softif_destroy_finish - cleans up the remains of a softif
+ * @work: work queue item
+ *
+ * Free the parts of the soft interface which can not be removed under
+ * rtnl lock (to prevent deadlock situations).
+ */
+static void batadv_softif_destroy_finish(struct work_struct *work)
+{
+	struct batadv_priv *bat_priv;
+	struct net_device *soft_iface;
+
+	bat_priv = container_of(work, struct batadv_priv,
+				cleanup_work);
+	soft_iface = bat_priv->soft_iface;
+
+	batadv_debugfs_del_meshif(soft_iface);
+	batadv_sysfs_del_meshif(soft_iface);
+
+	rtnl_lock();
+	unregister_netdevice(soft_iface);
+	rtnl_unlock();
+}
+
 struct net_device *batadv_softif_create(const char *name)
 {
	struct net_device *soft_iface;
@@ -463,6 +487,8 @@ struct net_device *batadv_softif_create(const char *name)
		goto out;
 
	bat_priv = netdev_priv(soft_iface);
+	bat_priv->soft_iface = soft_iface;
+	INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
 
	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
@@ -480,7 +506,9 @@ struct net_device *batadv_softif_create(const char *name)
 
	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+#endif
 #ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
 #endif
@@ -491,7 +519,9 @@ struct net_device *batadv_softif_create(const char *name)
	atomic_set(&bat_priv->gw_bandwidth, 41);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
+#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
@@ -547,10 +577,10 @@ out:
 
 void batadv_softif_destroy(struct net_device *soft_iface)
 {
-	batadv_debugfs_del_meshif(soft_iface);
-	batadv_sysfs_del_meshif(soft_iface);
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+
	batadv_mesh_free(soft_iface);
-	unregister_netdevice(soft_iface);
+	queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
 }
 
 int batadv_softif_is_valid(const struct net_device *net_dev)
@@ -581,10 +611,10 @@ static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, "B.A.T.M.A.N. advanced");
-	strcpy(info->version, BATADV_SOURCE_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, "batman");
+	strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
+	strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+	strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
 }
 
 static u32 batadv_get_msglevel(struct net_device *dev)
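The ethtool hunk swaps the unbounded strcpy() calls for strlcpy(), which never
writes past the destination buffer and always NUL-terminates. A userspace
re-implementation that illustrates the semantics (the kernel ships strlcpy()
in lib/string.c):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;		/* len >= size means the copy was truncated */
}

int main(void)
{
	char drv[8];

	my_strlcpy(drv, "B.A.T.M.A.N. advanced", sizeof(drv));
	printf("%s\n", drv);	/* prints "B.A.T.M": truncated, terminated */
	return 0;
}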
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 07a08fed28b9..43182e5e603a 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 84a55cb19b0b..afbba319d73a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 3fd1412b0620..479acf4c16f4 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 22457a7952ba..d44672f4a349 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -29,6 +29,10 @@
 
 #include <linux/crc16.h>
 
+/* hash class keys */
+static struct lock_class_key batadv_tt_local_hash_lock_class_key;
+static struct lock_class_key batadv_tt_global_hash_lock_class_key;
+
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
				 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
@@ -48,13 +52,6 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
-static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
-	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
-			   msecs_to_jiffies(5000));
-}
-
 static struct batadv_tt_common_entry *
 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
 {
@@ -112,7 +109,6 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
					struct batadv_tt_global_entry,
					common);
	return tt_global_entry;
-
 }
 
 static void
@@ -235,6 +231,9 @@ static int batadv_tt_local_init(struct batadv_priv *bat_priv)
	if (!bat_priv->tt.local_hash)
		return -ENOMEM;
 
+	batadv_hash_set_lock_class(bat_priv->tt.local_hash,
+				   &batadv_tt_local_hash_lock_class_key);
+
	return 0;
 }
 
@@ -249,7 +248,6 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
			   batadv_choose_orig, tt_global->common.addr);
	batadv_tt_global_entry_free_ref(tt_global);
-
 }
 
 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
@@ -305,7 +303,11 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
	memcpy(tt_local->common.addr, addr, ETH_ALEN);
-	tt_local->common.flags = BATADV_NO_FLAGS;
+	/* The local entry has to be marked as NEW to avoid to send it in
+	 * a full table response going out before the next ttvn increment
+	 * (consistency check)
+	 */
+	tt_local->common.flags = BATADV_TT_CLIENT_NEW;
	if (batadv_is_wifi_iface(ifindex))
		tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
	atomic_set(&tt_local->common.refcount, 2);
@@ -316,12 +318,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
-	/* The local entry has to be marked as NEW to avoid to send it in
-	 * a full table response going out before the next ttvn increment
-	 * (consistency check)
-	 */
-	tt_local->common.flags |= BATADV_TT_CLIENT_NEW;
-
	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
				     batadv_choose_orig, &tt_local->common,
				     &tt_local->common.hash_entry);
@@ -472,18 +468,27 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct batadv_tt_common_entry *tt_common_entry;
+	struct batadv_tt_local_entry *tt_local;
	struct batadv_hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
+	int last_seen_secs;
+	int last_seen_msecs;
+	unsigned long last_seen_jiffies;
+	bool no_purge;
+	uint16_t np_flag = BATADV_TT_CLIENT_NOPURGE;
 
	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;
 
	seq_printf(seq,
-		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
+		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
+		   bat_priv->tt.local_crc);
+	seq_printf(seq, " %-13s %-7s %-10s\n", "Client", "Flags",
+		   "Last seen");
 
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
@@ -491,18 +496,29 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
-			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+			tt_local = container_of(tt_common_entry,
+						struct batadv_tt_local_entry,
+						common);
+			last_seen_jiffies = jiffies - tt_local->last_seen;
+			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+			last_seen_secs = last_seen_msecs / 1000;
+			last_seen_msecs = last_seen_msecs % 1000;
+
+			no_purge = tt_common_entry->flags & np_flag;
+
+			seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
+				   no_purge ? 'P' : '.',
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
-				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+				   no_purge ? 0 : last_seen_secs,
+				   no_purge ? 0 : last_seen_msecs);
		}
		rcu_read_unlock();
	}
@@ -627,7 +643,6 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
		batadv_tt_local_purge_list(bat_priv, head);
		spin_unlock_bh(list_lock);
	}
-
 }
 
 static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
@@ -676,6 +691,9 @@ static int batadv_tt_global_init(struct batadv_priv *bat_priv)
	if (!bat_priv->tt.global_hash)
		return -ENOMEM;
 
+	batadv_hash_set_lock_class(bat_priv->tt.global_hash,
+				   &batadv_tt_global_hash_lock_class_key);
+
	return 0;
 }
 
@@ -967,10 +985,11 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
	best_entry = batadv_transtable_best_orig(tt_global_entry);
	if (best_entry) {
		last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
-		seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n",
+		seq_printf(seq,
+			   " %c %pM (%3u) via %pM (%3u) (%#.4x) [%c%c%c]\n",
			   '*', tt_global_entry->common.addr,
			   best_entry->ttvn, best_entry->orig_node->orig,
-			   last_ttvn,
+			   last_ttvn, best_entry->orig_node->tt_crc,
			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
@@ -1012,8 +1031,9 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
-	seq_printf(seq, " %-13s %s %-15s %s %s\n",
-		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
+	seq_printf(seq, " %-13s %s %-15s %s (%-6s) %s\n",
+		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
+		   "Flags");
 
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
@@ -1049,7 +1069,6 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
-
 }
 
 static void
@@ -1825,7 +1844,6 @@ out:
	if (!ret)
		kfree_skb(skb);
	return ret;
-
 }
 
 static bool
@@ -2111,7 +2129,9 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
	if (ret < 0)
		return ret;
 
-	batadv_tt_start_timer(bat_priv);
+	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 
	return 1;
 }
@@ -2261,7 +2281,8 @@ static void batadv_tt_purge(struct work_struct *work)
	batadv_tt_req_purge(bat_priv);
	batadv_tt_roam_purge(bat_priv);
 
-	batadv_tt_start_timer(bat_priv);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 }
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
@@ -2352,7 +2373,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
		}
		spin_unlock_bh(list_lock);
	}
-
 }
 
 static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
@@ -2496,7 +2516,7 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
	    orig_node->tt_crc != tt_crc) {
 request_table:
		batadv_dbg(BATADV_DBG_TT, bat_priv,
-			   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
+			   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
			   orig_node->orig, ttvn, orig_ttvn, tt_crc,
			   orig_node->tt_crc, tt_num_changes);
		batadv_send_tt_request(bat_priv, orig_node, ttvn,
@@ -2549,7 +2569,6 @@ bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
	batadv_tt_local_entry_free_ref(tt_local_entry);
 out:
	return ret;
-
 }
 
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 46d4451a59ee..ab8e683b402f 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ae9ac9aca8c5..4cd87a0b5b80 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -24,6 +24,9 @@
 #include "bitarray.h"
 #include <linux/kernel.h>
 
+/**
+ * Maximum overhead for the encapsulation for a payload packet
+ */
 #define BATADV_HEADER_LEN \
	(ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
			sizeof(struct batadv_bcast_packet)))
@@ -51,6 +54,22 @@ struct batadv_hard_iface_bat_iv {
	atomic_t ogm_seqno;
 };
 
+/**
+ * struct batadv_hard_iface - network device known to batman-adv
+ * @list: list node for batadv_hardif_list
+ * @if_num: identificator of the interface
+ * @if_status: status of the interface for batman-adv
+ * @net_dev: pointer to the net_device
+ * @frag_seqno: last fragment sequence number sent by this interface
+ * @hardif_obj: kobject of the per interface sysfs "mesh" directory
+ * @refcount: number of contexts the object is used
+ * @batman_adv_ptype: packet type describing packets that should be processed by
+ *  batman-adv for this interface
+ * @soft_iface: the batman-adv interface which uses this network interface
+ * @rcu: struct used for freeing in an RCU-safe manner
+ * @bat_iv: BATMAN IV specific per hard interface data
+ * @cleanup_work: work queue callback item for hard interface deinit
+ */
 struct batadv_hard_iface {
	struct list_head list;
	int16_t if_num;
@@ -63,22 +82,52 @@ struct batadv_hard_iface {
	struct net_device *soft_iface;
	struct rcu_head rcu;
	struct batadv_hard_iface_bat_iv bat_iv;
+	struct work_struct cleanup_work;
 };
 
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @primary_addr: hosts primary interface address
- * @last_seen: when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @gw_flags: flags related to gateway class
- * @flags: for now only VIS_SERVER flag
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_bcast_seqno: last broadcast sequence number received by this host
- *
- * @candidates: how many candidates are available
- * @selected: next bonding candidate
+ * @orig: originator ethernet address
+ * @primary_addr: hosts primary interface address
+ * @router: router that should be used to reach this originator
+ * @batadv_dat_addr_t: address of the orig node in the distributed hash
+ * @bcast_own: bitfield containing the number of our OGMs this orig_node
+ *  rebroadcasted "back" to us (relative to last_real_seqno)
+ * @bcast_own_sum: counted result of bcast_own
+ * @last_seen: time when last packet from this node was received
+ * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @batman_seqno_reset: time when the batman seqno window was reset
+ * @gw_flags: flags related to gateway class
+ * @flags: for now only VIS_SERVER flag
+ * @last_ttvn: last seen translation table version number
+ * @tt_crc: CRC of the translation table
+ * @tt_buff: last tt changeset this node received from the orig node
+ * @tt_buff_len: length of the last tt changeset this node received from the
+ *  orig node
+ * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
+ * @tt_size: number of global TT entries announced by the orig node
+ * @tt_initialised: bool keeping track of whether or not this node have received
+ *  any translation table information from the orig node yet
+ * @last_real_seqno: last and best known sequence number
+ * @last_ttl: ttl of last received packet
+ * @bcast_bits: bitfield containing the info which payload broadcast originated
+ *  from this orig node this host already has seen (relative to
+ *  last_bcast_seqno)
+ * @last_bcast_seqno: last broadcast sequence number received by this host
+ * @neigh_list: list of potential next hop neighbor towards this orig node
+ * @frag_list: fragmentation buffer list for fragment re-assembly
+ * @last_frag_packet: time when last fragmented packet from this node was
+ *  received
+ * @neigh_list_lock: lock protecting neigh_list, router and bonding_list
+ * @hash_entry: hlist node for batadv_priv::orig_hash
+ * @bat_priv: pointer to soft_iface this orig node belongs to
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
+ *  neigh_node->real_bits & neigh_node->real_packet_count
+ * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
+ * @bond_candidates: how many candidates are available
+ * @bond_list: list of bonding candidates
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_orig_node {
	uint8_t orig[ETH_ALEN];
@@ -94,11 +143,11 @@ struct batadv_orig_node {
	unsigned long batman_seqno_reset;
	uint8_t gw_flags;
	uint8_t flags;
-	atomic_t last_ttvn; /* last seen translation table version number */
+	atomic_t last_ttvn;
	uint16_t tt_crc;
	unsigned char *tt_buff;
	int16_t tt_buff_len;
-	spinlock_t tt_buff_lock; /* protects tt_buff */
+	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
	atomic_t tt_size;
	bool tt_initialised;
	uint32_t last_real_seqno;
@@ -107,23 +156,31 @@ struct batadv_orig_node {
	uint32_t last_bcast_seqno;
	struct hlist_head neigh_list;
	struct list_head frag_list;
-	spinlock_t neigh_list_lock; /* protects neigh_list and router */
-	atomic_t refcount;
-	struct rcu_head rcu;
+	unsigned long last_frag_packet;
+	/* neigh_list_lock protects: neigh_list, router & bonding_list */
+	spinlock_t neigh_list_lock;
	struct hlist_node hash_entry;
	struct batadv_priv *bat_priv;
-	unsigned long last_frag_packet;
	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
-	 * neigh_node->real_bits, neigh_node->real_packet_count
+	 * neigh_node->real_bits & neigh_node->real_packet_count
	 */
	spinlock_t ogm_cnt_lock;
-	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
+	/* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
	spinlock_t bcast_seqno_lock;
-	spinlock_t tt_list_lock; /* protects tt_list */
	atomic_t bond_candidates;
	struct list_head bond_list;
+	atomic_t refcount;
+	struct rcu_head rcu;
 };
 
+/**
+ * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
+ * @list: list node for batadv_priv_gw::list
+ * @orig_node: pointer to corresponding orig node
+ * @deleted: this struct is scheduled for deletion
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
 struct batadv_gw_node {
	struct hlist_node list;
	struct batadv_orig_node *orig_node;
@@ -132,13 +189,28 @@ struct batadv_gw_node {
	struct rcu_head rcu;
 };
 
-/* batadv_neigh_node
- * @last_seen: when last packet via this neighbor was received
+/**
+ * struct batadv_neigh_node - structure for single hop neighbors
+ * @list: list node for batadv_orig_node::neigh_list
+ * @addr: mac address of neigh node
+ * @tq_recv: ring buffer of received TQ values from this neigh node
+ * @tq_index: ring buffer index
+ * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+ * @last_ttl: last received ttl from this neigh node
+ * @bonding_list: list node for batadv_orig_node::bond_list
+ * @last_seen: when last packet via this neighbor was received
+ * @real_bits: bitfield containing the number of OGMs received from this neigh
+ *  node (relative to orig_node->last_real_seqno)
+ * @real_packet_count: counted result of real_bits
+ * @orig_node: pointer to corresponding orig_node
+ * @if_incoming: pointer to incoming hard interface
+ * @lq_update_lock: lock protecting tq_recv & tq_index
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_neigh_node {
	struct hlist_node list;
	uint8_t addr[ETH_ALEN];
-	uint8_t real_packet_count;
	uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
	uint8_t tq_index;
	uint8_t tq_avg;
@@ -146,13 +218,20 @@ struct batadv_neigh_node {
	struct list_head bonding_list;
	unsigned long last_seen;
	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-	atomic_t refcount;
-	struct rcu_head rcu;
+	uint8_t real_packet_count;
	struct batadv_orig_node *orig_node;
	struct batadv_hard_iface *if_incoming;
-	spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
+	spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
+	atomic_t refcount;
+	struct rcu_head rcu;
 };
 
+/**
+ * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
+ * @orig[ETH_ALEN]: mac address of orig node orginating the broadcast
+ * @crc: crc32 checksum of broadcast payload
+ * @entrytime: time when the broadcast packet was received
+ */
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bcast_duplist_entry {
	uint8_t orig[ETH_ALEN];
@@ -161,6 +240,33 @@ struct batadv_bcast_duplist_entry {
 };
 #endif
 
+/**
+ * enum batadv_counters - indices for traffic counters
+ * @BATADV_CNT_TX: transmitted payload traffic packet counter
+ * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
+ * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
+ * @BATADV_CNT_RX: received payload traffic packet counter
+ * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
+ * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
+ * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+ * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
+ * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
+ * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+ * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
+ * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+ * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
+ * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
+ * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+ * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
+ * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+ * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+ * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
+ * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+ * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
+ * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
+ *  counter
+ * @BATADV_CNT_NUM: number of traffic counters
+ */
 enum batadv_counters {
	BATADV_CNT_TX,
	BATADV_CNT_TX_BYTES,
@@ -192,14 +298,23 @@ enum batadv_counters {
 /**
  * struct batadv_priv_tt - per mesh interface translation table data
  * @vn: translation table version number
+ * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
  * @local_changes: changes registered in an originator interval
- * @poss_change: Detect an ongoing roaming phase. If true, then this node
- * received a roaming_adv and has to inspect every packet directed to it to
- * check whether it still is the true destination or not. This flag will be
- * reset to false as soon as the this node's ttvn is increased
  * @changes_list: tracks tt local changes within an originator interval
- * @req_list: list of pending tt_requests
+ * @local_hash: local translation table hash table
+ * @global_hash: global translation table hash table
+ * @req_list: list of pending & unanswered tt_requests
+ * @roam_list: list of the last roaming events of each client limiting the
+ *  number of roaming events to avoid route flapping
+ * @changes_list_lock: lock protecting changes_list
+ * @req_list_lock: lock protecting req_list
+ * @roam_list_lock: lock protecting roam_list
+ * @local_entry_num: number of entries in the local hash table
  * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ * @last_changeset: last tt changeset this host has generated
+ * @last_changeset_len: length of last tt changeset this host has generated
+ * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
+ * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
	atomic_t vn;
@@ -217,36 +332,83 @@ struct batadv_priv_tt {
217 uint16_t local_crc; 332 uint16_t local_crc;
218 unsigned char *last_changeset; 333 unsigned char *last_changeset;
219 int16_t last_changeset_len; 334 int16_t last_changeset_len;
220 spinlock_t last_changeset_lock; /* protects last_changeset */ 335 /* protects last_changeset & last_changeset_len */
336 spinlock_t last_changeset_lock;
221 struct delayed_work work; 337 struct delayed_work work;
222}; 338};
223 339
340/**
 341 * struct batadv_priv_bla - per mesh interface bridge loop avoidance data
 342 * @num_requests: number of bla requests in flight
343 * @claim_hash: hash table containing mesh nodes this host has claimed
344 * @backbone_hash: hash table containing all detected backbone gateways
345 * @bcast_duplist: recently received broadcast packets array (for broadcast
346 * duplicate suppression)
347 * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
348 * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
349 * @claim_dest: local claim data (e.g. claim group)
350 * @work: work queue callback item for cleanups & bla announcements
351 */
224#ifdef CONFIG_BATMAN_ADV_BLA 352#ifdef CONFIG_BATMAN_ADV_BLA
225struct batadv_priv_bla { 353struct batadv_priv_bla {
226 atomic_t num_requests; /* number of bla requests in flight */ 354 atomic_t num_requests;
227 struct batadv_hashtable *claim_hash; 355 struct batadv_hashtable *claim_hash;
228 struct batadv_hashtable *backbone_hash; 356 struct batadv_hashtable *backbone_hash;
229 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE]; 357 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
230 int bcast_duplist_curr; 358 int bcast_duplist_curr;
231 /* protects bcast_duplist and bcast_duplist_curr */ 359 /* protects bcast_duplist & bcast_duplist_curr */
232 spinlock_t bcast_duplist_lock; 360 spinlock_t bcast_duplist_lock;
233 struct batadv_bla_claim_dst claim_dest; 361 struct batadv_bla_claim_dst claim_dest;
234 struct delayed_work work; 362 struct delayed_work work;
235}; 363};
236#endif 364#endif
237 365
366/**
 367 * struct batadv_priv_debug_log - debug logging data
 368 * @log_buff: buffer holding the logs (ring buffer)
369 * @log_start: index of next character to read
370 * @log_end: index of next character to write
371 * @lock: lock protecting log_buff, log_start & log_end
372 * @queue_wait: log reader's wait queue
373 */
374#ifdef CONFIG_BATMAN_ADV_DEBUG
375struct batadv_priv_debug_log {
376 char log_buff[BATADV_LOG_BUF_LEN];
377 unsigned long log_start;
378 unsigned long log_end;
379 spinlock_t lock; /* protects log_buff, log_start and log_end */
380 wait_queue_head_t queue_wait;
381};
382#endif
383
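
log_start and log_end are free-running indices; masking with BATADV_LOG_BUF_LEN - 1 maps them into the buffer, which only works because BATADV_LOG_BUF_LEN is a power of two. An illustrative append path (the caller is assumed to hold @lock):

	static void debug_log_emit_char(struct batadv_priv_debug_log *debug_log,
					char c)
	{
		/* indices run freely; masking maps them into the buffer */
		debug_log->log_buff[debug_log->log_end &
				    (BATADV_LOG_BUF_LEN - 1)] = c;
		debug_log->log_end++;

		/* writer overtook the reader: drop the oldest characters */
		if (debug_log->log_end - debug_log->log_start >
		    BATADV_LOG_BUF_LEN)
			debug_log->log_start = debug_log->log_end -
					       BATADV_LOG_BUF_LEN;
	}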
384/**
385 * struct batadv_priv_gw - per mesh interface gateway data
386 * @list: list of available gateway nodes
387 * @list_lock: lock protecting gw_list & curr_gw
388 * @curr_gw: pointer to currently selected gateway node
389 * @reselect: bool indicating a gateway re-selection is in progress
390 */
238struct batadv_priv_gw { 391struct batadv_priv_gw {
239 struct hlist_head list; 392 struct hlist_head list;
240 spinlock_t list_lock; /* protects gw_list and curr_gw */ 393 spinlock_t list_lock; /* protects gw_list & curr_gw */
241 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */ 394 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
242 atomic_t reselect; 395 atomic_t reselect;
243}; 396};
244 397
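
Because curr_gw is annotated __rcu, readers must dereference it inside an RCU read-side critical section and take a reference before using it outside; a minimal sketch, assuming struct batadv_gw_node carries an atomic_t refcount like the other objects in this header:

	static struct batadv_gw_node *
	gw_get_selected(struct batadv_priv *bat_priv)
	{
		struct batadv_gw_node *gw_node;

		rcu_read_lock();
		gw_node = rcu_dereference(bat_priv->gw.curr_gw);

		/* only hand out the node if its refcount is still alive */
		if (gw_node && !atomic_inc_not_zero(&gw_node->refcount))
			gw_node = NULL;
		rcu_read_unlock();

		return gw_node;
	}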
398/**
399 * struct batadv_priv_vis - per mesh interface vis data
 400 * @send_list: list of batadv_vis_info packets to be sent
401 * @hash: hash table containing vis data from other nodes in the network
402 * @hash_lock: lock protecting the hash table
403 * @list_lock: lock protecting my_info::recv_list
404 * @work: work queue callback item for vis packet sending
405 * @my_info: holds this node's vis data sent on a regular basis
406 */
245struct batadv_priv_vis { 407struct batadv_priv_vis {
246 struct list_head send_list; 408 struct list_head send_list;
247 struct batadv_hashtable *hash; 409 struct batadv_hashtable *hash;
248 spinlock_t hash_lock; /* protects hash */ 410 spinlock_t hash_lock; /* protects hash */
249 spinlock_t list_lock; /* protects info::recv_list */ 411 spinlock_t list_lock; /* protects my_info::recv_list */
250 struct delayed_work work; 412 struct delayed_work work;
251 struct batadv_vis_info *my_info; 413 struct batadv_vis_info *my_info;
252}; 414};
@@ -265,30 +427,78 @@ struct batadv_priv_dat {
265}; 427};
266#endif 428#endif
267 429
430/**
431 * struct batadv_priv - per mesh interface data
432 * @mesh_state: current status of the mesh (inactive/active/deactivating)
433 * @soft_iface: net device which holds this struct as private data
434 * @stats: structure holding the data for the ndo_get_stats() call
435 * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
436 * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
437 * @bonding: bool indicating whether traffic bonding is enabled
438 * @fragmentation: bool indicating whether traffic fragmentation is enabled
439 * @ap_isolation: bool indicating whether ap isolation is enabled
440 * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
441 * enabled
442 * @distributed_arp_table: bool indicating whether distributed ARP table is
443 * enabled
444 * @vis_mode: vis operation: client or server (see batadv_vis_packettype)
445 * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
446 * @gw_sel_class: gateway selection class (applies if gw_mode client)
447 * @gw_bandwidth: gateway announced bandwidth (applies if gw_mode server)
448 * @orig_interval: OGM broadcast interval in milliseconds
449 * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
450 * @log_level: configured log level (see batadv_dbg_level)
451 * @bcast_seqno: last sent broadcast packet sequence number
452 * @bcast_queue_left: number of remaining buffered broadcast packet slots
453 * @batman_queue_left: number of remaining OGM packet slots
454 * @num_ifaces: number of interfaces assigned to this mesh interface
455 * @mesh_obj: kobject for sysfs mesh subdirectory
456 * @debug_dir: dentry for debugfs batman-adv subdirectory
457 * @forw_bat_list: list of aggregated OGMs that will be forwarded
458 * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
459 * @orig_hash: hash table containing mesh participants (orig nodes)
460 * @forw_bat_list_lock: lock protecting forw_bat_list
461 * @forw_bcast_list_lock: lock protecting forw_bcast_list
462 * @orig_work: work queue callback item for orig node purging
463 * @cleanup_work: work queue callback item for soft interface deinit
 464 * @primary_if: the hard-interface selected as primary among those assigned
 465 * to this mesh interface
466 * @bat_algo_ops: routing algorithm used by this mesh interface
 467 * @bla: bridge loop avoidance data
 468 * @debug_log: debug logging data
469 * @gw: gateway data
470 * @tt: translation table data
471 * @vis: vis data
472 * @dat: distributed arp table data
473 */
268struct batadv_priv { 474struct batadv_priv {
269 atomic_t mesh_state; 475 atomic_t mesh_state;
476 struct net_device *soft_iface;
270 struct net_device_stats stats; 477 struct net_device_stats stats;
271 uint64_t __percpu *bat_counters; /* Per cpu counters */ 478 uint64_t __percpu *bat_counters; /* Per cpu counters */
272 atomic_t aggregated_ogms; /* boolean */ 479 atomic_t aggregated_ogms;
273 atomic_t bonding; /* boolean */ 480 atomic_t bonding;
274 atomic_t fragmentation; /* boolean */ 481 atomic_t fragmentation;
275 atomic_t ap_isolation; /* boolean */ 482 atomic_t ap_isolation;
276 atomic_t bridge_loop_avoidance; /* boolean */ 483#ifdef CONFIG_BATMAN_ADV_BLA
484 atomic_t bridge_loop_avoidance;
485#endif
277#ifdef CONFIG_BATMAN_ADV_DAT 486#ifdef CONFIG_BATMAN_ADV_DAT
278 atomic_t distributed_arp_table; /* boolean */ 487 atomic_t distributed_arp_table;
488#endif
489 atomic_t vis_mode;
490 atomic_t gw_mode;
491 atomic_t gw_sel_class;
492 atomic_t gw_bandwidth;
493 atomic_t orig_interval;
494 atomic_t hop_penalty;
495#ifdef CONFIG_BATMAN_ADV_DEBUG
496 atomic_t log_level;
279#endif 497#endif
280 atomic_t vis_mode; /* VIS_TYPE_* */
281 atomic_t gw_mode; /* GW_MODE_* */
282 atomic_t gw_sel_class; /* uint */
283 atomic_t gw_bandwidth; /* gw bandwidth */
284 atomic_t orig_interval; /* uint */
285 atomic_t hop_penalty; /* uint */
286 atomic_t log_level; /* uint */
287 atomic_t bcast_seqno; 498 atomic_t bcast_seqno;
288 atomic_t bcast_queue_left; 499 atomic_t bcast_queue_left;
289 atomic_t batman_queue_left; 500 atomic_t batman_queue_left;
290 char num_ifaces; 501 char num_ifaces;
291 struct batadv_debug_log *debug_log;
292 struct kobject *mesh_obj; 502 struct kobject *mesh_obj;
293 struct dentry *debug_dir; 503 struct dentry *debug_dir;
294 struct hlist_head forw_bat_list; 504 struct hlist_head forw_bat_list;
@@ -297,11 +507,15 @@ struct batadv_priv {
297 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 507 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
298 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 508 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
299 struct delayed_work orig_work; 509 struct delayed_work orig_work;
510 struct work_struct cleanup_work;
300 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ 511 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
301 struct batadv_algo_ops *bat_algo_ops; 512 struct batadv_algo_ops *bat_algo_ops;
302#ifdef CONFIG_BATMAN_ADV_BLA 513#ifdef CONFIG_BATMAN_ADV_BLA
303 struct batadv_priv_bla bla; 514 struct batadv_priv_bla bla;
304#endif 515#endif
516#ifdef CONFIG_BATMAN_ADV_DEBUG
517 struct batadv_priv_debug_log *debug_log;
518#endif
305 struct batadv_priv_gw gw; 519 struct batadv_priv_gw gw;
306 struct batadv_priv_tt tt; 520 struct batadv_priv_tt tt;
307 struct batadv_priv_vis vis; 521 struct batadv_priv_vis vis;
@@ -310,21 +524,97 @@ struct batadv_priv {
310#endif 524#endif
311}; 525};
312 526
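
bat_counters is a per-CPU array indexed by enum batadv_counters: writers bump the slot of their own CPU without locking, and readers sum over all CPUs. An illustrative pair of helpers:

	static void counter_add(struct batadv_priv *bat_priv, size_t idx,
				size_t count)
	{
		/* lockless: only touches the current CPU's slot */
		this_cpu_add(bat_priv->bat_counters[idx], count);
	}

	static uint64_t counter_sum(struct batadv_priv *bat_priv, size_t idx)
	{
		uint64_t *counters, sum = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
			sum += counters[idx];
		}

		return sum;
	}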
527/**
528 * struct batadv_socket_client - layer2 icmp socket client data
529 * @queue_list: packet queue for packets destined for this socket client
530 * @queue_len: number of packets in the packet queue (queue_list)
531 * @index: socket client's index in the batadv_socket_client_hash
532 * @lock: lock protecting queue_list, queue_len & index
533 * @queue_wait: socket client's wait queue
534 * @bat_priv: pointer to soft_iface this client belongs to
535 */
313struct batadv_socket_client { 536struct batadv_socket_client {
314 struct list_head queue_list; 537 struct list_head queue_list;
315 unsigned int queue_len; 538 unsigned int queue_len;
316 unsigned char index; 539 unsigned char index;
317 spinlock_t lock; /* protects queue_list, queue_len, index */ 540 spinlock_t lock; /* protects queue_list, queue_len & index */
318 wait_queue_head_t queue_wait; 541 wait_queue_head_t queue_wait;
319 struct batadv_priv *bat_priv; 542 struct batadv_priv *bat_priv;
320}; 543};
321 544
545/**
546 * struct batadv_socket_packet - layer2 icmp packet for socket client
547 * @list: list node for batadv_socket_client::queue_list
548 * @icmp_len: size of the layer2 icmp packet
549 * @icmp_packet: layer2 icmp packet
550 */
322struct batadv_socket_packet { 551struct batadv_socket_packet {
323 struct list_head list; 552 struct list_head list;
324 size_t icmp_len; 553 size_t icmp_len;
325 struct batadv_icmp_packet_rr icmp_packet; 554 struct batadv_icmp_packet_rr icmp_packet;
326}; 555};
327 556
557/**
558 * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
559 * @orig: originator address of backbone node (mac address of primary iface)
560 * @vid: vlan id this gateway was detected on
561 * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
562 * @bat_priv: pointer to soft_iface this backbone gateway belongs to
563 * @lasttime: last time we heard of this backbone gw
564 * @wait_periods: grace time for bridge forward delays and bla group forming at
 565 * bootup phase - no bcast traffic is forwarded until it has elapsed
 566 * @request_sent: if this bool is set to true we are out of sync with this
 567 * backbone gateway - no bcast traffic is forwarded until the situation is
 568 * resolved
569 * @crc: crc16 checksum over all claims
 570 * @refcount: number of contexts the object is used in
571 * @rcu: struct used for freeing in an RCU-safe manner
572 */
573#ifdef CONFIG_BATMAN_ADV_BLA
574struct batadv_bla_backbone_gw {
575 uint8_t orig[ETH_ALEN];
576 short vid;
577 struct hlist_node hash_entry;
578 struct batadv_priv *bat_priv;
579 unsigned long lasttime;
580 atomic_t wait_periods;
581 atomic_t request_sent;
582 uint16_t crc;
583 atomic_t refcount;
584 struct rcu_head rcu;
585};
586
587/**
588 * struct batadv_bla_claim - claimed non-mesh client structure
589 * @addr: mac address of claimed non-mesh client
590 * @vid: vlan id this client was detected on
 591 * @backbone_gw: pointer to backbone gw claiming this client
592 * @lasttime: last time we heard of claim (locals only)
593 * @hash_entry: hlist node for batadv_priv_bla::claim_hash
 594 * @refcount: number of contexts the object is used in
595 * @rcu: struct used for freeing in an RCU-safe manner
596 */
597struct batadv_bla_claim {
598 uint8_t addr[ETH_ALEN];
599 short vid;
600 struct batadv_bla_backbone_gw *backbone_gw;
601 unsigned long lasttime;
602 struct hlist_node hash_entry;
603 struct rcu_head rcu;
604 atomic_t refcount;
605};
606#endif
607
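
Both BLA objects follow the refcount-plus-RCU release pattern used throughout this header: the last put schedules the free after a grace period. A minimal sketch for the claim object (the real release path would also drop the backbone_gw reference):

	static void claim_free_rcu(struct rcu_head *rcu)
	{
		struct batadv_bla_claim *claim;

		claim = container_of(rcu, struct batadv_bla_claim, rcu);
		kfree(claim);
	}

	static void claim_put(struct batadv_bla_claim *claim)
	{
		/* free only once the last reference is dropped */
		if (atomic_dec_and_test(&claim->refcount))
			call_rcu(&claim->rcu, claim_free_rcu);
	}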
608/**
609 * struct batadv_tt_common_entry - tt local & tt global common data
610 * @addr: mac address of non-mesh client
611 * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
612 * batadv_priv_tt::global_hash
613 * @flags: various state handling flags (see batadv_tt_client_flags)
614 * @added_at: timestamp used for purging stale tt common entries
 615 * @refcount: number of contexts the object is used in
616 * @rcu: struct used for freeing in an RCU-safe manner
617 */
328struct batadv_tt_common_entry { 618struct batadv_tt_common_entry {
329 uint8_t addr[ETH_ALEN]; 619 uint8_t addr[ETH_ALEN];
330 struct hlist_node hash_entry; 620 struct hlist_node hash_entry;
@@ -334,62 +624,76 @@ struct batadv_tt_common_entry {
334 struct rcu_head rcu; 624 struct rcu_head rcu;
335}; 625};
336 626
627/**
628 * struct batadv_tt_local_entry - translation table local entry data
629 * @common: general translation table data
630 * @last_seen: timestamp used for purging stale tt local entries
631 */
337struct batadv_tt_local_entry { 632struct batadv_tt_local_entry {
338 struct batadv_tt_common_entry common; 633 struct batadv_tt_common_entry common;
339 unsigned long last_seen; 634 unsigned long last_seen;
340}; 635};
341 636
637/**
638 * struct batadv_tt_global_entry - translation table global entry data
639 * @common: general translation table data
640 * @orig_list: list of orig nodes announcing this non-mesh client
641 * @list_lock: lock protecting orig_list
642 * @roam_at: time at which TT_GLOBAL_ROAM was set
643 */
342struct batadv_tt_global_entry { 644struct batadv_tt_global_entry {
343 struct batadv_tt_common_entry common; 645 struct batadv_tt_common_entry common;
344 struct hlist_head orig_list; 646 struct hlist_head orig_list;
345 spinlock_t list_lock; /* protects the list */ 647 spinlock_t list_lock; /* protects orig_list */
346 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 648 unsigned long roam_at;
347}; 649};
348 650
651/**
652 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
653 * @orig_node: pointer to orig node announcing this non-mesh client
654 * @ttvn: translation table version number which added the non-mesh client
655 * @list: list node for batadv_tt_global_entry::orig_list
 656 * @refcount: number of contexts the object is used in
657 * @rcu: struct used for freeing in an RCU-safe manner
658 */
349struct batadv_tt_orig_list_entry { 659struct batadv_tt_orig_list_entry {
350 struct batadv_orig_node *orig_node; 660 struct batadv_orig_node *orig_node;
351 uint8_t ttvn; 661 uint8_t ttvn;
352 atomic_t refcount;
353 struct rcu_head rcu;
354 struct hlist_node list; 662 struct hlist_node list;
355};
356
357#ifdef CONFIG_BATMAN_ADV_BLA
358struct batadv_backbone_gw {
359 uint8_t orig[ETH_ALEN];
360 short vid; /* used VLAN ID */
361 struct hlist_node hash_entry;
362 struct batadv_priv *bat_priv;
363 unsigned long lasttime; /* last time we heard of this backbone gw */
364 atomic_t wait_periods;
365 atomic_t request_sent;
366 atomic_t refcount; 663 atomic_t refcount;
367 struct rcu_head rcu; 664 struct rcu_head rcu;
368 uint16_t crc; /* crc checksum over all claims */
369};
370
371struct batadv_claim {
372 uint8_t addr[ETH_ALEN];
373 short vid;
374 struct batadv_backbone_gw *backbone_gw;
375 unsigned long lasttime; /* last time we heard of claim (locals only) */
376 struct rcu_head rcu;
377 atomic_t refcount;
378 struct hlist_node hash_entry;
379}; 665};
380#endif
381 666
667/**
 668 * struct batadv_tt_change_node - structure for tt changes that occurred
669 * @list: list node for batadv_priv_tt::changes_list
670 * @change: holds the actual translation table diff data
671 */
382struct batadv_tt_change_node { 672struct batadv_tt_change_node {
383 struct list_head list; 673 struct list_head list;
384 struct batadv_tt_change change; 674 struct batadv_tt_change change;
385}; 675};
386 676
677/**
678 * struct batadv_tt_req_node - data to keep track of the tt requests in flight
 679 * @addr: mac address of the originator this request was sent to
680 * @issued_at: timestamp used for purging stale tt requests
681 * @list: list node for batadv_priv_tt::req_list
682 */
387struct batadv_tt_req_node { 683struct batadv_tt_req_node {
388 uint8_t addr[ETH_ALEN]; 684 uint8_t addr[ETH_ALEN];
389 unsigned long issued_at; 685 unsigned long issued_at;
390 struct list_head list; 686 struct list_head list;
391}; 687};
392 688
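
Entries on req_list are purged by age via issued_at; an illustrative purge pass, assuming the BATADV_TT_REQUEST_TIMEOUT constant from main.h:

	static void tt_req_purge(struct batadv_priv *bat_priv)
	{
		struct batadv_tt_req_node *node, *safe;

		spin_lock_bh(&bat_priv->tt.req_list_lock);
		list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list,
					 list) {
			/* drop requests that stayed unanswered too long */
			if (batadv_has_timed_out(node->issued_at,
						 BATADV_TT_REQUEST_TIMEOUT)) {
				list_del(&node->list);
				kfree(node);
			}
		}
		spin_unlock_bh(&bat_priv->tt.req_list_lock);
	}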
689/**
690 * struct batadv_tt_roam_node - roaming client data
691 * @addr: mac address of the client in the roaming phase
692 * @counter: number of allowed roaming events per client within a single
693 * OGM interval (changes are committed with each OGM)
694 * @first_time: timestamp used for purging stale roaming node entries
695 * @list: list node for batadv_priv_tt::roam_list
696 */
393struct batadv_tt_roam_node { 697struct batadv_tt_roam_node {
394 uint8_t addr[ETH_ALEN]; 698 uint8_t addr[ETH_ALEN];
395 atomic_t counter; 699 atomic_t counter;
@@ -397,8 +701,19 @@ struct batadv_tt_roam_node {
397 struct list_head list; 701 struct list_head list;
398}; 702};
399 703
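
counter and first_time together implement a simple token bucket: at most BATADV_ROAMING_MAX_COUNT events are accepted per BATADV_ROAMING_MAX_TIME window. An illustrative check (allocating a new node for a first-time roamer is omitted):

	static bool tt_check_roam_count(struct batadv_priv *bat_priv,
					uint8_t *client)
	{
		struct batadv_tt_roam_node *node;
		bool ret = false;

		spin_lock_bh(&bat_priv->tt.roam_list_lock);
		list_for_each_entry(node, &bat_priv->tt.roam_list, list) {
			if (!batadv_compare_eth(node->addr, client))
				continue;

			if (batadv_has_timed_out(node->first_time,
						 BATADV_ROAMING_MAX_TIME)) {
				/* window expired: refill the token bucket */
				node->first_time = jiffies;
				atomic_set(&node->counter,
					   BATADV_ROAMING_MAX_COUNT);
			}

			/* each accepted roaming event consumes one token */
			ret = atomic_dec_not_zero(&node->counter);
			break;
		}
		spin_unlock_bh(&bat_priv->tt.roam_list_lock);

		return ret;
	}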
400/* forw_packet - structure for forw_list maintaining packets to be 704/**
401 * send/forwarded 705 * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
 706 * @list: list node for batadv_priv::forw_bat_list or forw_bcast_list
707 * @send_time: execution time for delayed_work (packet sending)
708 * @own: bool for locally generated packets (local OGMs are re-scheduled after
709 * sending)
710 * @skb: bcast packet's skb buffer
711 * @packet_len: size of aggregated OGM packet inside the skb buffer
712 * @direct_link_flags: direct link flags for aggregated OGM packets
713 * @num_packets: counter for bcast packet retransmission
714 * @delayed_work: work queue callback item for packet sending
 715 * @if_incoming: pointer to the incoming hard-iface or the primary iface for
 716 * locally generated packets
402 */ 717 */
403struct batadv_forw_packet { 718struct batadv_forw_packet {
404 struct hlist_node list; 719 struct hlist_node list;
@@ -412,72 +727,98 @@ struct batadv_forw_packet {
412 struct batadv_hard_iface *if_incoming; 727 struct batadv_hard_iface *if_incoming;
413}; 728};
414 729
415/* While scanning for vis-entries of a particular vis-originator 730/**
416 * this list collects its interfaces to create a subgraph/cluster 731 * struct batadv_frag_packet_list_entry - storage for fragment packet
417 * out of them later 732 * @list: list node for orig_node::frag_list
733 * @seqno: sequence number of the fragment
734 * @skb: fragment's skb buffer
418 */ 735 */
419struct batadv_if_list_entry {
420 uint8_t addr[ETH_ALEN];
421 bool primary;
422 struct hlist_node list;
423};
424
425struct batadv_debug_log {
426 char log_buff[BATADV_LOG_BUF_LEN];
427 unsigned long log_start;
428 unsigned long log_end;
429 spinlock_t lock; /* protects log_buff, log_start and log_end */
430 wait_queue_head_t queue_wait;
431};
432
433struct batadv_frag_packet_list_entry { 736struct batadv_frag_packet_list_entry {
434 struct list_head list; 737 struct list_head list;
435 uint16_t seqno; 738 uint16_t seqno;
436 struct sk_buff *skb; 739 struct sk_buff *skb;
437}; 740};
438 741
742/**
743 * struct batadv_vis_info - local data for vis information
744 * @first_seen: timestamp used for purging stale vis info entries
 745 * @recv_list: list of server-neighbors we have received this packet from; it
 746 * should not be forwarded to them again. List elements are of type struct
 747 * batadv_vis_recvlist_node
748 * @send_list: list of packets to be forwarded
 749 * @refcount: number of contexts the object is used in
750 * @hash_entry: hlist node for batadv_priv_vis::hash
 751 * @bat_priv: pointer to soft_iface this vis info belongs to
752 * @skb_packet: contains the vis packet
753 */
439struct batadv_vis_info { 754struct batadv_vis_info {
440 unsigned long first_seen; 755 unsigned long first_seen;
441 /* list of server-neighbors we received a vis-packet
442 * from. we should not reply to them.
443 */
444 struct list_head recv_list; 756 struct list_head recv_list;
445 struct list_head send_list; 757 struct list_head send_list;
446 struct kref refcount; 758 struct kref refcount;
447 struct hlist_node hash_entry; 759 struct hlist_node hash_entry;
448 struct batadv_priv *bat_priv; 760 struct batadv_priv *bat_priv;
449 /* this packet might be part of the vis send queue. */
450 struct sk_buff *skb_packet; 761 struct sk_buff *skb_packet;
451 /* vis_info may follow here */
452} __packed; 762} __packed;
453 763
764/**
765 * struct batadv_vis_info_entry - contains link information for vis
766 * @src: source MAC of the link, all zero for local TT entry
767 * @dst: destination MAC of the link, client mac address for local TT entry
768 * @quality: transmission quality of the link, or 0 for local TT entry
769 */
454struct batadv_vis_info_entry { 770struct batadv_vis_info_entry {
455 uint8_t src[ETH_ALEN]; 771 uint8_t src[ETH_ALEN];
456 uint8_t dest[ETH_ALEN]; 772 uint8_t dest[ETH_ALEN];
457 uint8_t quality; /* quality = 0 client */ 773 uint8_t quality;
458} __packed; 774} __packed;
459 775
460struct batadv_recvlist_node { 776/**
777 * struct batadv_vis_recvlist_node - list entry for batadv_vis_info::recv_list
778 * @list: list node for batadv_vis_info::recv_list
779 * @mac: MAC address of the originator from where the vis_info was received
780 */
781struct batadv_vis_recvlist_node {
461 struct list_head list; 782 struct list_head list;
462 uint8_t mac[ETH_ALEN]; 783 uint8_t mac[ETH_ALEN];
463}; 784};
464 785
786/**
787 * struct batadv_vis_if_list_entry - auxiliary data for vis data generation
788 * @addr: MAC address of the interface
789 * @primary: true if this interface is the primary interface
 790 * @list: list node for the interface list
791 *
792 * While scanning for vis-entries of a particular vis-originator
793 * this list collects its interfaces to create a subgraph/cluster
794 * out of them later
795 */
796struct batadv_vis_if_list_entry {
797 uint8_t addr[ETH_ALEN];
798 bool primary;
799 struct hlist_node list;
800};
801
802/**
803 * struct batadv_algo_ops - mesh algorithm callbacks
804 * @list: list node for the batadv_algo_list
805 * @name: name of the algorithm
806 * @bat_iface_enable: init routing info when hard-interface is enabled
807 * @bat_iface_disable: de-init routing info when hard-interface is disabled
808 * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
809 * belonging to this hard-interface
810 * @bat_primary_iface_set: called when primary interface is selected / changed
811 * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
812 * @bat_ogm_emit: send scheduled OGM
813 */
465struct batadv_algo_ops { 814struct batadv_algo_ops {
466 struct hlist_node list; 815 struct hlist_node list;
467 char *name; 816 char *name;
468 /* init routing info when hard-interface is enabled */
469 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); 817 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
470 /* de-init routing info when hard-interface is disabled */
471 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); 818 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
472 /* (re-)init mac addresses of the protocol information
473 * belonging to this hard-interface
474 */
475 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); 819 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
476 /* called when primary interface is selected / changed */
477 void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface); 820 void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
478 /* prepare a new outgoing OGM for the send queue */
479 void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface); 821 void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
480 /* send scheduled OGM */
481 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); 822 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
482}; 823};
483 824
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 10aff49fcf25..50e079f00be6 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,4 +1,4 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: 1/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Andreas Langer 3 * Andreas Langer
4 * 4 *
@@ -133,7 +133,6 @@ batadv_frag_search_packet(struct list_head *head,
133 is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD); 133 is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
134 134
135 list_for_each_entry(tfp, head, list) { 135 list_for_each_entry(tfp, head, list) {
136
137 if (!tfp->skb) 136 if (!tfp->skb)
138 continue; 137 continue;
139 138
@@ -162,7 +161,6 @@ void batadv_frag_list_free(struct list_head *head)
162 struct batadv_frag_packet_list_entry *pf, *tmp_pf; 161 struct batadv_frag_packet_list_entry *pf, *tmp_pf;
163 162
164 if (!list_empty(head)) { 163 if (!list_empty(head)) {
165
166 list_for_each_entry_safe(pf, tmp_pf, head, list) { 164 list_for_each_entry_safe(pf, tmp_pf, head, list) {
167 kfree_skb(pf->skb); 165 kfree_skb(pf->skb);
168 list_del(&pf->list); 166 list_del(&pf->list);
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 61abba58bd8f..429cf8a4a31e 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,4 +1,4 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: 1/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Andreas Langer 3 * Andreas Langer
4 * 4 *
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 0f65a9de5f74..22d2785177d1 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,4 +1,4 @@
1/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: 1/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Simon Wunderlich 3 * Simon Wunderlich
4 * 4 *
@@ -28,14 +28,15 @@
28 28
29#define BATADV_MAX_VIS_PACKET_SIZE 1000 29#define BATADV_MAX_VIS_PACKET_SIZE 1000
30 30
31static void batadv_start_vis_timer(struct batadv_priv *bat_priv); 31/* hash class keys */
32static struct lock_class_key batadv_vis_hash_lock_class_key;
32 33
33/* free the info */ 34/* free the info */
34static void batadv_free_info(struct kref *ref) 35static void batadv_free_info(struct kref *ref)
35{ 36{
36 struct batadv_vis_info *info; 37 struct batadv_vis_info *info;
37 struct batadv_priv *bat_priv; 38 struct batadv_priv *bat_priv;
38 struct batadv_recvlist_node *entry, *tmp; 39 struct batadv_vis_recvlist_node *entry, *tmp;
39 40
40 info = container_of(ref, struct batadv_vis_info, refcount); 41 info = container_of(ref, struct batadv_vis_info, refcount);
41 bat_priv = info->bat_priv; 42 bat_priv = info->bat_priv;
@@ -126,7 +127,7 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
126 struct hlist_head *if_list, 127 struct hlist_head *if_list,
127 bool primary) 128 bool primary)
128{ 129{
129 struct batadv_if_list_entry *entry; 130 struct batadv_vis_if_list_entry *entry;
130 struct hlist_node *pos; 131 struct hlist_node *pos;
131 132
132 hlist_for_each_entry(entry, pos, if_list, list) { 133 hlist_for_each_entry(entry, pos, if_list, list) {
@@ -146,7 +147,7 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
146static void batadv_vis_data_read_prim_sec(struct seq_file *seq, 147static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
147 const struct hlist_head *if_list) 148 const struct hlist_head *if_list)
148{ 149{
149 struct batadv_if_list_entry *entry; 150 struct batadv_vis_if_list_entry *entry;
150 struct hlist_node *pos; 151 struct hlist_node *pos;
151 152
152 hlist_for_each_entry(entry, pos, if_list, list) { 153 hlist_for_each_entry(entry, pos, if_list, list) {
@@ -196,7 +197,7 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
196 struct batadv_vis_info_entry *entries) 197 struct batadv_vis_info_entry *entries)
197{ 198{
198 int i; 199 int i;
199 struct batadv_if_list_entry *entry; 200 struct batadv_vis_if_list_entry *entry;
200 struct hlist_node *pos; 201 struct hlist_node *pos;
201 202
202 hlist_for_each_entry(entry, pos, list, list) { 203 hlist_for_each_entry(entry, pos, list, list) {
@@ -222,7 +223,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
222 struct batadv_vis_packet *packet; 223 struct batadv_vis_packet *packet;
223 uint8_t *entries_pos; 224 uint8_t *entries_pos;
224 struct batadv_vis_info_entry *entries; 225 struct batadv_vis_info_entry *entries;
225 struct batadv_if_list_entry *entry; 226 struct batadv_vis_if_list_entry *entry;
226 struct hlist_node *pos, *n; 227 struct hlist_node *pos, *n;
227 228
228 HLIST_HEAD(vis_if_list); 229 HLIST_HEAD(vis_if_list);
@@ -304,7 +305,7 @@ static void batadv_send_list_del(struct batadv_vis_info *info)
304static void batadv_recv_list_add(struct batadv_priv *bat_priv, 305static void batadv_recv_list_add(struct batadv_priv *bat_priv,
305 struct list_head *recv_list, const char *mac) 306 struct list_head *recv_list, const char *mac)
306{ 307{
307 struct batadv_recvlist_node *entry; 308 struct batadv_vis_recvlist_node *entry;
308 309
309 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 310 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
310 if (!entry) 311 if (!entry)
@@ -321,7 +322,7 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
321 const struct list_head *recv_list, 322 const struct list_head *recv_list,
322 const char *mac) 323 const char *mac)
323{ 324{
324 const struct batadv_recvlist_node *entry; 325 const struct batadv_vis_recvlist_node *entry;
325 326
326 spin_lock_bh(&bat_priv->vis.list_lock); 327 spin_lock_bh(&bat_priv->vis.list_lock);
327 list_for_each_entry(entry, recv_list, list) { 328 list_for_each_entry(entry, recv_list, list) {
@@ -827,7 +828,9 @@ static void batadv_send_vis_packets(struct work_struct *work)
827 kref_put(&info->refcount, batadv_free_info); 828 kref_put(&info->refcount, batadv_free_info);
828 } 829 }
829 spin_unlock_bh(&bat_priv->vis.hash_lock); 830 spin_unlock_bh(&bat_priv->vis.hash_lock);
830 batadv_start_vis_timer(bat_priv); 831
832 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
833 msecs_to_jiffies(BATADV_VIS_INTERVAL));
831} 834}
832 835
833/* init the vis server. this may only be called when if_list is already 836/* init the vis server. this may only be called when if_list is already
@@ -852,6 +855,9 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
852 goto err; 855 goto err;
853 } 856 }
854 857
858 batadv_hash_set_lock_class(bat_priv->vis.hash,
859 &batadv_vis_hash_lock_class_key);
860
855 bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 861 bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
856 if (!bat_priv->vis.my_info) 862 if (!bat_priv->vis.my_info)
857 goto err; 863 goto err;
@@ -894,7 +900,11 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
894 } 900 }
895 901
896 spin_unlock_bh(&bat_priv->vis.hash_lock); 902 spin_unlock_bh(&bat_priv->vis.hash_lock);
897 batadv_start_vis_timer(bat_priv); 903
904 INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
905 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
906 msecs_to_jiffies(BATADV_VIS_INTERVAL));
907
898 return 0; 908 return 0;
899 909
900free_info: 910free_info:
@@ -931,11 +941,3 @@ void batadv_vis_quit(struct batadv_priv *bat_priv)
931 bat_priv->vis.my_info = NULL; 941 bat_priv->vis.my_info = NULL;
932 spin_unlock_bh(&bat_priv->vis.hash_lock); 942 spin_unlock_bh(&bat_priv->vis.hash_lock);
933} 943}
934
935/* schedule packets for (re)transmission */
936static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
937{
938 INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
939 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
940 msecs_to_jiffies(BATADV_VIS_INTERVAL));
941}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 873282fa86da..ad92b0e3c230 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,4 +1,4 @@
1/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: 1/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
4 * 4 *
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 2f67d5ecc907..eb0f4b16ff09 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -290,7 +290,7 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
290 goto done; 290 goto done;
291 } 291 }
292 292
293 mgr->state = READ_LOC_AMP_INFO; 293 set_bit(READ_LOC_AMP_INFO, &mgr->state);
294 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); 294 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
295 295
296done: 296done:
@@ -499,8 +499,16 @@ send_rsp:
499 if (hdev) 499 if (hdev)
500 hci_dev_put(hdev); 500 hci_dev_put(hdev);
501 501
502 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp), 502 /* Reply error now and success after HCI Write Remote AMP Assoc
 503 &rsp); 503 command completes with success status
504 */
505 if (rsp.status != A2MP_STATUS_SUCCESS) {
506 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
507 sizeof(rsp), &rsp);
508 } else {
509 set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
510 mgr->ident = hdr->ident;
511 }
504 512
505 skb_pull(skb, le16_to_cpu(hdr->len)); 513 skb_pull(skb, le16_to_cpu(hdr->len));
506 return 0; 514 return 0;
@@ -840,7 +848,7 @@ struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
840 848
841 mutex_lock(&amp_mgr_list_lock); 849 mutex_lock(&amp_mgr_list_lock);
842 list_for_each_entry(mgr, &amp_mgr_list, list) { 850 list_for_each_entry(mgr, &amp_mgr_list, list) {
843 if (mgr->state == state) { 851 if (test_and_clear_bit(state, &mgr->state)) {
844 amp_mgr_get(mgr); 852 amp_mgr_get(mgr);
845 mutex_unlock(&amp_mgr_list_lock); 853 mutex_unlock(&amp_mgr_list_lock);
846 return mgr; 854 return mgr;
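
Turning mgr->state from a single enum value into a bitmask lets several pending operations coexist, and the lookup above consumes the matching bit atomically via test_and_clear_bit(). Illustrative usage (the handler name is hypothetical):

	/* producer: mark the operation as pending */
	set_bit(READ_LOC_AMP_INFO, &mgr->state);

	/* consumer: claim the pending operation exactly once */
	if (test_and_clear_bit(READ_LOC_AMP_INFO, &mgr->state))
		handle_read_loc_amp_info(mgr);	/* hypothetical handler */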
@@ -949,6 +957,32 @@ clean:
949 kfree(req); 957 kfree(req);
950} 958}
951 959
960void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
961{
962 struct amp_mgr *mgr;
963 struct a2mp_physlink_rsp rsp;
964 struct hci_conn *hs_hcon;
965
966 mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
967 if (!mgr)
968 return;
969
970 hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
971 if (!hs_hcon) {
972 rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
973 } else {
974 rsp.remote_id = hs_hcon->remote_id;
975 rsp.status = A2MP_STATUS_SUCCESS;
976 }
977
978 BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
979 status);
980
981 rsp.local_id = hdev->id;
982 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
983 amp_mgr_put(mgr);
984}
985
952void a2mp_discover_amp(struct l2cap_chan *chan) 986void a2mp_discover_amp(struct l2cap_chan *chan)
953{ 987{
954 struct l2cap_conn *conn = chan->conn; 988 struct l2cap_conn *conn = chan->conn;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5355df63d39b..d3ee69b35a78 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -641,7 +641,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
641 sk_list->fops.llseek = seq_lseek; 641 sk_list->fops.llseek = seq_lseek;
642 sk_list->fops.release = seq_release_private; 642 sk_list->fops.release = seq_release_private;
643 643
644 pde = proc_net_fops_create(net, name, 0, &sk_list->fops); 644 pde = proc_create(name, 0, net->proc_net, &sk_list->fops);
645 if (!pde) 645 if (!pde)
646 return -ENOMEM; 646 return -ENOMEM;
647 647
@@ -652,7 +652,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
652 652
653void bt_procfs_cleanup(struct net *net, const char *name) 653void bt_procfs_cleanup(struct net *net, const char *name)
654{ 654{
655 proc_net_remove(net, name); 655 remove_proc_entry(name, net->proc_net);
656} 656}
657#else 657#else
658int bt_procfs_init(struct module* module, struct net *net, const char *name, 658int bt_procfs_init(struct module* module, struct net *net, const char *name,
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 1b0d92c0643a..d459ed43c779 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -236,7 +236,7 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
236 236
237 cp.max_len = cpu_to_le16(hdev->amp_assoc_size); 237 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
238 238
239 mgr->state = READ_LOC_AMP_ASSOC; 239 set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); 240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
241} 241}
242 242
@@ -250,7 +250,7 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
250 cp.len_so_far = cpu_to_le16(0); 250 cp.len_so_far = cpu_to_le16(0);
251 cp.max_len = cpu_to_le16(hdev->amp_assoc_size); 251 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
252 252
253 mgr->state = READ_LOC_AMP_ASSOC_FINAL; 253 set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
254 254
255 /* Read Local AMP Assoc final link information data */ 255 /* Read Local AMP Assoc final link information data */
256 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); 256 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
@@ -317,7 +317,9 @@ void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
317 if (!hcon) 317 if (!hcon)
318 return; 318 return;
319 319
320 amp_write_rem_assoc_frag(hdev, hcon); 320 /* Send A2MP create phylink rsp when all fragments are written */
321 if (amp_write_rem_assoc_frag(hdev, hcon))
322 a2mp_send_create_phy_link_rsp(hdev, 0);
321} 323}
322 324
323void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle) 325void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
@@ -403,26 +405,20 @@ void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
403 405
404void amp_create_logical_link(struct l2cap_chan *chan) 406void amp_create_logical_link(struct l2cap_chan *chan)
405{ 407{
408 struct hci_conn *hs_hcon = chan->hs_hcon;
406 struct hci_cp_create_accept_logical_link cp; 409 struct hci_cp_create_accept_logical_link cp;
407 struct hci_conn *hcon;
408 struct hci_dev *hdev; 410 struct hci_dev *hdev;
409 411
410 BT_DBG("chan %p", chan); 412 BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
411 413
412 if (!chan->hs_hcon) 414 if (!hs_hcon)
413 return; 415 return;
414 416
415 hdev = hci_dev_hold(chan->hs_hcon->hdev); 417 hdev = hci_dev_hold(chan->hs_hcon->hdev);
416 if (!hdev) 418 if (!hdev)
417 return; 419 return;
418 420
419 BT_DBG("chan %p dst %pMR", chan, chan->conn->dst); 421 cp.phy_handle = hs_hcon->handle;
420
421 hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst);
422 if (!hcon)
423 goto done;
424
425 cp.phy_handle = hcon->handle;
426 422
427 cp.tx_flow_spec.id = chan->local_id; 423 cp.tx_flow_spec.id = chan->local_id;
428 cp.tx_flow_spec.stype = chan->local_stype; 424 cp.tx_flow_spec.stype = chan->local_stype;
@@ -438,14 +434,13 @@ void amp_create_logical_link(struct l2cap_chan *chan)
438 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); 434 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
439 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); 435 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
440 436
441 if (hcon->out) 437 if (hs_hcon->out)
442 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), 438 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
443 &cp); 439 &cp);
444 else 440 else
445 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), 441 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
446 &cp); 442 &cp);
447 443
448done:
449 hci_dev_put(hdev); 444 hci_dev_put(hdev);
450} 445}
451 446
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a5b639702637..e430b1abcd2f 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -33,7 +33,6 @@
33 33
34#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
36#include <net/bluetooth/l2cap.h>
37 36
38#include "bnep.h" 37#include "bnep.h"
39 38
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0666eb..4925a02ae7e4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
249 __u8 reason = hci_proto_disconn_ind(conn); 249 __u8 reason = hci_proto_disconn_ind(conn);
250 250
251 switch (conn->type) { 251 switch (conn->type) {
252 case ACL_LINK:
253 hci_acl_disconn(conn, reason);
254 break;
255 case AMP_LINK: 252 case AMP_LINK:
256 hci_amp_disconn(conn, reason); 253 hci_amp_disconn(conn, reason);
257 break; 254 break;
255 default:
256 hci_acl_disconn(conn, reason);
257 break;
258 } 258 }
259} 259}
260 260
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 596660d37c5e..60793e7b768b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1146,7 +1146,8 @@ static void hci_power_on(struct work_struct *work)
1146 return; 1146 return;
1147 1147
1148 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1148 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1149 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT); 1149 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1150 HCI_AUTO_OFF_TIMEOUT);
1150 1151
1151 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1152 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1152 mgmt_index_added(hdev); 1153 mgmt_index_added(hdev);
@@ -1182,14 +1183,10 @@ static void hci_discov_off(struct work_struct *work)
1182 1183
1183int hci_uuids_clear(struct hci_dev *hdev) 1184int hci_uuids_clear(struct hci_dev *hdev)
1184{ 1185{
1185 struct list_head *p, *n; 1186 struct bt_uuid *uuid, *tmp;
1186
1187 list_for_each_safe(p, n, &hdev->uuids) {
1188 struct bt_uuid *uuid;
1189
1190 uuid = list_entry(p, struct bt_uuid, list);
1191 1187
1192 list_del(p); 1188 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1189 list_del(&uuid->list);
1193 kfree(uuid); 1190 kfree(uuid);
1194 } 1191 }
1195 1192
@@ -1621,8 +1618,8 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1621 if (err < 0) 1618 if (err < 0)
1622 return err; 1619 return err;
1623 1620
1624 schedule_delayed_work(&hdev->le_scan_disable, 1621 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1625 msecs_to_jiffies(timeout)); 1622 msecs_to_jiffies(timeout));
1626 1623
1627 return 0; 1624 return 0;
1628} 1625}
@@ -1799,6 +1796,15 @@ int hci_register_dev(struct hci_dev *hdev)
1799 goto err; 1796 goto err;
1800 } 1797 }
1801 1798
1799 hdev->req_workqueue = alloc_workqueue(hdev->name,
1800 WQ_HIGHPRI | WQ_UNBOUND |
1801 WQ_MEM_RECLAIM, 1);
1802 if (!hdev->req_workqueue) {
1803 destroy_workqueue(hdev->workqueue);
1804 error = -ENOMEM;
1805 goto err;
1806 }
1807
1802 error = hci_add_sysfs(hdev); 1808 error = hci_add_sysfs(hdev);
1803 if (error < 0) 1809 if (error < 0)
1804 goto err_wqueue; 1810 goto err_wqueue;
@@ -1821,12 +1827,13 @@ int hci_register_dev(struct hci_dev *hdev)
1821 hci_notify(hdev, HCI_DEV_REG); 1827 hci_notify(hdev, HCI_DEV_REG);
1822 hci_dev_hold(hdev); 1828 hci_dev_hold(hdev);
1823 1829
1824 schedule_work(&hdev->power_on); 1830 queue_work(hdev->req_workqueue, &hdev->power_on);
1825 1831
1826 return id; 1832 return id;
1827 1833
1828err_wqueue: 1834err_wqueue:
1829 destroy_workqueue(hdev->workqueue); 1835 destroy_workqueue(hdev->workqueue);
1836 destroy_workqueue(hdev->req_workqueue);
1830err: 1837err:
1831 ida_simple_remove(&hci_index_ida, hdev->id); 1838 ida_simple_remove(&hci_index_ida, hdev->id);
1832 write_lock(&hci_dev_list_lock); 1839 write_lock(&hci_dev_list_lock);
@@ -1880,6 +1887,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1880 hci_del_sysfs(hdev); 1887 hci_del_sysfs(hdev);
1881 1888
1882 destroy_workqueue(hdev->workqueue); 1889 destroy_workqueue(hdev->workqueue);
1890 destroy_workqueue(hdev->req_workqueue);
1883 1891
1884 hci_dev_lock(hdev); 1892 hci_dev_lock(hdev);
1885 hci_blacklist_clear(hdev); 1893 hci_blacklist_clear(hdev);
@@ -1921,7 +1929,7 @@ int hci_recv_frame(struct sk_buff *skb)
1921 return -ENXIO; 1929 return -ENXIO;
1922 } 1930 }
1923 1931
1924 /* Incomming skb */ 1932 /* Incoming skb */
1925 bt_cb(skb)->incoming = 1; 1933 bt_cb(skb)->incoming = 1;
1926 1934
1927 /* Time stamp */ 1935 /* Time stamp */
@@ -2810,14 +2818,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2810 if (conn) { 2818 if (conn) {
2811 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2819 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2812 2820
2813 hci_dev_lock(hdev);
2814 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2815 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2816 mgmt_device_connected(hdev, &conn->dst, conn->type,
2817 conn->dst_type, 0, NULL, 0,
2818 conn->dev_class);
2819 hci_dev_unlock(hdev);
2820
2821 /* Send to upper protocol */ 2821 /* Send to upper protocol */
2822 l2cap_recv_acldata(conn, skb, flags); 2822 l2cap_recv_acldata(conn, skb, flags);
2823 return; 2823 return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 705078a0cc39..477726a63512 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -609,8 +609,17 @@ static void le_setup(struct hci_dev *hdev)
609 /* Read LE Buffer Size */ 609 /* Read LE Buffer Size */
610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); 610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611 611
612 /* Read LE Local Supported Features */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
614
612 /* Read LE Advertising Channel TX Power */ 615 /* Read LE Advertising Channel TX Power */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 616 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
617
618 /* Read LE White List Size */
619 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
620
621 /* Read LE Supported States */
622 hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
614} 623}
615 624
616static void hci_setup(struct hci_dev *hdev) 625static void hci_setup(struct hci_dev *hdev)
@@ -1090,6 +1099,19 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1090 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); 1099 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1091} 1100}
1092 1101
1102static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1103 struct sk_buff *skb)
1104{
1105 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1106
1107 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1108
1109 if (!rp->status)
1110 memcpy(hdev->le_features, rp->features, 8);
1111
1112 hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
1113}
1114
1093static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 1115static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1094 struct sk_buff *skb) 1116 struct sk_buff *skb)
1095{ 1117{
@@ -1290,6 +1312,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1290 } 1312 }
1291} 1313}
1292 1314
1315static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1317{
1318 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1319
1320 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1321
1322 if (!rp->status)
1323 hdev->le_white_list_size = rp->size;
1324
1325 hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1326}
1327
1293static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 1328static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1294{ 1329{
1295 struct hci_rp_le_ltk_reply *rp = (void *) skb->data; 1330 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
@@ -1314,6 +1349,19 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1314 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1349 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1315} 1350}
1316 1351
1352static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1353 struct sk_buff *skb)
1354{
1355 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1356
1357 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1358
1359 if (!rp->status)
1360 memcpy(hdev->le_states, rp->le_states, 8);
1361
1362 hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1363}
1364
1317static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1365static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1318 struct sk_buff *skb) 1366 struct sk_buff *skb)
1319{ 1367{
@@ -2628,6 +2676,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 hci_cc_le_read_buffer_size(hdev, skb); 2676 hci_cc_le_read_buffer_size(hdev, skb);
2629 break; 2677 break;
2630 2678
2679 case HCI_OP_LE_READ_LOCAL_FEATURES:
2680 hci_cc_le_read_local_features(hdev, skb);
2681 break;
2682
2631 case HCI_OP_LE_READ_ADV_TX_POWER: 2683 case HCI_OP_LE_READ_ADV_TX_POWER:
2632 hci_cc_le_read_adv_tx_power(hdev, skb); 2684 hci_cc_le_read_adv_tx_power(hdev, skb);
2633 break; 2685 break;
@@ -2664,6 +2716,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2664 hci_cc_le_set_scan_enable(hdev, skb); 2716 hci_cc_le_set_scan_enable(hdev, skb);
2665 break; 2717 break;
2666 2718
2719 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2720 hci_cc_le_read_white_list_size(hdev, skb);
2721 break;
2722
2667 case HCI_OP_LE_LTK_REPLY: 2723 case HCI_OP_LE_LTK_REPLY:
2668 hci_cc_le_ltk_reply(hdev, skb); 2724 hci_cc_le_ltk_reply(hdev, skb);
2669 break; 2725 break;
@@ -2672,6 +2728,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2672 hci_cc_le_ltk_neg_reply(hdev, skb); 2728 hci_cc_le_ltk_neg_reply(hdev, skb);
2673 break; 2729 break;
2674 2730
2731 case HCI_OP_LE_READ_SUPPORTED_STATES:
2732 hci_cc_le_read_supported_states(hdev, skb);
2733 break;
2734
2675 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 2735 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2676 hci_cc_write_le_host_supported(hdev, skb); 2736 hci_cc_write_le_host_supported(hdev, skb);
2677 break; 2737 break;
@@ -2688,7 +2748,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2688 if (ev->opcode != HCI_OP_NOP) 2748 if (ev->opcode != HCI_OP_NOP)
2689 del_timer(&hdev->cmd_timer); 2749 del_timer(&hdev->cmd_timer);
2690 2750
2691 if (ev->ncmd) { 2751 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2692 atomic_set(&hdev->cmd_cnt, 1); 2752 atomic_set(&hdev->cmd_cnt, 1);
2693 if (!skb_queue_empty(&hdev->cmd_q)) 2753 if (!skb_queue_empty(&hdev->cmd_q))
2694 queue_work(hdev->workqueue, &hdev->cmd_work); 2754 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -3928,8 +3988,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3928 void *ptr = &skb->data[1]; 3988 void *ptr = &skb->data[1];
3929 s8 rssi; 3989 s8 rssi;
3930 3990
3931 hci_dev_lock(hdev);
3932
3933 while (num_reports--) { 3991 while (num_reports--) {
3934 struct hci_ev_le_advertising_info *ev = ptr; 3992 struct hci_ev_le_advertising_info *ev = ptr;
3935 3993
@@ -3939,8 +3997,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3939 3997
3940 ptr += sizeof(*ev) + ev->length + 1; 3998 ptr += sizeof(*ev) + ev->length + 1;
3941 } 3999 }
3942
3943 hci_dev_unlock(hdev);
3944} 4000}
3945 4001
3946static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4002static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 55cceee02a84..23b4e242a31a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -2,6 +2,7 @@
2 2
3#include <linux/debugfs.h> 3#include <linux/debugfs.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <asm/unaligned.h>
5 6
6#include <net/bluetooth/bluetooth.h> 7#include <net/bluetooth/bluetooth.h>
7#include <net/bluetooth/hci_core.h> 8#include <net/bluetooth/hci_core.h>
@@ -461,19 +462,18 @@ static const struct file_operations blacklist_fops = {
461 462
462static void print_bt_uuid(struct seq_file *f, u8 *uuid) 463static void print_bt_uuid(struct seq_file *f, u8 *uuid)
463{ 464{
464 __be32 data0, data4; 465 u32 data0, data5;
465 __be16 data1, data2, data3, data5; 466 u16 data1, data2, data3, data4;
466 467
467 memcpy(&data0, &uuid[0], 4); 468 data5 = get_unaligned_le32(uuid);
468 memcpy(&data1, &uuid[4], 2); 469 data4 = get_unaligned_le16(uuid + 4);
469 memcpy(&data2, &uuid[6], 2); 470 data3 = get_unaligned_le16(uuid + 6);
470 memcpy(&data3, &uuid[8], 2); 471 data2 = get_unaligned_le16(uuid + 8);
471 memcpy(&data4, &uuid[10], 4); 472 data1 = get_unaligned_le16(uuid + 10);
472 memcpy(&data5, &uuid[14], 2); 473 data0 = get_unaligned_le32(uuid + 12);
473 474
474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 475 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3), 476 data0, data1, data2, data3, data4, data5);
476 ntohl(data4), ntohs(data5));
477} 477}
478 478
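
get_unaligned_le16()/get_unaligned_le32() read little-endian values at arbitrary byte offsets without alignment faults, which is what a 128-bit UUID stored as a raw byte array needs. Open-coded equivalents, shown only to make the byte order explicit:

	static inline u16 read_le16(const u8 *p)
	{
		return p[0] | ((u16)p[1] << 8);
	}

	static inline u32 read_le32(const u8 *p)
	{
		return p[0] | ((u32)p[1] << 8) |
		       ((u32)p[2] << 16) | ((u32)p[3] << 24);
	}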
479static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b2bcbe2dc328..a7352ff3fd1e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
931 hid->version = req->version; 931 hid->version = req->version;
932 hid->country = req->country; 932 hid->country = req->country;
933 933
934 strncpy(hid->name, req->name, 128); 934 strncpy(hid->name, req->name, sizeof(req->name) - 1);
935 935
936 snprintf(hid->phys, sizeof(hid->phys), "%pMR", 936 snprintf(hid->phys, sizeof(hid->phys), "%pMR",
937 &bt_sk(session->ctrl_sock->sk)->src); 937 &bt_sk(session->ctrl_sock->sk)->src);
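The new bound ties the copy to the source buffer's declared size and reserves a byte for NUL termination, instead of hard-coding 128. A minimal standalone sketch of the pitfall being avoided; the 8-byte buffers are illustrative, not the real hidp structure layout.

#include <stdio.h>
#include <string.h>

struct req { char name[8]; };
struct hid { char name[8]; };

int main(void)
{
	struct req r;
	struct hid h = { { 0 } };		/* as kzalloc() would */

	memcpy(r.name, "ABCDEFGH", 8);		/* no NUL in the source */

	/* Bounding by sizeof(r.name) - 1 copies at most 7 bytes, so the
	 * zeroed final byte of h.name always terminates the string;
	 * copying a full sizeof(buffer) could leave it unterminated. */
	strncpy(h.name, r.name, sizeof(r.name) - 1);

	printf("%s\n", h.name);			/* prints ABCDEFG */
	return 0;
}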
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c78208d793e..7c7e9321f1ea 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1527,17 +1527,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); 1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528 1528
1529 switch (hcon->type) { 1529 switch (hcon->type) {
1530 case AMP_LINK:
1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1533
1534 case LE_LINK: 1530 case LE_LINK:
1535 if (hcon->hdev->le_mtu) { 1531 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu; 1532 conn->mtu = hcon->hdev->le_mtu;
1537 break; 1533 break;
1538 } 1534 }
1539 /* fall through */ 1535 /* fall through */
1540
1541 default: 1536 default:
1542 conn->mtu = hcon->hdev->acl_mtu; 1537 conn->mtu = hcon->hdev->acl_mtu;
1543 break; 1538 break;
@@ -3727,6 +3722,17 @@ sendresp:
3727static int l2cap_connect_req(struct l2cap_conn *conn, 3722static int l2cap_connect_req(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u8 *data) 3723 struct l2cap_cmd_hdr *cmd, u8 *data)
3729{ 3724{
3725 struct hci_dev *hdev = conn->hcon->hdev;
3726 struct hci_conn *hcon = conn->hcon;
3727
3728 hci_dev_lock(hdev);
3729 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3730 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3731 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3732 hcon->dst_type, 0, NULL, 0,
3733 hcon->dev_class);
3734 hci_dev_unlock(hdev);
3735
3730 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); 3736 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3731 return 0; 3737 return 0;
3732} 3738}
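The added hunk raises the mgmt "device connected" event from the L2CAP connect-request path, and test_and_set_bit() on HCI_CONN_MGMT_CONNECTED ensures it fires at most once per connection however many requests arrive. A userspace analogue of that notify-once pattern, sketched with C11 atomics; the struct and names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct conn { atomic_flag mgmt_connected; };

static void notify_connected(struct conn *c)
{
	/* atomic_flag_test_and_set() returns the previous value, so
	 * only the first caller observes "clear" and emits the event. */
	if (!atomic_flag_test_and_set(&c->mgmt_connected))
		printf("device connected event\n");
}

int main(void)
{
	struct conn c = { ATOMIC_FLAG_INIT };

	notify_connected(&c);	/* emits the event */
	notify_connected(&c);	/* silently skipped */
	return 0;
}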
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f559b966279c..39395c7144aa 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
35bool enable_hs; 35bool enable_hs;
36 36
37#define MGMT_VERSION 1 37#define MGMT_VERSION 1
38#define MGMT_REVISION 2 38#define MGMT_REVISION 3
39 39
40static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -435,35 +435,117 @@ static u32 get_current_settings(struct hci_dev *hdev)
435 435
436#define PNP_INFO_SVCLASS_ID 0x1200 436#define PNP_INFO_SVCLASS_ID 0x1200
437 437
438static u8 bluetooth_base_uuid[] = { 438static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
439 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, 439{
440 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 440 u8 *ptr = data, *uuids_start = NULL;
441}; 441 struct bt_uuid *uuid;
442
443 if (len < 4)
444 return ptr;
445
446 list_for_each_entry(uuid, &hdev->uuids, list) {
447 u16 uuid16;
448
449 if (uuid->size != 16)
450 continue;
451
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
453 if (uuid16 < 0x1100)
454 continue;
455
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
457 continue;
442 458
443static u16 get_uuid16(u8 *uuid128) 459 if (!uuids_start) {
460 uuids_start = ptr;
461 uuids_start[0] = 1;
462 uuids_start[1] = EIR_UUID16_ALL;
463 ptr += 2;
464 }
465
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
469 break;
470 }
471
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
475 }
476
477 return ptr;
478}
479
480static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
444{ 481{
445 u32 val; 482 u8 *ptr = data, *uuids_start = NULL;
446 int i; 483 struct bt_uuid *uuid;
484
485 if (len < 6)
486 return ptr;
447 487
448 for (i = 0; i < 12; i++) { 488 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (bluetooth_base_uuid[i] != uuid128[i]) 489 if (uuid->size != 32)
450 return 0; 490 continue;
491
492 if (!uuids_start) {
493 uuids_start = ptr;
494 uuids_start[0] = 1;
495 uuids_start[1] = EIR_UUID32_ALL;
496 ptr += 2;
497 }
498
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
502 break;
503 }
504
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
506 ptr += sizeof(u32);
507 uuids_start[0] += sizeof(u32);
451 } 508 }
452 509
453 val = get_unaligned_le32(&uuid128[12]); 510 return ptr;
454 if (val > 0xffff) 511}
455 return 0; 512
513static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
514{
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
517
518 if (len < 18)
519 return ptr;
456 520
457 return (u16) val; 521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
523 continue;
524
525 if (!uuids_start) {
526 uuids_start = ptr;
527 uuids_start[0] = 1;
528 uuids_start[1] = EIR_UUID128_ALL;
529 ptr += 2;
530 }
531
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
535 break;
536 }
537
538 memcpy(ptr, uuid->uuid, 16);
539 ptr += 16;
540 uuids_start[0] += 16;
541 }
542
543 return ptr;
458} 544}
459 545
460static void create_eir(struct hci_dev *hdev, u8 *data) 546static void create_eir(struct hci_dev *hdev, u8 *data)
461{ 547{
462 u8 *ptr = data; 548 u8 *ptr = data;
463 u16 eir_len = 0;
464 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
465 int i, truncated = 0;
466 struct bt_uuid *uuid;
467 size_t name_len; 549 size_t name_len;
468 550
469 name_len = strlen(hdev->dev_name); 551 name_len = strlen(hdev->dev_name);
@@ -481,7 +563,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
481 563
482 memcpy(ptr + 2, hdev->dev_name, name_len); 564 memcpy(ptr + 2, hdev->dev_name, name_len);
483 565
484 eir_len += (name_len + 2);
485 ptr += (name_len + 2); 566 ptr += (name_len + 2);
486 } 567 }
487 568
@@ -490,7 +571,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
490 ptr[1] = EIR_TX_POWER; 571 ptr[1] = EIR_TX_POWER;
491 ptr[2] = (u8) hdev->inq_tx_power; 572 ptr[2] = (u8) hdev->inq_tx_power;
492 573
493 eir_len += 3;
494 ptr += 3; 574 ptr += 3;
495 } 575 }
496 576
@@ -503,60 +583,12 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
503 put_unaligned_le16(hdev->devid_product, ptr + 6); 583 put_unaligned_le16(hdev->devid_product, ptr + 6);
504 put_unaligned_le16(hdev->devid_version, ptr + 8); 584 put_unaligned_le16(hdev->devid_version, ptr + 8);
505 585
506 eir_len += 10;
507 ptr += 10; 586 ptr += 10;
508 } 587 }
509 588
510 memset(uuid16_list, 0, sizeof(uuid16_list)); 589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
511 590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
512 /* Group all UUID16 types */ 591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 u16 uuid16;
515
516 uuid16 = get_uuid16(uuid->uuid);
517 if (uuid16 == 0)
518 return;
519
520 if (uuid16 < 0x1100)
521 continue;
522
523 if (uuid16 == PNP_INFO_SVCLASS_ID)
524 continue;
525
526 /* Stop if not enough space to put next UUID */
527 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
528 truncated = 1;
529 break;
530 }
531
532 /* Check for duplicates */
533 for (i = 0; uuid16_list[i] != 0; i++)
534 if (uuid16_list[i] == uuid16)
535 break;
536
537 if (uuid16_list[i] == 0) {
538 uuid16_list[i] = uuid16;
539 eir_len += sizeof(u16);
540 }
541 }
542
543 if (uuid16_list[0] != 0) {
544 u8 *length = ptr;
545
546 /* EIR Data type */
547 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
548
549 ptr += 2;
550 eir_len += 2;
551
552 for (i = 0; uuid16_list[i] != 0; i++) {
553 *ptr++ = (uuid16_list[i] & 0x00ff);
554 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
555 }
556
557 /* EIR Data length */
558 *length = (i * sizeof(u16)) + 1;
559 }
560} 592}
561 593
562static int update_eir(struct hci_dev *hdev) 594static int update_eir(struct hci_dev *hdev)
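Each field the new list builders emit follows the EIR TLV layout: a length byte that covers the type byte plus the data, a type byte (EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME when the list is truncated), then the 16-bit UUIDs least-significant byte first. A standalone sketch of that layout under the same space checks; the two type constants match the Bluetooth assigned numbers, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EIR_UUID16_SOME 0x02
#define EIR_UUID16_ALL  0x03

static uint8_t *put_uuid16_list(uint8_t *ptr, const uint8_t *end,
				const uint16_t *uuids, size_t n)
{
	uint8_t *field = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!field) {
			if (end - ptr < 4)	/* header + one UUID */
				return ptr;
			field = ptr;
			field[0] = 1;		/* length: type byte only */
			field[1] = EIR_UUID16_ALL;
			ptr += 2;
		}
		if (end - ptr < 2) {		/* truncated list */
			field[1] = EIR_UUID16_SOME;
			break;
		}
		*ptr++ = uuids[i] & 0xff;
		*ptr++ = uuids[i] >> 8;
		field[0] += 2;
	}
	return ptr;
}

int main(void)
{
	uint8_t eir[8] = { 0 };
	uint16_t uuids[] = { 0x110b, 0x110e, 0x1112, 0x1115 };
	uint8_t *p = put_uuid16_list(eir, eir + sizeof(eir), uuids, 4);

	/* Only three UUIDs fit: used 8 bytes, len=7 type=0x02 (SOME). */
	printf("used %zu bytes, len=%u type=0x%02x\n",
	       (size_t)(p - eir), eir[0], eir[1]);
	return 0;
}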
@@ -728,13 +760,9 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
728 void *data), 760 void *data),
729 void *data) 761 void *data)
730{ 762{
731 struct list_head *p, *n; 763 struct pending_cmd *cmd, *tmp;
732
733 list_for_each_safe(p, n, &hdev->mgmt_pending) {
734 struct pending_cmd *cmd;
735
736 cmd = list_entry(p, struct pending_cmd, list);
737 764
765 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
738 if (opcode > 0 && cmd->opcode != opcode) 766 if (opcode > 0 && cmd->opcode != opcode)
739 continue; 767 continue;
740 768
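list_for_each_entry_safe() caches the next node before the loop body runs, so the callback may unlink and free the current command without derailing the walk; the open-coded version arranged the same thing by hand with list_for_each_safe() and list_entry(). A userspace analogue with a plain singly linked list; types and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct pending_cmd {
	int opcode;
	struct pending_cmd *next;
};

static void foreach_safe(struct pending_cmd *head,
			 void (*cb)(struct pending_cmd *))
{
	struct pending_cmd *cmd = head, *tmp;

	while (cmd) {
		tmp = cmd->next;	/* grab next before cb may free */
		cb(cmd);
		cmd = tmp;
	}
}

static void drop(struct pending_cmd *cmd)
{
	printf("dropping opcode %d\n", cmd->opcode);
	free(cmd);
}

int main(void)
{
	struct pending_cmd *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct pending_cmd *c = malloc(sizeof(*c));
		c->opcode = i;
		c->next = head;
		head = c;
	}
	foreach_safe(head, drop);	/* frees every node safely */
	return 0;
}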
@@ -777,14 +805,19 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
777 805
778 BT_DBG("request for %s", hdev->name); 806 BT_DBG("request for %s", hdev->name);
779 807
808 if (cp->val != 0x00 && cp->val != 0x01)
809 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
810 MGMT_STATUS_INVALID_PARAMS);
811
780 hci_dev_lock(hdev); 812 hci_dev_lock(hdev);
781 813
782 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 814 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
783 cancel_delayed_work(&hdev->power_off); 815 cancel_delayed_work(&hdev->power_off);
784 816
785 if (cp->val) { 817 if (cp->val) {
786 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); 818 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
787 mgmt_powered(hdev, 1); 819 data, len);
820 err = mgmt_powered(hdev, 1);
788 goto failed; 821 goto failed;
789 } 822 }
790 } 823 }
@@ -807,9 +840,9 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
807 } 840 }
808 841
809 if (cp->val) 842 if (cp->val)
810 schedule_work(&hdev->power_on); 843 queue_work(hdev->req_workqueue, &hdev->power_on);
811 else 844 else
812 schedule_work(&hdev->power_off.work); 845 queue_work(hdev->req_workqueue, &hdev->power_off.work);
813 846
814 err = 0; 847 err = 0;
815 848
@@ -872,6 +905,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
872 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 905 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
873 MGMT_STATUS_NOT_SUPPORTED); 906 MGMT_STATUS_NOT_SUPPORTED);
874 907
908 if (cp->val != 0x00 && cp->val != 0x01)
909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
910 MGMT_STATUS_INVALID_PARAMS);
911
875 timeout = __le16_to_cpu(cp->timeout); 912 timeout = __le16_to_cpu(cp->timeout);
876 if (!cp->val && timeout > 0) 913 if (!cp->val && timeout > 0)
877 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
@@ -971,6 +1008,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
971 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1008 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
972 MGMT_STATUS_NOT_SUPPORTED); 1009 MGMT_STATUS_NOT_SUPPORTED);
973 1010
1011 if (cp->val != 0x00 && cp->val != 0x01)
1012 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1013 MGMT_STATUS_INVALID_PARAMS);
1014
974 hci_dev_lock(hdev); 1015 hci_dev_lock(hdev);
975 1016
976 if (!hdev_is_powered(hdev)) { 1017 if (!hdev_is_powered(hdev)) {
@@ -1041,6 +1082,10 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1041 1082
1042 BT_DBG("request for %s", hdev->name); 1083 BT_DBG("request for %s", hdev->name);
1043 1084
1085 if (cp->val != 0x00 && cp->val != 0x01)
1086 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1087 MGMT_STATUS_INVALID_PARAMS);
1088
1044 hci_dev_lock(hdev); 1089 hci_dev_lock(hdev);
1045 1090
1046 if (cp->val) 1091 if (cp->val)
@@ -1073,6 +1118,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1073 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 1118 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1074 MGMT_STATUS_NOT_SUPPORTED); 1119 MGMT_STATUS_NOT_SUPPORTED);
1075 1120
1121 if (cp->val != 0x00 && cp->val != 0x01)
1122 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1123 MGMT_STATUS_INVALID_PARAMS);
1124
1076 hci_dev_lock(hdev); 1125 hci_dev_lock(hdev);
1077 1126
1078 if (!hdev_is_powered(hdev)) { 1127 if (!hdev_is_powered(hdev)) {
@@ -1133,13 +1182,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1133 1182
1134 BT_DBG("request for %s", hdev->name); 1183 BT_DBG("request for %s", hdev->name);
1135 1184
1136 hci_dev_lock(hdev); 1185 if (!lmp_ssp_capable(hdev))
1186 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1187 MGMT_STATUS_NOT_SUPPORTED);
1137 1188
1138 if (!lmp_ssp_capable(hdev)) { 1189 if (cp->val != 0x00 && cp->val != 0x01)
1139 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1140 MGMT_STATUS_NOT_SUPPORTED); 1191 MGMT_STATUS_INVALID_PARAMS);
1141 goto failed; 1192
1142 } 1193 hci_dev_lock(hdev);
1143 1194
1144 val = !!cp->val; 1195 val = !!cp->val;
1145 1196
@@ -1199,6 +1250,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1199 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 1250 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1200 MGMT_STATUS_NOT_SUPPORTED); 1251 MGMT_STATUS_NOT_SUPPORTED);
1201 1252
1253 if (cp->val != 0x00 && cp->val != 0x01)
1254 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1255 MGMT_STATUS_INVALID_PARAMS);
1256
1202 if (cp->val) 1257 if (cp->val)
1203 set_bit(HCI_HS_ENABLED, &hdev->dev_flags); 1258 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1204 else 1259 else
@@ -1217,13 +1272,15 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1217 1272
1218 BT_DBG("request for %s", hdev->name); 1273 BT_DBG("request for %s", hdev->name);
1219 1274
1220 hci_dev_lock(hdev); 1275 if (!lmp_le_capable(hdev))
1276 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1277 MGMT_STATUS_NOT_SUPPORTED);
1221 1278
1222 if (!lmp_le_capable(hdev)) { 1279 if (cp->val != 0x00 && cp->val != 0x01)
1223 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1224 MGMT_STATUS_NOT_SUPPORTED); 1281 MGMT_STATUS_INVALID_PARAMS);
1225 goto unlock; 1282
1226 } 1283 hci_dev_lock(hdev);
1227 1284
1228 val = !!cp->val; 1285 val = !!cp->val;
1229 enabled = lmp_host_le_capable(hdev); 1286 enabled = lmp_host_le_capable(hdev);
@@ -1275,6 +1332,25 @@ unlock:
1275 return err; 1332 return err;
1276} 1333}
1277 1334
1335static const u8 bluetooth_base_uuid[] = {
1336 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1337 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1338};
1339
1340static u8 get_uuid_size(const u8 *uuid)
1341{
1342 u32 val;
1343
1344 if (memcmp(uuid, bluetooth_base_uuid, 12))
1345 return 128;
1346
1347 val = get_unaligned_le32(&uuid[12]);
1348 if (val > 0xffff)
1349 return 32;
1350
1351 return 16;
1352}
1353
1278static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 1354static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1279{ 1355{
1280 struct mgmt_cp_add_uuid *cp = data; 1356 struct mgmt_cp_add_uuid *cp = data;
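get_uuid_size() classifies a 128-bit UUID by testing whether its low 12 bytes match the Bluetooth Base UUID; if they do, the remaining 32-bit word is a 16-bit alias when it fits in 0xffff and a 32-bit alias otherwise. A standalone sketch of the same classification, reusing the base-UUID bytes from the hunk above; the helper names are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static const uint8_t base_uuid[12] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00,
};

static unsigned uuid_size(const uint8_t uuid[16])
{
	uint32_t val;

	if (memcmp(uuid, base_uuid, 12))
		return 128;		/* vendor-specific UUID */

	val = uuid[12] | uuid[13] << 8 | uuid[14] << 16 |
	      (uint32_t)uuid[15] << 24;
	return val > 0xffff ? 32 : 16;
}

int main(void)
{
	uint8_t uuid[16];

	memcpy(uuid, base_uuid, 12);
	uuid[12] = 0x0b; uuid[13] = 0x11; uuid[14] = 0; uuid[15] = 0;
	printf("%u\n", uuid_size(uuid));	/* 16 (alias 0x110b) */

	uuid[14] = 0x01;			/* value 0x0001110b */
	printf("%u\n", uuid_size(uuid));	/* 32 */
	return 0;
}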
@@ -1300,8 +1376,9 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1300 1376
1301 memcpy(uuid->uuid, cp->uuid, 16); 1377 memcpy(uuid->uuid, cp->uuid, 16);
1302 uuid->svc_hint = cp->svc_hint; 1378 uuid->svc_hint = cp->svc_hint;
1379 uuid->size = get_uuid_size(cp->uuid);
1303 1380
1304 list_add(&uuid->list, &hdev->uuids); 1381 list_add_tail(&uuid->list, &hdev->uuids);
1305 1382
1306 err = update_class(hdev); 1383 err = update_class(hdev);
1307 if (err < 0) 1384 if (err < 0)
@@ -1332,7 +1409,8 @@ static bool enable_service_cache(struct hci_dev *hdev)
1332 return false; 1409 return false;
1333 1410
1334 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { 1411 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1335 schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT); 1412 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1413 CACHE_TIMEOUT);
1336 return true; 1414 return true;
1337 } 1415 }
1338 1416
@@ -1344,7 +1422,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1344{ 1422{
1345 struct mgmt_cp_remove_uuid *cp = data; 1423 struct mgmt_cp_remove_uuid *cp = data;
1346 struct pending_cmd *cmd; 1424 struct pending_cmd *cmd;
1347 struct list_head *p, *n; 1425 struct bt_uuid *match, *tmp;
1348 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 1426 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1349 int err, found; 1427 int err, found;
1350 1428
@@ -1372,9 +1450,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1372 1450
1373 found = 0; 1451 found = 0;
1374 1452
1375 list_for_each_safe(p, n, &hdev->uuids) { 1453 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1376 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
1377
1378 if (memcmp(match->uuid, cp->uuid, 16) != 0) 1454 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1379 continue; 1455 continue;
1380 1456
@@ -1422,13 +1498,19 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1422 1498
1423 BT_DBG("request for %s", hdev->name); 1499 BT_DBG("request for %s", hdev->name);
1424 1500
1425 hci_dev_lock(hdev); 1501 if (!lmp_bredr_capable(hdev))
1502 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1503 MGMT_STATUS_NOT_SUPPORTED);
1426 1504
1427 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { 1505 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags))
1428 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 1506 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1429 MGMT_STATUS_BUSY); 1507 MGMT_STATUS_BUSY);
1430 goto unlock; 1508
1431 } 1509 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0)
1510 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1511 MGMT_STATUS_INVALID_PARAMS);
1512
1513 hci_dev_lock(hdev);
1432 1514
1433 hdev->major_class = cp->major; 1515 hdev->major_class = cp->major;
1434 hdev->minor_class = cp->minor; 1516 hdev->minor_class = cp->minor;
@@ -1483,9 +1565,21 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1483 MGMT_STATUS_INVALID_PARAMS); 1565 MGMT_STATUS_INVALID_PARAMS);
1484 } 1566 }
1485 1567
1568 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1569 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1570 MGMT_STATUS_INVALID_PARAMS);
1571
1486 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1572 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1487 key_count); 1573 key_count);
1488 1574
1575 for (i = 0; i < key_count; i++) {
1576 struct mgmt_link_key_info *key = &cp->keys[i];
1577
1578 if (key->addr.type != BDADDR_BREDR)
1579 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1580 MGMT_STATUS_INVALID_PARAMS);
1581 }
1582
1489 hci_dev_lock(hdev); 1583 hci_dev_lock(hdev);
1490 1584
1491 hci_link_keys_clear(hdev); 1585 hci_link_keys_clear(hdev);
@@ -1533,12 +1627,22 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1533 struct hci_conn *conn; 1627 struct hci_conn *conn;
1534 int err; 1628 int err;
1535 1629
1536 hci_dev_lock(hdev);
1537
1538 memset(&rp, 0, sizeof(rp)); 1630 memset(&rp, 0, sizeof(rp));
1539 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); 1631 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1540 rp.addr.type = cp->addr.type; 1632 rp.addr.type = cp->addr.type;
1541 1633
1634 if (!bdaddr_type_is_valid(cp->addr.type))
1635 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1636 MGMT_STATUS_INVALID_PARAMS,
1637 &rp, sizeof(rp));
1638
1639 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1640 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1641 MGMT_STATUS_INVALID_PARAMS,
1642 &rp, sizeof(rp));
1643
1644 hci_dev_lock(hdev);
1645
1542 if (!hdev_is_powered(hdev)) { 1646 if (!hdev_is_powered(hdev)) {
1543 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 1647 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1544 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 1648 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
@@ -1596,6 +1700,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1596 u16 len) 1700 u16 len)
1597{ 1701{
1598 struct mgmt_cp_disconnect *cp = data; 1702 struct mgmt_cp_disconnect *cp = data;
1703 struct mgmt_rp_disconnect rp;
1599 struct hci_cp_disconnect dc; 1704 struct hci_cp_disconnect dc;
1600 struct pending_cmd *cmd; 1705 struct pending_cmd *cmd;
1601 struct hci_conn *conn; 1706 struct hci_conn *conn;
@@ -1603,17 +1708,26 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1603 1708
1604 BT_DBG(""); 1709 BT_DBG("");
1605 1710
1711 memset(&rp, 0, sizeof(rp));
1712 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1713 rp.addr.type = cp->addr.type;
1714
1715 if (!bdaddr_type_is_valid(cp->addr.type))
1716 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1717 MGMT_STATUS_INVALID_PARAMS,
1718 &rp, sizeof(rp));
1719
1606 hci_dev_lock(hdev); 1720 hci_dev_lock(hdev);
1607 1721
1608 if (!test_bit(HCI_UP, &hdev->flags)) { 1722 if (!test_bit(HCI_UP, &hdev->flags)) {
1609 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1723 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1610 MGMT_STATUS_NOT_POWERED); 1724 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1611 goto failed; 1725 goto failed;
1612 } 1726 }
1613 1727
1614 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 1728 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1729 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1616 MGMT_STATUS_BUSY); 1730 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1617 goto failed; 1731 goto failed;
1618 } 1732 }
1619 1733
@@ -1624,8 +1738,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1624 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1738 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1625 1739
1626 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { 1740 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1627 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1741 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1628 MGMT_STATUS_NOT_CONNECTED); 1742 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1629 goto failed; 1743 goto failed;
1630 } 1744 }
1631 1745
@@ -1903,11 +2017,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1903 2017
1904 BT_DBG(""); 2018 BT_DBG("");
1905 2019
2020 memset(&rp, 0, sizeof(rp));
2021 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2022 rp.addr.type = cp->addr.type;
2023
2024 if (!bdaddr_type_is_valid(cp->addr.type))
2025 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2026 MGMT_STATUS_INVALID_PARAMS,
2027 &rp, sizeof(rp));
2028
1906 hci_dev_lock(hdev); 2029 hci_dev_lock(hdev);
1907 2030
1908 if (!hdev_is_powered(hdev)) { 2031 if (!hdev_is_powered(hdev)) {
1909 err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 2032 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1910 MGMT_STATUS_NOT_POWERED); 2033 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1911 goto unlock; 2034 goto unlock;
1912 } 2035 }
1913 2036
@@ -1924,10 +2047,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1924 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, 2047 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
1925 cp->addr.type, sec_level, auth_type); 2048 cp->addr.type, sec_level, auth_type);
1926 2049
1927 memset(&rp, 0, sizeof(rp));
1928 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1929 rp.addr.type = cp->addr.type;
1930
1931 if (IS_ERR(conn)) { 2050 if (IS_ERR(conn)) {
1932 int status; 2051 int status;
1933 2052
@@ -2254,24 +2373,16 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2254 2373
2255 hci_dev_lock(hdev); 2374 hci_dev_lock(hdev);
2256 2375
2257 if (!hdev_is_powered(hdev)) {
2258 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2259 MGMT_STATUS_NOT_POWERED, &cp->addr,
2260 sizeof(cp->addr));
2261 goto unlock;
2262 }
2263
2264 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash, 2376 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2265 cp->randomizer); 2377 cp->randomizer);
2266 if (err < 0) 2378 if (err < 0)
2267 status = MGMT_STATUS_FAILED; 2379 status = MGMT_STATUS_FAILED;
2268 else 2380 else
2269 status = 0; 2381 status = MGMT_STATUS_SUCCESS;
2270 2382
2271 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, 2383 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2272 &cp->addr, sizeof(cp->addr)); 2384 &cp->addr, sizeof(cp->addr));
2273 2385
2274unlock:
2275 hci_dev_unlock(hdev); 2386 hci_dev_unlock(hdev);
2276 return err; 2387 return err;
2277} 2388}
@@ -2287,24 +2398,15 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2287 2398
2288 hci_dev_lock(hdev); 2399 hci_dev_lock(hdev);
2289 2400
2290 if (!hdev_is_powered(hdev)) {
2291 err = cmd_complete(sk, hdev->id,
2292 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2293 MGMT_STATUS_NOT_POWERED, &cp->addr,
2294 sizeof(cp->addr));
2295 goto unlock;
2296 }
2297
2298 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr); 2401 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2299 if (err < 0) 2402 if (err < 0)
2300 status = MGMT_STATUS_INVALID_PARAMS; 2403 status = MGMT_STATUS_INVALID_PARAMS;
2301 else 2404 else
2302 status = 0; 2405 status = MGMT_STATUS_SUCCESS;
2303 2406
2304 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2407 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2305 status, &cp->addr, sizeof(cp->addr)); 2408 status, &cp->addr, sizeof(cp->addr));
2306 2409
2307unlock:
2308 hci_dev_unlock(hdev); 2410 hci_dev_unlock(hdev);
2309 return err; 2411 return err;
2310} 2412}
@@ -2365,31 +2467,45 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2365 2467
2366 switch (hdev->discovery.type) { 2468 switch (hdev->discovery.type) {
2367 case DISCOV_TYPE_BREDR: 2469 case DISCOV_TYPE_BREDR:
2368 if (lmp_bredr_capable(hdev)) 2470 if (!lmp_bredr_capable(hdev)) {
2369 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); 2471 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2370 else 2472 MGMT_STATUS_NOT_SUPPORTED);
2371 err = -ENOTSUPP; 2473 mgmt_pending_remove(cmd);
2474 goto failed;
2475 }
2476
2477 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2372 break; 2478 break;
2373 2479
2374 case DISCOV_TYPE_LE: 2480 case DISCOV_TYPE_LE:
2375 if (lmp_host_le_capable(hdev)) 2481 if (!lmp_host_le_capable(hdev)) {
2376 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, 2482 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2377 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY); 2483 MGMT_STATUS_NOT_SUPPORTED);
2378 else 2484 mgmt_pending_remove(cmd);
2379 err = -ENOTSUPP; 2485 goto failed;
2486 }
2487
2488 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2489 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2380 break; 2490 break;
2381 2491
2382 case DISCOV_TYPE_INTERLEAVED: 2492 case DISCOV_TYPE_INTERLEAVED:
2383 if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev)) 2493 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2384 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, 2494 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2385 LE_SCAN_WIN, 2495 MGMT_STATUS_NOT_SUPPORTED);
2386 LE_SCAN_TIMEOUT_BREDR_LE); 2496 mgmt_pending_remove(cmd);
2387 else 2497 goto failed;
2388 err = -ENOTSUPP; 2498 }
2499
2500 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2501 LE_SCAN_TIMEOUT_BREDR_LE);
2389 break; 2502 break;
2390 2503
2391 default: 2504 default:
2392 err = -EINVAL; 2505 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2506 MGMT_STATUS_INVALID_PARAMS);
2507 mgmt_pending_remove(cmd);
2508 goto failed;
2393 } 2509 }
2394 2510
2395 if (err < 0) 2511 if (err < 0)
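Each discovery type now fails with a proper MGMT status on the management socket instead of leaking a bare -ENOTSUPP, and the pending command is removed before bailing out. A compact sketch of the capability-to-status mapping; the enum values here are illustrative stand-ins for the MGMT_STATUS_* codes.

#include <stdio.h>
#include <stdbool.h>

enum discov_type { DISCOV_BREDR, DISCOV_LE, DISCOV_INTERLEAVED };
enum mgmt_status { STATUS_SUCCESS, STATUS_NOT_SUPPORTED,
		   STATUS_INVALID_PARAMS };

static enum mgmt_status check_discovery(enum discov_type type,
					bool bredr, bool le)
{
	switch (type) {
	case DISCOV_BREDR:
		return bredr ? STATUS_SUCCESS : STATUS_NOT_SUPPORTED;
	case DISCOV_LE:
		return le ? STATUS_SUCCESS : STATUS_NOT_SUPPORTED;
	case DISCOV_INTERLEAVED:
		return (bredr && le) ? STATUS_SUCCESS
				     : STATUS_NOT_SUPPORTED;
	default:
		return STATUS_INVALID_PARAMS;	/* unknown type */
	}
}

int main(void)
{
	/* Interleaved discovery needs both BR/EDR and LE support. */
	printf("%d\n", check_discovery(DISCOV_INTERLEAVED, true, false));
	return 0;
}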
@@ -2510,7 +2626,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2510 hci_inquiry_cache_update_resolve(hdev, e); 2626 hci_inquiry_cache_update_resolve(hdev, e);
2511 } 2627 }
2512 2628
2513 err = 0; 2629 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2630 sizeof(cp->addr));
2514 2631
2515failed: 2632failed:
2516 hci_dev_unlock(hdev); 2633 hci_dev_unlock(hdev);
@@ -2526,13 +2643,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2526 2643
2527 BT_DBG("%s", hdev->name); 2644 BT_DBG("%s", hdev->name);
2528 2645
2646 if (!bdaddr_type_is_valid(cp->addr.type))
2647 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2648 MGMT_STATUS_INVALID_PARAMS,
2649 &cp->addr, sizeof(cp->addr));
2650
2529 hci_dev_lock(hdev); 2651 hci_dev_lock(hdev);
2530 2652
2531 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type); 2653 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2532 if (err < 0) 2654 if (err < 0)
2533 status = MGMT_STATUS_FAILED; 2655 status = MGMT_STATUS_FAILED;
2534 else 2656 else
2535 status = 0; 2657 status = MGMT_STATUS_SUCCESS;
2536 2658
2537 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, 2659 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2538 &cp->addr, sizeof(cp->addr)); 2660 &cp->addr, sizeof(cp->addr));
@@ -2551,13 +2673,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2551 2673
2552 BT_DBG("%s", hdev->name); 2674 BT_DBG("%s", hdev->name);
2553 2675
2676 if (!bdaddr_type_is_valid(cp->addr.type))
2677 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2678 MGMT_STATUS_INVALID_PARAMS,
2679 &cp->addr, sizeof(cp->addr));
2680
2554 hci_dev_lock(hdev); 2681 hci_dev_lock(hdev);
2555 2682
2556 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type); 2683 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2557 if (err < 0) 2684 if (err < 0)
2558 status = MGMT_STATUS_INVALID_PARAMS; 2685 status = MGMT_STATUS_INVALID_PARAMS;
2559 else 2686 else
2560 status = 0; 2687 status = MGMT_STATUS_SUCCESS;
2561 2688
2562 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, 2689 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2563 &cp->addr, sizeof(cp->addr)); 2690 &cp->addr, sizeof(cp->addr));
@@ -2612,6 +2739,10 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2612 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2739 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2613 MGMT_STATUS_NOT_SUPPORTED); 2740 MGMT_STATUS_NOT_SUPPORTED);
2614 2741
2742 if (cp->val != 0x00 && cp->val != 0x01)
2743 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2744 MGMT_STATUS_INVALID_PARAMS);
2745
2615 if (!hdev_is_powered(hdev)) 2746 if (!hdev_is_powered(hdev))
2616 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2747 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2617 MGMT_STATUS_NOT_POWERED); 2748 MGMT_STATUS_NOT_POWERED);
@@ -2659,12 +2790,23 @@ done:
2659 return err; 2790 return err;
2660} 2791}
2661 2792
2793static bool ltk_is_valid(struct mgmt_ltk_info *key)
2794{
2795 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2796 return false;
2797 if (key->master != 0x00 && key->master != 0x01)
2798 return false;
2799 if (!bdaddr_type_is_le(key->addr.type))
2800 return false;
2801 return true;
2802}
2803
2662static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, 2804static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2663 void *cp_data, u16 len) 2805 void *cp_data, u16 len)
2664{ 2806{
2665 struct mgmt_cp_load_long_term_keys *cp = cp_data; 2807 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2666 u16 key_count, expected_len; 2808 u16 key_count, expected_len;
2667 int i; 2809 int i, err;
2668 2810
2669 key_count = __le16_to_cpu(cp->key_count); 2811 key_count = __le16_to_cpu(cp->key_count);
2670 2812
@@ -2674,11 +2816,20 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2674 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2816 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2675 len, expected_len); 2817 len, expected_len);
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2818 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2677 EINVAL); 2819 MGMT_STATUS_INVALID_PARAMS);
2678 } 2820 }
2679 2821
2680 BT_DBG("%s key_count %u", hdev->name, key_count); 2822 BT_DBG("%s key_count %u", hdev->name, key_count);
2681 2823
2824 for (i = 0; i < key_count; i++) {
2825 struct mgmt_ltk_info *key = &cp->keys[i];
2826
2827 if (!ltk_is_valid(key))
2828 return cmd_status(sk, hdev->id,
2829 MGMT_OP_LOAD_LONG_TERM_KEYS,
2830 MGMT_STATUS_INVALID_PARAMS);
2831 }
2832
2682 hci_dev_lock(hdev); 2833 hci_dev_lock(hdev);
2683 2834
2684 hci_smp_ltks_clear(hdev); 2835 hci_smp_ltks_clear(hdev);
@@ -2698,9 +2849,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2698 key->enc_size, key->ediv, key->rand); 2849 key->enc_size, key->ediv, key->rand);
2699 } 2850 }
2700 2851
2852 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2853 NULL, 0);
2854
2701 hci_dev_unlock(hdev); 2855 hci_dev_unlock(hdev);
2702 2856
2703 return 0; 2857 return err;
2704} 2858}
2705 2859
2706static const struct mgmt_handler { 2860static const struct mgmt_handler {
@@ -2915,6 +3069,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2915 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 3069 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2916 3070
2917 if (powered) { 3071 if (powered) {
3072 u8 link_sec;
3073
2918 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 3074 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
2919 !lmp_host_ssp_capable(hdev)) { 3075 !lmp_host_ssp_capable(hdev)) {
2920 u8 ssp = 1; 3076 u8 ssp = 1;
@@ -2938,6 +3094,11 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2938 sizeof(cp), &cp); 3094 sizeof(cp), &cp);
2939 } 3095 }
2940 3096
3097 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3098 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3099 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3100 sizeof(link_sec), &link_sec);
3101
2941 if (lmp_bredr_capable(hdev)) { 3102 if (lmp_bredr_capable(hdev)) {
2942 set_bredr_scan(hdev); 3103 set_bredr_scan(hdev);
2943 update_class(hdev); 3104 update_class(hdev);
@@ -2946,7 +3107,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2946 } 3107 }
2947 } else { 3108 } else {
2948 u8 status = MGMT_STATUS_NOT_POWERED; 3109 u8 status = MGMT_STATUS_NOT_POWERED;
3110 u8 zero_cod[] = { 0, 0, 0 };
3111
2949 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 3112 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3113
3114 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3115 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3116 zero_cod, sizeof(zero_cod), NULL);
2950 } 3117 }
2951 3118
2952 err = new_settings(hdev, match.sk); 3119 err = new_settings(hdev, match.sk);
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig
index 22e718b554e4..18d352ea2bc7 100644
--- a/net/bluetooth/rfcomm/Kconfig
+++ b/net/bluetooth/rfcomm/Kconfig
@@ -12,6 +12,7 @@ config BT_RFCOMM
12config BT_RFCOMM_TTY 12config BT_RFCOMM_TTY
13 bool "RFCOMM TTY support" 13 bool "RFCOMM TTY support"
14 depends on BT_RFCOMM 14 depends on BT_RFCOMM
15 depends on TTY
15 help 16 help
16 This option enables TTY emulation support for RFCOMM channels. 17 This option enables TTY emulation support for RFCOMM channels.
17 18
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 201fdf737209..b23e2713fea8 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -257,7 +257,7 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s)
257{ 257{
258 BT_DBG("session %p state %ld", s, s->state); 258 BT_DBG("session %p state %ld", s, s->state);
259 259
260 if (timer_pending(&s->timer) && del_timer(&s->timer)) 260 if (del_timer(&s->timer))
261 rfcomm_session_put(s); 261 rfcomm_session_put(s);
262} 262}
263 263
@@ -285,7 +285,7 @@ static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d)
285{ 285{
286 BT_DBG("dlc %p state %ld", d, d->state); 286 BT_DBG("dlc %p state %ld", d, d->state);
287 287
288 if (timer_pending(&d->timer) && del_timer(&d->timer)) 288 if (del_timer(&d->timer))
289 rfcomm_dlc_put(d); 289 rfcomm_dlc_put(d);
290} 290}
291 291
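del_timer() already returns whether the timer was pending when it was deactivated, so the dropped timer_pending() pre-check was redundant and, worse, a separate read that two contexts could both observe as true. A userspace sketch of the deactivate-once idea using an atomic exchange; the session type and helper names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct session {
	atomic_bool timer_pending;
	int refcnt;
};

/* Analogue of del_timer(): atomically clears "pending" and reports
 * whether it had been set, so exactly one caller drops the ref. */
static int session_del_timer(struct session *s)
{
	return atomic_exchange(&s->timer_pending, false);
}

static void session_clear_timer(struct session *s)
{
	/* No separate timer_pending() test: the return value already
	 * says whether the timer was armed, and a pre-check would only
	 * open a window for two callers to both see "pending". */
	if (session_del_timer(s))
		s->refcnt--;
}

int main(void)
{
	struct session s = { .refcnt = 2 };

	atomic_init(&s.timer_pending, true);
	session_clear_timer(&s);	/* drops the timer's ref */
	session_clear_timer(&s);	/* harmless no-op */
	printf("refcnt=%d\n", s.refcnt);	/* 1 */
	return 0;
}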
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index bd6fd0f43d2b..b6e44ad6cca6 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -541,23 +541,21 @@ int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
541static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) 541static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
542{ 542{
543 struct rfcomm_dev *dev = dlc->owner; 543 struct rfcomm_dev *dev = dlc->owner;
544 struct tty_struct *tty;
545 544
546 if (!dev) { 545 if (!dev) {
547 kfree_skb(skb); 546 kfree_skb(skb);
548 return; 547 return;
549 } 548 }
550 549
551 tty = dev->port.tty; 550 if (!skb_queue_empty(&dev->pending)) {
552 if (!tty || !skb_queue_empty(&dev->pending)) {
553 skb_queue_tail(&dev->pending, skb); 551 skb_queue_tail(&dev->pending, skb);
554 return; 552 return;
555 } 553 }
556 554
557 BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len); 555 BT_DBG("dlc %p len %d", dlc, skb->len);
558 556
559 tty_insert_flip_string(tty, skb->data, skb->len); 557 tty_insert_flip_string(&dev->port, skb->data, skb->len);
560 tty_flip_buffer_push(tty); 558 tty_flip_buffer_push(&dev->port);
561 559
562 kfree_skb(skb); 560 kfree_skb(skb);
563} 561}
@@ -621,26 +619,23 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
621/* ---- TTY functions ---- */ 619/* ---- TTY functions ---- */
622static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) 620static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
623{ 621{
624 struct tty_struct *tty = dev->port.tty;
625 struct sk_buff *skb; 622 struct sk_buff *skb;
626 int inserted = 0; 623 int inserted = 0;
627 624
628 if (!tty) 625 BT_DBG("dev %p", dev);
629 return;
630
631 BT_DBG("dev %p tty %p", dev, tty);
632 626
633 rfcomm_dlc_lock(dev->dlc); 627 rfcomm_dlc_lock(dev->dlc);
634 628
635 while ((skb = skb_dequeue(&dev->pending))) { 629 while ((skb = skb_dequeue(&dev->pending))) {
636 inserted += tty_insert_flip_string(tty, skb->data, skb->len); 630 inserted += tty_insert_flip_string(&dev->port, skb->data,
631 skb->len);
637 kfree_skb(skb); 632 kfree_skb(skb);
638 } 633 }
639 634
640 rfcomm_dlc_unlock(dev->dlc); 635 rfcomm_dlc_unlock(dev->dlc);
641 636
642 if (inserted > 0) 637 if (inserted > 0)
643 tty_flip_buffer_push(tty); 638 tty_flip_buffer_push(&dev->port);
644} 639}
645 640
646static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) 641static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 531a93d613d4..b5178d62064e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)
352 352
353 case BT_CONNECTED: 353 case BT_CONNECTED:
354 case BT_CONFIG: 354 case BT_CONFIG:
355 if (sco_pi(sk)->conn) { 355 if (sco_pi(sk)->conn->hcon) {
356 sk->sk_state = BT_DISCONN; 356 sk->sk_state = BT_DISCONN;
357 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); 357 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
358 hci_conn_put(sco_pi(sk)->conn->hcon); 358 hci_conn_put(sco_pi(sk)->conn->hcon);
@@ -900,8 +900,6 @@ static void sco_conn_ready(struct sco_conn *conn)
900 900
901 BT_DBG("conn %p", conn); 901 BT_DBG("conn %p", conn);
902 902
903 sco_conn_lock(conn);
904
905 if (sk) { 903 if (sk) {
906 sco_sock_clear_timer(sk); 904 sco_sock_clear_timer(sk);
907 bh_lock_sock(sk); 905 bh_lock_sock(sk);
@@ -909,9 +907,13 @@ static void sco_conn_ready(struct sco_conn *conn)
909 sk->sk_state_change(sk); 907 sk->sk_state_change(sk);
910 bh_unlock_sock(sk); 908 bh_unlock_sock(sk);
911 } else { 909 } else {
910 sco_conn_lock(conn);
911
912 parent = sco_get_sock_listen(conn->src); 912 parent = sco_get_sock_listen(conn->src);
913 if (!parent) 913 if (!parent) {
914 goto done; 914 sco_conn_unlock(conn);
915 return;
916 }
915 917
916 bh_lock_sock(parent); 918 bh_lock_sock(parent);
917 919
@@ -919,7 +921,8 @@ static void sco_conn_ready(struct sco_conn *conn)
919 BTPROTO_SCO, GFP_ATOMIC); 921 BTPROTO_SCO, GFP_ATOMIC);
920 if (!sk) { 922 if (!sk) {
921 bh_unlock_sock(parent); 923 bh_unlock_sock(parent);
922 goto done; 924 sco_conn_unlock(conn);
925 return;
923 } 926 }
924 927
925 sco_sock_init(sk, parent); 928 sco_sock_init(sk, parent);
@@ -939,10 +942,9 @@ static void sco_conn_ready(struct sco_conn *conn)
939 parent->sk_data_ready(parent, 1); 942 parent->sk_data_ready(parent, 1);
940 943
941 bh_unlock_sock(parent); 944 bh_unlock_sock(parent);
942 }
943 945
944done: 946 sco_conn_unlock(conn);
945 sco_conn_unlock(conn); 947 }
946} 948}
947 949
948/* ----- SCO interface with lower layer (HCI) ----- */ 950/* ----- SCO interface with lower layer (HCI) ----- */
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 68a9587c9694..5abefb12891d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
859 859
860 skb_pull(skb, sizeof(code)); 860 skb_pull(skb, sizeof(code));
861 861
862 /*
 863 * The SMP context must be initialized for all PDUs other than
 864 * pairing and security requests. If we get any other PDU when
 865 * not initialized, simply disconnect (done if this function
866 * returns an error).
867 */
868 if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
869 !conn->smp_chan) {
870 BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
871 kfree_skb(skb);
872 return -ENOTSUPP;
873 }
874
862 switch (code) { 875 switch (code) {
863 case SMP_CMD_PAIRING_REQ: 876 case SMP_CMD_PAIRING_REQ:
864 reason = smp_cmd_pairing_req(conn, skb); 877 reason = smp_cmd_pairing_req(conn, skb);
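The guard rejects any PDU that would require an SMP context before one exists; only pairing and security requests may legitimately create it. A standalone sketch of the same dispatch check; the two opcode values follow the SMP specification, the helper is illustrative.

#include <stdio.h>
#include <stdbool.h>

#define SMP_CMD_PAIRING_REQ  0x01
#define SMP_CMD_SECURITY_REQ 0x0b

static bool smp_pdu_allowed(unsigned char code, bool ctx_initialized)
{
	if (ctx_initialized)
		return true;
	/* Without a context, only the PDUs that may create one pass. */
	return code == SMP_CMD_PAIRING_REQ || code == SMP_CMD_SECURITY_REQ;
}

int main(void)
{
	printf("%d\n", smp_pdu_allowed(0x03, false));		  /* 0: drop */
	printf("%d\n", smp_pdu_allowed(SMP_CMD_PAIRING_REQ, false)); /* 1 */
	return 0;
}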
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 6dee7bf648a9..aa0d3b2f1bb7 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -46,3 +46,17 @@ config BRIDGE_IGMP_SNOOPING
46 Say N to exclude this support and reduce the binary size. 46 Say N to exclude this support and reduce the binary size.
47 47
48 If unsure, say Y. 48 If unsure, say Y.
49
50config BRIDGE_VLAN_FILTERING
51 bool "VLAN filtering"
52 depends on BRIDGE
53 depends on VLAN_8021Q
54 default n
55 ---help---
 56 If you say Y here, then the Ethernet bridge will be able to selectively
 57 receive and forward traffic based on the VLAN information in the packet
 58 and any VLAN information configured on the bridge port or bridge device.
59
60 Say N to exclude this support and reduce the binary size.
61
62 If unsure, say Y.
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index e859098f5ee9..e85498b2f166 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -14,4 +14,6 @@ bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o 15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
16 16
17bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
18
17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 19obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 7c78e2640190..d5f1d3fd4b28 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -30,6 +30,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
30 struct net_bridge_fdb_entry *dst; 30 struct net_bridge_fdb_entry *dst;
31 struct net_bridge_mdb_entry *mdst; 31 struct net_bridge_mdb_entry *mdst;
32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
33 u16 vid = 0;
33 34
34 rcu_read_lock(); 35 rcu_read_lock();
35#ifdef CONFIG_BRIDGE_NETFILTER 36#ifdef CONFIG_BRIDGE_NETFILTER
@@ -45,6 +46,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
45 brstats->tx_bytes += skb->len; 46 brstats->tx_bytes += skb->len;
46 u64_stats_update_end(&brstats->syncp); 47 u64_stats_update_end(&brstats->syncp);
47 48
49 if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
50 goto out;
51
48 BR_INPUT_SKB_CB(skb)->brdev = dev; 52 BR_INPUT_SKB_CB(skb)->brdev = dev;
49 53
50 skb_reset_mac_header(skb); 54 skb_reset_mac_header(skb);
@@ -67,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
67 br_multicast_deliver(mdst, skb); 71 br_multicast_deliver(mdst, skb);
68 else 72 else
69 br_flood_deliver(br, skb); 73 br_flood_deliver(br, skb);
70 } else if ((dst = __br_fdb_get(br, dest)) != NULL) 74 } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
71 br_deliver(dst->dst, skb); 75 br_deliver(dst->dst, skb);
72 else 76 else
73 br_flood_deliver(br, skb); 77 br_flood_deliver(br, skb);
@@ -172,12 +176,10 @@ static int br_set_mac_address(struct net_device *dev, void *p)
172 176
173 spin_lock_bh(&br->lock); 177 spin_lock_bh(&br->lock);
174 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { 178 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
175 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
176 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 179 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
177 br_fdb_change_mac_address(br, addr->sa_data); 180 br_fdb_change_mac_address(br, addr->sa_data);
178 br_stp_change_bridge_id(br, addr->sa_data); 181 br_stp_change_bridge_id(br, addr->sa_data);
179 } 182 }
180 br->flags |= BR_SET_MAC_ADDR;
181 spin_unlock_bh(&br->lock); 183 spin_unlock_bh(&br->lock);
182 184
183 return 0; 185 return 0;
@@ -185,10 +187,10 @@ static int br_set_mac_address(struct net_device *dev, void *p)
185 187
186static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) 188static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
187{ 189{
188 strcpy(info->driver, "bridge"); 190 strlcpy(info->driver, "bridge", sizeof(info->driver));
189 strcpy(info->version, BR_VERSION); 191 strlcpy(info->version, BR_VERSION, sizeof(info->version));
190 strcpy(info->fw_version, "N/A"); 192 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
191 strcpy(info->bus_info, "N/A"); 193 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
192} 194}
193 195
194static netdev_features_t br_fix_features(struct net_device *dev, 196static netdev_features_t br_fix_features(struct net_device *dev,
@@ -267,7 +269,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
267 269
268 p->np = NULL; 270 p->np = NULL;
269 271
270 __netpoll_free_rcu(np); 272 __netpoll_free_async(np);
271} 273}
272 274
273#endif 275#endif
@@ -315,6 +317,7 @@ static const struct net_device_ops br_netdev_ops = {
315 .ndo_fdb_dump = br_fdb_dump, 317 .ndo_fdb_dump = br_fdb_dump,
316 .ndo_bridge_getlink = br_getlink, 318 .ndo_bridge_getlink = br_getlink,
317 .ndo_bridge_setlink = br_setlink, 319 .ndo_bridge_setlink = br_setlink,
320 .ndo_bridge_dellink = br_dellink,
318}; 321};
319 322
320static void br_dev_free(struct net_device *dev) 323static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9576e6de2b8..8117900af4de 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,11 +23,12 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/atomic.h> 24#include <linux/atomic.h>
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26#include <linux/if_vlan.h>
26#include "br_private.h" 27#include "br_private.h"
27 28
28static struct kmem_cache *br_fdb_cache __read_mostly; 29static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 30static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 31 const unsigned char *addr, u16 vid);
31static void fdb_notify(struct net_bridge *br, 32static void fdb_notify(struct net_bridge *br,
32 const struct net_bridge_fdb_entry *, int); 33 const struct net_bridge_fdb_entry *, int);
33 34
@@ -67,11 +68,11 @@ static inline int has_expired(const struct net_bridge *br,
67 time_before_eq(fdb->updated + hold_time(br), jiffies); 68 time_before_eq(fdb->updated + hold_time(br), jiffies);
68} 69}
69 70
70static inline int br_mac_hash(const unsigned char *mac) 71static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
71{ 72{
72 /* use 1 byte of OUI cnd 3 bytes of NIC */ 73 /* use 1 byte of OUI and 3 bytes of NIC */
73 u32 key = get_unaligned((u32 *)(mac + 2)); 74 u32 key = get_unaligned((u32 *)(mac + 2));
74 return jhash_1word(key, fdb_salt) & (BR_HASH_SIZE - 1); 75 return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
75} 76}
76 77
77static void fdb_rcu_free(struct rcu_head *head) 78static void fdb_rcu_free(struct rcu_head *head)
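Folding the VLAN id into the hash means entries for the same MAC on different VLANs land in their own buckets, which the per-VLAN lookups added below rely on. A standalone sketch of hashing the (MAC, VLAN) pair; the toy mix function stands in for the kernel's jhash_2words(), and the constants are illustrative.

#include <stdio.h>
#include <stdint.h>

#define BR_HASH_SIZE 256		/* power of two, as in the kernel */

static uint32_t mix2(uint32_t a, uint32_t b)
{
	uint32_t h = a * 0x9e3779b9u ^ b * 0x85ebca6bu;
	h ^= h >> 16;
	return h;
}

static int mac_vid_hash(const uint8_t *mac, uint16_t vid)
{
	/* 1 byte of OUI and 3 bytes of NIC, as the kernel comment says */
	uint32_t key = (uint32_t)mac[2] << 24 | mac[3] << 16 |
		       mac[4] << 8 | mac[5];
	return mix2(key, vid) & (BR_HASH_SIZE - 1);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* Same MAC, different VLANs: typically different buckets. */
	printf("vid 10 -> bucket %d\n", mac_vid_hash(mac, 10));
	printf("vid 20 -> bucket %d\n", mac_vid_hash(mac, 20));
	return 0;
}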
@@ -91,6 +92,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
91void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) 92void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
92{ 93{
93 struct net_bridge *br = p->br; 94 struct net_bridge *br = p->br;
 95 bool no_vlan = !nbp_get_vlan_info(p);
94 int i; 96 int i;
95 97
96 spin_lock_bh(&br->hash_lock); 98 spin_lock_bh(&br->hash_lock);
@@ -105,10 +107,12 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
105 if (f->dst == p && f->is_local) { 107 if (f->dst == p && f->is_local) {
106 /* maybe another port has same hw addr? */ 108 /* maybe another port has same hw addr? */
107 struct net_bridge_port *op; 109 struct net_bridge_port *op;
110 u16 vid = f->vlan_id;
108 list_for_each_entry(op, &br->port_list, list) { 111 list_for_each_entry(op, &br->port_list, list) {
109 if (op != p && 112 if (op != p &&
110 ether_addr_equal(op->dev->dev_addr, 113 ether_addr_equal(op->dev->dev_addr,
111 f->addr.addr)) { 114 f->addr.addr) &&
115 nbp_vlan_find(op, vid)) {
112 f->dst = op; 116 f->dst = op;
113 goto insert; 117 goto insert;
114 } 118 }
@@ -116,27 +120,55 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
116 120
117 /* delete old one */ 121 /* delete old one */
118 fdb_delete(br, f); 122 fdb_delete(br, f);
119 goto insert; 123insert:
124 /* insert new address, may fail if invalid
125 * address or dup.
126 */
127 fdb_insert(br, p, newaddr, vid);
128
129 /* if this port has no vlan information
130 * configured, we can safely be done at
131 * this point.
132 */
133 if (no_vlan)
134 goto done;
120 } 135 }
121 } 136 }
122 } 137 }
123 insert:
124 /* insert new address, may fail if invalid address or dup. */
125 fdb_insert(br, p, newaddr);
126 138
139done:
127 spin_unlock_bh(&br->hash_lock); 140 spin_unlock_bh(&br->hash_lock);
128} 141}
129 142
130void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) 143void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
131{ 144{
132 struct net_bridge_fdb_entry *f; 145 struct net_bridge_fdb_entry *f;
146 struct net_port_vlans *pv;
147 u16 vid = 0;
133 148
134 /* If old entry was unassociated with any port, then delete it. */ 149 /* If old entry was unassociated with any port, then delete it. */
135 f = __br_fdb_get(br, br->dev->dev_addr); 150 f = __br_fdb_get(br, br->dev->dev_addr, 0);
136 if (f && f->is_local && !f->dst) 151 if (f && f->is_local && !f->dst)
137 fdb_delete(br, f); 152 fdb_delete(br, f);
138 153
139 fdb_insert(br, NULL, newaddr); 154 fdb_insert(br, NULL, newaddr, 0);
155
156 /* Now remove and add entries for every VLAN configured on the
157 * bridge. This function runs under RTNL so the bitmap will not
158 * change from under us.
159 */
160 pv = br_get_vlan_info(br);
161 if (!pv)
162 return;
163
164 for (vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid);
165 vid < BR_VLAN_BITMAP_LEN;
166 vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid+1)) {
167 f = __br_fdb_get(br, br->dev->dev_addr, vid);
168 if (f && f->is_local && !f->dst)
169 fdb_delete(br, f);
170 fdb_insert(br, NULL, newaddr, vid);
171 }
140} 172}
141 173
142void br_fdb_cleanup(unsigned long _data) 174void br_fdb_cleanup(unsigned long _data)
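The loop above visits every VLAN configured on the bridge by walking the set bits of vlan_bitmap with find_next_bit(), refreshing the local FDB entry for each. A userspace analogue of that bitmap walk; the bitmap size and bit helpers are simplified stand-ins for the kernel versions.

#include <stdio.h>
#include <stdint.h>

#define VLAN_N 4096
static uint64_t bitmap[VLAN_N / 64];

static void set_vid(unsigned n)
{
	bitmap[n / 64] |= 1ull << (n % 64);
}

static unsigned find_next_bit(unsigned from)
{
	for (unsigned i = from; i < VLAN_N; i++)
		if (bitmap[i / 64] & (1ull << (i % 64)))
			return i;
	return VLAN_N;			/* no more bits set */
}

int main(void)
{
	set_vid(1); set_vid(100); set_vid(4094);

	/* Same shape as the kernel loop: start at 0, then resume the
	 * search one past the bit just handled. */
	for (unsigned vid = find_next_bit(0); vid < VLAN_N;
	     vid = find_next_bit(vid + 1))
		printf("update fdb for vid %u\n", vid);
	return 0;
}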
@@ -231,13 +263,16 @@ void br_fdb_delete_by_port(struct net_bridge *br,
231 263
232/* No locking or refcounting, assumes caller has rcu_read_lock */ 264/* No locking or refcounting, assumes caller has rcu_read_lock */
233struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 265struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
234 const unsigned char *addr) 266 const unsigned char *addr,
267 __u16 vid)
235{ 268{
236 struct hlist_node *h; 269 struct hlist_node *h;
237 struct net_bridge_fdb_entry *fdb; 270 struct net_bridge_fdb_entry *fdb;
238 271
239 hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { 272 hlist_for_each_entry_rcu(fdb, h,
240 if (ether_addr_equal(fdb->addr.addr, addr)) { 273 &br->hash[br_mac_hash(addr, vid)], hlist) {
274 if (ether_addr_equal(fdb->addr.addr, addr) &&
275 fdb->vlan_id == vid) {
241 if (unlikely(has_expired(br, fdb))) 276 if (unlikely(has_expired(br, fdb)))
242 break; 277 break;
243 return fdb; 278 return fdb;
@@ -261,7 +296,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
261 if (!port) 296 if (!port)
262 ret = 0; 297 ret = 0;
263 else { 298 else {
264 fdb = __br_fdb_get(port->br, addr); 299 fdb = __br_fdb_get(port->br, addr, 0);
265 ret = fdb && fdb->dst && fdb->dst->dev != dev && 300 ret = fdb && fdb->dst && fdb->dst->dev != dev &&
266 fdb->dst->state == BR_STATE_FORWARDING; 301 fdb->dst->state == BR_STATE_FORWARDING;
267 } 302 }
@@ -325,26 +360,30 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
325} 360}
326 361
327static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, 362static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
328 const unsigned char *addr) 363 const unsigned char *addr,
364 __u16 vid)
329{ 365{
330 struct hlist_node *h; 366 struct hlist_node *h;
331 struct net_bridge_fdb_entry *fdb; 367 struct net_bridge_fdb_entry *fdb;
332 368
333 hlist_for_each_entry(fdb, h, head, hlist) { 369 hlist_for_each_entry(fdb, h, head, hlist) {
334 if (ether_addr_equal(fdb->addr.addr, addr)) 370 if (ether_addr_equal(fdb->addr.addr, addr) &&
371 fdb->vlan_id == vid)
335 return fdb; 372 return fdb;
336 } 373 }
337 return NULL; 374 return NULL;
338} 375}
339 376
340static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, 377static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
341 const unsigned char *addr) 378 const unsigned char *addr,
379 __u16 vid)
342{ 380{
343 struct hlist_node *h; 381 struct hlist_node *h;
344 struct net_bridge_fdb_entry *fdb; 382 struct net_bridge_fdb_entry *fdb;
345 383
346 hlist_for_each_entry_rcu(fdb, h, head, hlist) { 384 hlist_for_each_entry_rcu(fdb, h, head, hlist) {
347 if (ether_addr_equal(fdb->addr.addr, addr)) 385 if (ether_addr_equal(fdb->addr.addr, addr) &&
386 fdb->vlan_id == vid)
348 return fdb; 387 return fdb;
349 } 388 }
350 return NULL; 389 return NULL;
@@ -352,7 +391,8 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
352 391
353static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, 392static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
354 struct net_bridge_port *source, 393 struct net_bridge_port *source,
355 const unsigned char *addr) 394 const unsigned char *addr,
395 __u16 vid)
356{ 396{
357 struct net_bridge_fdb_entry *fdb; 397 struct net_bridge_fdb_entry *fdb;
358 398
@@ -360,6 +400,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
360 if (fdb) { 400 if (fdb) {
361 memcpy(fdb->addr.addr, addr, ETH_ALEN); 401 memcpy(fdb->addr.addr, addr, ETH_ALEN);
362 fdb->dst = source; 402 fdb->dst = source;
403 fdb->vlan_id = vid;
363 fdb->is_local = 0; 404 fdb->is_local = 0;
364 fdb->is_static = 0; 405 fdb->is_static = 0;
365 fdb->updated = fdb->used = jiffies; 406 fdb->updated = fdb->used = jiffies;
@@ -369,15 +410,15 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
369} 410}
370 411
371static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 412static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
372 const unsigned char *addr) 413 const unsigned char *addr, u16 vid)
373{ 414{
374 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 415 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
375 struct net_bridge_fdb_entry *fdb; 416 struct net_bridge_fdb_entry *fdb;
376 417
377 if (!is_valid_ether_addr(addr)) 418 if (!is_valid_ether_addr(addr))
378 return -EINVAL; 419 return -EINVAL;
379 420
380 fdb = fdb_find(head, addr); 421 fdb = fdb_find(head, addr, vid);
381 if (fdb) { 422 if (fdb) {
382 /* it is okay to have multiple ports with same 423 /* it is okay to have multiple ports with same
383 * address, just use the first one. 424 * address, just use the first one.
@@ -390,7 +431,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
390 fdb_delete(br, fdb); 431 fdb_delete(br, fdb);
391 } 432 }
392 433
393 fdb = fdb_create(head, source, addr); 434 fdb = fdb_create(head, source, addr, vid);
394 if (!fdb) 435 if (!fdb)
395 return -ENOMEM; 436 return -ENOMEM;
396 437
@@ -401,20 +442,20 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
401 442
402/* Add entry for local address of interface */ 443/* Add entry for local address of interface */
403int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 444int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
404 const unsigned char *addr) 445 const unsigned char *addr, u16 vid)
405{ 446{
406 int ret; 447 int ret;
407 448
408 spin_lock_bh(&br->hash_lock); 449 spin_lock_bh(&br->hash_lock);
409 ret = fdb_insert(br, source, addr); 450 ret = fdb_insert(br, source, addr, vid);
410 spin_unlock_bh(&br->hash_lock); 451 spin_unlock_bh(&br->hash_lock);
411 return ret; 452 return ret;
412} 453}
413 454
414void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 455void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
415 const unsigned char *addr) 456 const unsigned char *addr, u16 vid)
416{ 457{
417 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 458 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
418 struct net_bridge_fdb_entry *fdb; 459 struct net_bridge_fdb_entry *fdb;
419 460
420 /* some users want to always flood. */ 461 /* some users want to always flood. */
@@ -426,7 +467,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
426 source->state == BR_STATE_FORWARDING)) 467 source->state == BR_STATE_FORWARDING))
427 return; 468 return;
428 469
429 fdb = fdb_find_rcu(head, addr); 470 fdb = fdb_find_rcu(head, addr, vid);
430 if (likely(fdb)) { 471 if (likely(fdb)) {
431 /* attempt to update an entry for a local interface */ 472 /* attempt to update an entry for a local interface */
432 if (unlikely(fdb->is_local)) { 473 if (unlikely(fdb->is_local)) {
@@ -441,8 +482,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
441 } 482 }
442 } else { 483 } else {
443 spin_lock(&br->hash_lock); 484 spin_lock(&br->hash_lock);
444 if (likely(!fdb_find(head, addr))) { 485 if (likely(!fdb_find(head, addr, vid))) {
445 fdb = fdb_create(head, source, addr); 486 fdb = fdb_create(head, source, addr, vid);
446 if (fdb) 487 if (fdb)
447 fdb_notify(br, fdb, RTM_NEWNEIGH); 488 fdb_notify(br, fdb, RTM_NEWNEIGH);
448 } 489 }
@@ -495,6 +536,10 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
495 ci.ndm_refcnt = 0; 536 ci.ndm_refcnt = 0;
496 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 537 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
497 goto nla_put_failure; 538 goto nla_put_failure;
539
540 if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
541 goto nla_put_failure;
542
498 return nlmsg_end(skb, nlh); 543 return nlmsg_end(skb, nlh);
499 544
500nla_put_failure: 545nla_put_failure:
@@ -506,6 +551,7 @@ static inline size_t fdb_nlmsg_size(void)
506{ 551{
507 return NLMSG_ALIGN(sizeof(struct ndmsg)) 552 return NLMSG_ALIGN(sizeof(struct ndmsg))
508 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 553 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
554 + nla_total_size(sizeof(u16)) /* NDA_VLAN */
509 + nla_total_size(sizeof(struct nda_cacheinfo)); 555 + nla_total_size(sizeof(struct nda_cacheinfo));
510} 556}
511 557
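
The sizing hunk above matters more than it looks: fdb_nlmsg_size() must account for every attribute fdb_fill_info() can emit, or the skb allocated for the notification is too small. A minimal sketch of the arithmetic, with a hypothetical stand-in for the kernel's nla_total_size() (NLMSG header alignment omitted for brevity):

    #include <stddef.h>
    #include <stdio.h>

    #define NLA_HDRLEN   4                        /* aligned nlattr header */
    #define NLA_ALIGN(x) (((x) + 3) & ~(size_t)3)

    /* What nla_total_size() computes: header plus payload, padded to 4. */
    static size_t nla_total_size_(size_t payload)
    {
        return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
        size_t sz = nla_total_size_(6)    /* NDA_LLADDR: ETH_ALEN bytes */
                  + nla_total_size_(2)    /* NDA_VLAN: the new u16      */
                  + nla_total_size_(16);  /* NDA_CACHEINFO: 4 x u32     */

        printf("attribute space per fdb entry: %zu bytes\n", sz);
        return 0;
    }
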
@@ -571,18 +617,18 @@ out:
571 617
572/* Update (create or replace) forwarding database entry */ 618/* Update (create or replace) forwarding database entry */
573static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 619static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
574 __u16 state, __u16 flags) 620 __u16 state, __u16 flags, __u16 vid)
575{ 621{
576 struct net_bridge *br = source->br; 622 struct net_bridge *br = source->br;
577 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 623 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
578 struct net_bridge_fdb_entry *fdb; 624 struct net_bridge_fdb_entry *fdb;
579 625
580 fdb = fdb_find(head, addr); 626 fdb = fdb_find(head, addr, vid);
581 if (fdb == NULL) { 627 if (fdb == NULL) {
582 if (!(flags & NLM_F_CREATE)) 628 if (!(flags & NLM_F_CREATE))
583 return -ENOENT; 629 return -ENOENT;
584 630
585 fdb = fdb_create(head, source, addr); 631 fdb = fdb_create(head, source, addr, vid);
586 if (!fdb) 632 if (!fdb)
587 return -ENOMEM; 633 return -ENOMEM;
588 fdb_notify(br, fdb, RTM_NEWNEIGH); 634 fdb_notify(br, fdb, RTM_NEWNEIGH);
@@ -607,6 +653,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
607 return 0; 653 return 0;
608} 654}
609 655
656static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
657 const unsigned char *addr, u16 nlh_flags, u16 vid)
658{
659 int err = 0;
660
661 if (ndm->ndm_flags & NTF_USE) {
662 rcu_read_lock();
663 br_fdb_update(p->br, p, addr, vid);
664 rcu_read_unlock();
665 } else {
666 spin_lock_bh(&p->br->hash_lock);
667 err = fdb_add_entry(p, addr, ndm->ndm_state,
668 nlh_flags, vid);
669 spin_unlock_bh(&p->br->hash_lock);
670 }
671
672 return err;
673}
674
610/* Add new permanent fdb entry with RTM_NEWNEIGH */ 675/* Add new permanent fdb entry with RTM_NEWNEIGH */
611int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 676int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
612 struct net_device *dev, 677 struct net_device *dev,
@@ -614,12 +679,29 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
614{ 679{
615 struct net_bridge_port *p; 680 struct net_bridge_port *p;
616 int err = 0; 681 int err = 0;
682 struct net_port_vlans *pv;
683 unsigned short vid = VLAN_N_VID;
617 684
618 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { 685 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
619 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); 686 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
620 return -EINVAL; 687 return -EINVAL;
621 } 688 }
622 689
690 if (tb[NDA_VLAN]) {
691 if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
692 pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
693 return -EINVAL;
694 }
695
696 vid = nla_get_u16(tb[NDA_VLAN]);
697
698 if (vid >= VLAN_N_VID) {
699 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
700 vid);
701 return -EINVAL;
702 }
703 }
704
623 p = br_port_get_rtnl(dev); 705 p = br_port_get_rtnl(dev);
624 if (p == NULL) { 706 if (p == NULL) {
625 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", 707 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -627,40 +709,90 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
627 return -EINVAL; 709 return -EINVAL;
628 } 710 }
629 711
630 if (ndm->ndm_flags & NTF_USE) { 712 pv = nbp_get_vlan_info(p);
631 rcu_read_lock(); 713 if (vid != VLAN_N_VID) {
632 br_fdb_update(p->br, p, addr); 714 if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
633 rcu_read_unlock(); 715 pr_info("bridge: RTM_NEWNEIGH with unconfigured "
716 "vlan %d on port %s\n", vid, dev->name);
717 return -EINVAL;
718 }
719
720 /* VID was specified, so use it. */
721 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
634 } else { 722 } else {
635 spin_lock_bh(&p->br->hash_lock); 723 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
636 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags); 724 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
637 spin_unlock_bh(&p->br->hash_lock); 725 goto out;
726 }
727
728 /* We have vlans configured on this port and user didn't
729 * specify a VLAN. To be nice, add/update entry for every
730 * vlan on this port.
731 */
732 vid = find_first_bit(pv->vlan_bitmap, VLAN_N_VID);
733 while (vid < VLAN_N_VID) {
734 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
735 if (err)
736 goto out;
737 vid = find_next_bit(pv->vlan_bitmap,
738 VLAN_N_VID, vid+1);
739 }
638 } 740 }
639 741
742out:
640 return err; 743 return err;
641} 744}
642 745
643static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) 746int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
747 u16 vlan)
644{ 748{
645 struct net_bridge *br = p->br; 749 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
646 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
647 struct net_bridge_fdb_entry *fdb; 750 struct net_bridge_fdb_entry *fdb;
648 751
649 fdb = fdb_find(head, addr); 752 fdb = fdb_find(head, addr, vlan);
650 if (!fdb) 753 if (!fdb)
651 return -ENOENT; 754 return -ENOENT;
652 755
653 fdb_delete(p->br, fdb); 756 fdb_delete(br, fdb);
654 return 0; 757 return 0;
655} 758}
656 759
760static int __br_fdb_delete(struct net_bridge_port *p,
761 const unsigned char *addr, u16 vid)
762{
763 int err;
764
765 spin_lock_bh(&p->br->hash_lock);
766 err = fdb_delete_by_addr(p->br, addr, vid);
767 spin_unlock_bh(&p->br->hash_lock);
768
769 return err;
770}
771
657/* Remove neighbor entry with RTM_DELNEIGH */ 772/* Remove neighbor entry with RTM_DELNEIGH */
658int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, 773int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
774 struct net_device *dev,
659 const unsigned char *addr) 775 const unsigned char *addr)
660{ 776{
661 struct net_bridge_port *p; 777 struct net_bridge_port *p;
662 int err; 778 int err;
779 struct net_port_vlans *pv;
780 unsigned short vid = VLAN_N_VID;
781
782 if (tb[NDA_VLAN]) {
783 if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
784 pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
785 return -EINVAL;
786 }
787
788 vid = nla_get_u16(tb[NDA_VLAN]);
663 789
790 if (vid >= VLAN_N_VID) {
791 pr_info("bridge: RTM_DELNEIGH with invalid vlan id %d\n",
792 vid);
793 return -EINVAL;
794 }
795 }
664 p = br_port_get_rtnl(dev); 796 p = br_port_get_rtnl(dev);
665 if (p == NULL) { 797 if (p == NULL) {
666 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", 798 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
@@ -668,9 +800,33 @@ int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
668 return -EINVAL; 800 return -EINVAL;
669 } 801 }
670 802
671 spin_lock_bh(&p->br->hash_lock); 803 pv = nbp_get_vlan_info(p);
672 err = fdb_delete_by_addr(p, addr); 804 if (vid != VLAN_N_VID) {
673 spin_unlock_bh(&p->br->hash_lock); 805 if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
806 pr_info("bridge: RTM_DELNEIGH with unconfigured "
807 "vlan %d on port %s\n", vid, dev->name);
808 return -EINVAL;
809 }
810
811 err = __br_fdb_delete(p, addr, vid);
812 } else {
813 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
814 err = __br_fdb_delete(p, addr, 0);
815 goto out;
816 }
674 817
818 /* We have vlans configured on this port and user didn't
819 * specify a VLAN. To be nice, delete the entry for every
820 * vlan on this port.
821 */
822 err = -ENOENT; /* cleared if any per-vlan delete succeeds */
823 vid = find_first_bit(pv->vlan_bitmap, VLAN_N_VID);
824 while (vid < VLAN_N_VID) {
825 err &= __br_fdb_delete(p, addr, vid);
826 vid = find_next_bit(pv->vlan_bitmap,
827 VLAN_N_VID, vid+1);
828 }
829 }
830out:
675 return err; 831 return err;
676} 832}
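
Taken together, the br_fdb.c changes re-key the forwarding database from a MAC address to a (MAC address, VLAN id) pair: br_mac_hash() folds the vid into the bucket choice, and fdb_find()/fdb_find_rcu() compare both fields. A toy user-space rendition of that discipline, with hypothetical names and a deliberately simple mixer (the kernel uses jhash):

    #include <stdint.h>
    #include <string.h>

    #define FDB_HASH_SIZE 256          /* power of two, like BR_HASH_SIZE */

    struct fdb_key {
        uint8_t  mac[6];
        uint16_t vid;                  /* 0 = no VLAN configured */
    };

    /* Fold the VLAN id into the bucket choice, as br_mac_hash(addr, vid)
     * now does, so the same address learned on two VLANs usually lands
     * in different buckets. */
    static unsigned int fdb_hash(const struct fdb_key *k)
    {
        uint32_t h = 0x9e3779b9u ^ k->vid;
        for (int i = 0; i < 6; i++)
            h = h * 31 + k->mac[i];
        return h & (FDB_HASH_SIZE - 1);
    }

    /* A hit now requires both fields, as in fdb_find()/fdb_find_rcu(). */
    static int fdb_key_equal(const struct fdb_key *a, const struct fdb_key *b)
    {
        return a->vid == b->vid && memcmp(a->mac, b->mac, 6) == 0;
    }
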
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 02015a505d2a..092b20e4ee4c 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -31,6 +31,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
31 const struct sk_buff *skb) 31 const struct sk_buff *skb)
32{ 32{
33 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 33 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
34 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
34 p->state == BR_STATE_FORWARDING); 35 p->state == BR_STATE_FORWARDING);
35} 36}
36 37
@@ -63,6 +64,10 @@ int br_forward_finish(struct sk_buff *skb)
63 64
64static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) 65static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
65{ 66{
67 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
68 if (!skb)
69 return;
70
66 skb->dev = to->dev; 71 skb->dev = to->dev;
67 72
68 if (unlikely(netpoll_tx_running(to->br->dev))) { 73 if (unlikely(netpoll_tx_running(to->br->dev))) {
@@ -88,6 +93,10 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
88 return; 93 return;
89 } 94 }
90 95
96 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
97 if (!skb)
98 return;
99
91 indev = skb->dev; 100 indev = skb->dev;
92 skb->dev = to->dev; 101 skb->dev = to->dev;
93 skb_forward_csum(skb); 102 skb_forward_csum(skb);
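
should_deliver() now asks br_allowed_egress() before the port-state test, and both delivery paths route the skb through br_handle_vlan(), which decides whether the frame leaves tagged. A sketch of that two-step egress decision under the bitmap model this series introduces (names hypothetical; br_allowed_egress()/br_handle_vlan() themselves live in the new br_vlan.c, whose prototypes appear at the end of this patch):

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_N_VID 4096
    #define BPL (8 * sizeof(unsigned long))

    struct egress_vlans {
        unsigned long member[VLAN_N_VID / BPL];   /* VLANs on the port  */
        unsigned long untagged[VLAN_N_VID / BPL]; /* strip tag on these */
    };

    static bool test_vid(const unsigned long *map, uint16_t vid)
    {
        return map[vid / BPL] >> (vid % BPL) & 1;
    }

    /* First gate the frame (br_allowed_egress), then pick the tag
     * treatment (br_handle_vlan): delivered only if the port is a
     * member of the VLAN; tagged unless the vid is in the untagged set. */
    static bool egress_frame(const struct egress_vlans *v, uint16_t vid,
                             bool *keep_tag)
    {
        if (!test_vid(v->member, vid))
            return false;
        *keep_tag = !test_vid(v->untagged, vid);
        return true;
    }
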
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1c8fdc3558cd..ef1b91431c6b 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -23,6 +23,7 @@
23#include <linux/if_ether.h> 23#include <linux/if_ether.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <linux/if_vlan.h>
26 27
27#include "br_private.h" 28#include "br_private.h"
28 29
@@ -66,14 +67,14 @@ void br_port_carrier_check(struct net_bridge_port *p)
66 struct net_device *dev = p->dev; 67 struct net_device *dev = p->dev;
67 struct net_bridge *br = p->br; 68 struct net_bridge *br = p->br;
68 69
69 if (netif_running(dev) && netif_carrier_ok(dev)) 70 if (netif_running(dev) && netif_oper_up(dev))
70 p->path_cost = port_cost(dev); 71 p->path_cost = port_cost(dev);
71 72
72 if (!netif_running(br->dev)) 73 if (!netif_running(br->dev))
73 return; 74 return;
74 75
75 spin_lock_bh(&br->lock); 76 spin_lock_bh(&br->lock);
76 if (netif_running(dev) && netif_carrier_ok(dev)) { 77 if (netif_running(dev) && netif_oper_up(dev)) {
77 if (p->state == BR_STATE_DISABLED) 78 if (p->state == BR_STATE_DISABLED)
78 br_stp_enable_port(p); 79 br_stp_enable_port(p);
79 } else { 80 } else {
@@ -139,6 +140,7 @@ static void del_nbp(struct net_bridge_port *p)
139 140
140 br_ifinfo_notify(RTM_DELLINK, p); 141 br_ifinfo_notify(RTM_DELLINK, p);
141 142
143 nbp_vlan_flush(p);
142 br_fdb_delete_by_port(br, p, 1); 144 br_fdb_delete_by_port(br, p, 1);
143 145
144 list_del_rcu(&p->list); 146 list_del_rcu(&p->list);
@@ -148,7 +150,7 @@ static void del_nbp(struct net_bridge_port *p)
148 netdev_rx_handler_unregister(dev); 150 netdev_rx_handler_unregister(dev);
149 synchronize_net(); 151 synchronize_net();
150 152
151 netdev_set_master(dev, NULL); 153 netdev_upper_dev_unlink(dev, br->dev);
152 154
153 br_multicast_del_port(p); 155 br_multicast_del_port(p);
154 156
@@ -364,13 +366,13 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
364 if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL)))) 366 if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
365 goto err3; 367 goto err3;
366 368
367 err = netdev_set_master(dev, br->dev); 369 err = netdev_master_upper_dev_link(dev, br->dev);
368 if (err) 370 if (err)
369 goto err3; 371 goto err4;
370 372
371 err = netdev_rx_handler_register(dev, br_handle_frame, p); 373 err = netdev_rx_handler_register(dev, br_handle_frame, p);
372 if (err) 374 if (err)
373 goto err4; 375 goto err5;
374 376
375 dev->priv_flags |= IFF_BRIDGE_PORT; 377 dev->priv_flags |= IFF_BRIDGE_PORT;
376 378
@@ -383,7 +385,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
383 spin_lock_bh(&br->lock); 385 spin_lock_bh(&br->lock);
384 changed_addr = br_stp_recalculate_bridge_id(br); 386 changed_addr = br_stp_recalculate_bridge_id(br);
385 387
386 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && 388 if (netif_running(dev) && netif_oper_up(dev) &&
387 (br->dev->flags & IFF_UP)) 389 (br->dev->flags & IFF_UP))
388 br_stp_enable_port(p); 390 br_stp_enable_port(p);
389 spin_unlock_bh(&br->lock); 391 spin_unlock_bh(&br->lock);
@@ -395,15 +397,17 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
395 397
396 dev_set_mtu(br->dev, br_min_mtu(br)); 398 dev_set_mtu(br->dev, br_min_mtu(br));
397 399
398 if (br_fdb_insert(br, p, dev->dev_addr)) 400 if (br_fdb_insert(br, p, dev->dev_addr, 0))
399 netdev_err(dev, "failed insert local address bridge forwarding table\n"); 401 netdev_err(dev, "failed insert local address bridge forwarding table\n");
400 402
401 kobject_uevent(&p->kobj, KOBJ_ADD); 403 kobject_uevent(&p->kobj, KOBJ_ADD);
402 404
403 return 0; 405 return 0;
404 406
407err5:
408 netdev_upper_dev_unlink(dev, br->dev);
405err4: 409err4:
406 netdev_set_master(dev, NULL); 410 br_netpoll_disable(p);
407err3: 411err3:
408 sysfs_remove_link(br->ifobj, p->dev->name); 412 sysfs_remove_link(br->ifobj, p->dev->name);
409err2: 413err2:
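
The err4/err5 relabelling in br_add_if() is the standard kernel unwind idiom: each label undoes only the steps that had already succeeded, in reverse order, so a netdev_rx_handler_register() failure now unlinks the upper device before disabling netpoll. A self-contained sketch of the shape, with hypothetical step names:

    #include <stdio.h>

    static int  step_a(void) { puts("netpoll on");   return 0;  }
    static int  step_b(void) { puts("upper linked"); return 0;  }
    static int  step_c(void) { puts("rx hook fail"); return -1; }
    static void undo_b(void) { puts("upper unlinked"); }
    static void undo_a(void) { puts("netpoll off");    }

    static int setup(void)
    {
        int err;

        if ((err = step_a()))
            goto out;
        if ((err = step_b()))
            goto unwind_a;
        if ((err = step_c()))
            goto unwind_b;
        return 0;

    unwind_b:
        undo_b();    /* like err5: netdev_upper_dev_unlink() */
    unwind_a:
        undo_a();    /* like err4: br_netpoll_disable()      */
    out:
        return err;
    }

    int main(void) { return setup() ? 1 : 0; }
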
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 4b34207419b1..480330151898 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -17,6 +17,7 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/netfilter_bridge.h> 18#include <linux/netfilter_bridge.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/rculist.h>
20#include "br_private.h" 21#include "br_private.h"
21 22
22/* Hook for brouter */ 23/* Hook for brouter */
@@ -34,6 +35,20 @@ static int br_pass_frame_up(struct sk_buff *skb)
34 brstats->rx_bytes += skb->len; 35 brstats->rx_bytes += skb->len;
35 u64_stats_update_end(&brstats->syncp); 36 u64_stats_update_end(&brstats->syncp);
36 37
38 /* Bridge is just like any other port. Make sure the
39 * packet is allowed, except in promisc mode when someone
40 * may be running packet capture.
41 */
42 if (!(brdev->flags & IFF_PROMISC) &&
43 !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
44 kfree_skb(skb);
45 return NET_RX_DROP;
46 }
47
48 skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
49 if (!skb)
50 return NET_RX_DROP;
51
37 indev = skb->dev; 52 indev = skb->dev;
38 skb->dev = brdev; 53 skb->dev = brdev;
39 54
@@ -50,13 +65,17 @@ int br_handle_frame_finish(struct sk_buff *skb)
50 struct net_bridge_fdb_entry *dst; 65 struct net_bridge_fdb_entry *dst;
51 struct net_bridge_mdb_entry *mdst; 66 struct net_bridge_mdb_entry *mdst;
52 struct sk_buff *skb2; 67 struct sk_buff *skb2;
68 u16 vid = 0;
53 69
54 if (!p || p->state == BR_STATE_DISABLED) 70 if (!p || p->state == BR_STATE_DISABLED)
55 goto drop; 71 goto drop;
56 72
73 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
74 goto drop;
75
57 /* insert into forwarding database after filtering to avoid spoofing */ 76 /* insert into forwarding database after filtering to avoid spoofing */
58 br = p->br; 77 br = p->br;
59 br_fdb_update(br, p, eth_hdr(skb)->h_source); 78 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
60 79
61 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && 80 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
62 br_multicast_rcv(br, p, skb)) 81 br_multicast_rcv(br, p, skb))
@@ -91,7 +110,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
91 skb2 = skb; 110 skb2 = skb;
92 111
93 br->dev->stats.multicast++; 112 br->dev->stats.multicast++;
94 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 113 } else if ((dst = __br_fdb_get(br, dest, vid)) &&
114 dst->is_local) {
95 skb2 = skb; 115 skb2 = skb;
96 /* Do not forward the packet since it's local. */ 116 /* Do not forward the packet since it's local. */
97 skb = NULL; 117 skb = NULL;
@@ -119,8 +139,10 @@ drop:
119static int br_handle_local_finish(struct sk_buff *skb) 139static int br_handle_local_finish(struct sk_buff *skb)
120{ 140{
121 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 141 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
142 u16 vid = 0;
122 143
123 br_fdb_update(p->br, p, eth_hdr(skb)->h_source); 144 br_vlan_get_tag(skb, &vid);
145 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
124 return 0; /* process further */ 146 return 0; /* process further */
125} 147}
126 148
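
On ingress the ordering is deliberate: br_allowed_ingress() both filters the frame and resolves the VLAN id it belongs to, with untagged traffic mapped to the port's PVID, and only then is the source address learned under that vid, so disallowed VLANs never reach the FDB. A sketch of that resolution step, assuming the PVID semantics implied by the br_vlan.c prototypes later in this patch (names hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_N_VID 4096
    #define BPL (8 * sizeof(unsigned long))

    struct port_vlans {
        uint16_t pvid;                 /* 0 = no PVID configured */
        unsigned long member[VLAN_N_VID / BPL];
    };

    static bool test_vid(const unsigned long *map, uint16_t vid)
    {
        return map[vid / BPL] >> (vid % BPL) & 1;
    }

    /* Untagged frames inherit the PVID (or are dropped without one);
     * tagged frames must belong to a VLAN configured on the port.
     * *vid is what br_handle_frame_finish() feeds to br_fdb_update(). */
    static bool allowed_ingress(const struct port_vlans *pv,
                                bool tagged, uint16_t tag, uint16_t *vid)
    {
        if (!tagged) {
            if (!pv->pvid)
                return false;
            *vid = pv->pvid;
            return true;
        }
        *vid = tag;
        return test_vid(pv->member, tag);
    }
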
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6f0a2eebcb27..38991e03646d 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -83,9 +83,12 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
83 if (port) { 83 if (port) {
84 struct br_mdb_entry e; 84 struct br_mdb_entry e;
85 e.ifindex = port->dev->ifindex; 85 e.ifindex = port->dev->ifindex;
86 e.addr.u.ip4 = p->addr.u.ip4; 86 e.state = p->state;
87 if (p->addr.proto == htons(ETH_P_IP))
88 e.addr.u.ip4 = p->addr.u.ip4;
87#if IS_ENABLED(CONFIG_IPV6) 89#if IS_ENABLED(CONFIG_IPV6)
88 e.addr.u.ip6 = p->addr.u.ip6; 90 if (p->addr.proto == htons(ETH_P_IPV6))
91 e.addr.u.ip6 = p->addr.u.ip6;
89#endif 92#endif
90 e.addr.proto = p->addr.proto; 93 e.addr.proto = p->addr.proto;
91 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) { 94 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
@@ -253,6 +256,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
253#endif 256#endif
254 } else 257 } else
255 return false; 258 return false;
259 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
260 return false;
256 261
257 return true; 262 return true;
258} 263}
@@ -267,9 +272,6 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
267 struct net_device *dev; 272 struct net_device *dev;
268 int err; 273 int err;
269 274
270 if (!capable(CAP_NET_ADMIN))
271 return -EPERM;
272
273 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL); 275 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
274 if (err < 0) 276 if (err < 0)
275 return err; 277 return err;
@@ -310,7 +312,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
310} 312}
311 313
312static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 314static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
313 struct br_ip *group) 315 struct br_ip *group, unsigned char state)
314{ 316{
315 struct net_bridge_mdb_entry *mp; 317 struct net_bridge_mdb_entry *mp;
316 struct net_bridge_port_group *p; 318 struct net_bridge_port_group *p;
@@ -336,7 +338,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
336 break; 338 break;
337 } 339 }
338 340
339 p = br_multicast_new_port_group(port, group, *pp); 341 p = br_multicast_new_port_group(port, group, *pp, state);
340 if (unlikely(!p)) 342 if (unlikely(!p))
341 return -ENOMEM; 343 return -ENOMEM;
342 rcu_assign_pointer(*pp, p); 344 rcu_assign_pointer(*pp, p);
@@ -373,7 +375,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
373#endif 375#endif
374 376
375 spin_lock_bh(&br->multicast_lock); 377 spin_lock_bh(&br->multicast_lock);
376 ret = br_mdb_add_group(br, p, &ip); 378 ret = br_mdb_add_group(br, p, &ip, entry->state);
377 spin_unlock_bh(&br->multicast_lock); 379 spin_unlock_bh(&br->multicast_lock);
378 return ret; 380 return ret;
379} 381}
@@ -479,3 +481,10 @@ void br_mdb_init(void)
479 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL); 481 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
480 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL); 482 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
481} 483}
484
485void br_mdb_uninit(void)
486{
487 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
488 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
489 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
490}
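
The fill fix above is the classic tagged-union rule: copy only the member selected by the tag. The old code copied e.addr.u.ip4 and e.addr.u.ip6 unconditionally, reading sixteen bytes that were never initialised for an IPv4 entry. A compact illustration, with a simplified stand-in for br_ip:

    #include <stdint.h>
    #include <string.h>

    struct group_addr {
        uint16_t proto;             /* stands in for ETH_P_IP / ETH_P_IPV6 */
        union {
            uint32_t ip4;
            uint8_t  ip6[16];
        } u;
    };

    /* Copy only the union member selected by the tag, as the fixed
     * br_mdb_fill_info() does; copying both unconditionally reads
     * bytes never initialised for this entry. */
    static void copy_group(struct group_addr *dst, const struct group_addr *src)
    {
        memset(dst, 0, sizeof(*dst));
        dst->proto = src->proto;
        if (src->proto == 0x0800)           /* ETH_P_IP   */
            dst->u.ip4 = src->u.ip4;
        else if (src->proto == 0x86DD)      /* ETH_P_IPV6 */
            memcpy(dst->u.ip6, src->u.ip6, 16);
    }
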
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1093c89095d8..7d886b0a8b7b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,8 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
39{ 39{
40 if (a->proto != b->proto) 40 if (a->proto != b->proto)
41 return 0; 41 return 0;
42 if (a->vid != b->vid)
43 return 0;
42 switch (a->proto) { 44 switch (a->proto) {
43 case htons(ETH_P_IP): 45 case htons(ETH_P_IP):
44 return a->u.ip4 == b->u.ip4; 46 return a->u.ip4 == b->u.ip4;
@@ -50,16 +52,19 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
50 return 0; 52 return 0;
51} 53}
52 54
53static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 55static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
56 __u16 vid)
54{ 57{
55 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); 58 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
56} 59}
57 60
58#if IS_ENABLED(CONFIG_IPV6) 61#if IS_ENABLED(CONFIG_IPV6)
59static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 62static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
60 const struct in6_addr *ip) 63 const struct in6_addr *ip,
64 __u16 vid)
61{ 65{
62 return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); 66 return jhash_2words(ipv6_addr_hash(ip), vid,
67 mdb->secret) & (mdb->max - 1);
63} 68}
64#endif 69#endif
65 70
@@ -68,10 +73,10 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
68{ 73{
69 switch (ip->proto) { 74 switch (ip->proto) {
70 case htons(ETH_P_IP): 75 case htons(ETH_P_IP):
71 return __br_ip4_hash(mdb, ip->u.ip4); 76 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
72#if IS_ENABLED(CONFIG_IPV6) 77#if IS_ENABLED(CONFIG_IPV6)
73 case htons(ETH_P_IPV6): 78 case htons(ETH_P_IPV6):
74 return __br_ip6_hash(mdb, &ip->u.ip6); 79 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
75#endif 80#endif
76 } 81 }
77 return 0; 82 return 0;
@@ -101,24 +106,27 @@ struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
101} 106}
102 107
103static struct net_bridge_mdb_entry *br_mdb_ip4_get( 108static struct net_bridge_mdb_entry *br_mdb_ip4_get(
104 struct net_bridge_mdb_htable *mdb, __be32 dst) 109 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
105{ 110{
106 struct br_ip br_dst; 111 struct br_ip br_dst;
107 112
108 br_dst.u.ip4 = dst; 113 br_dst.u.ip4 = dst;
109 br_dst.proto = htons(ETH_P_IP); 114 br_dst.proto = htons(ETH_P_IP);
115 br_dst.vid = vid;
110 116
111 return br_mdb_ip_get(mdb, &br_dst); 117 return br_mdb_ip_get(mdb, &br_dst);
112} 118}
113 119
114#if IS_ENABLED(CONFIG_IPV6) 120#if IS_ENABLED(CONFIG_IPV6)
115static struct net_bridge_mdb_entry *br_mdb_ip6_get( 121static struct net_bridge_mdb_entry *br_mdb_ip6_get(
116 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) 122 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
123 __u16 vid)
117{ 124{
118 struct br_ip br_dst; 125 struct br_ip br_dst;
119 126
120 br_dst.u.ip6 = *dst; 127 br_dst.u.ip6 = *dst;
121 br_dst.proto = htons(ETH_P_IPV6); 128 br_dst.proto = htons(ETH_P_IPV6);
129 br_dst.vid = vid;
122 130
123 return br_mdb_ip_get(mdb, &br_dst); 131 return br_mdb_ip_get(mdb, &br_dst);
124} 132}
@@ -279,7 +287,7 @@ static void br_multicast_port_group_expired(unsigned long data)
279 287
280 spin_lock(&br->multicast_lock); 288 spin_lock(&br->multicast_lock);
281 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 289 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
282 hlist_unhashed(&pg->mglist)) 290 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
283 goto out; 291 goto out;
284 292
285 br_multicast_del_pg(br, pg); 293 br_multicast_del_pg(br, pg);
@@ -622,7 +630,8 @@ out:
622struct net_bridge_port_group *br_multicast_new_port_group( 630struct net_bridge_port_group *br_multicast_new_port_group(
623 struct net_bridge_port *port, 631 struct net_bridge_port *port,
624 struct br_ip *group, 632 struct br_ip *group,
625 struct net_bridge_port_group __rcu *next) 633 struct net_bridge_port_group __rcu *next,
634 unsigned char state)
626{ 635{
627 struct net_bridge_port_group *p; 636 struct net_bridge_port_group *p;
628 637
@@ -632,6 +641,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
632 641
633 p->addr = *group; 642 p->addr = *group;
634 p->port = port; 643 p->port = port;
644 p->state = state;
635 rcu_assign_pointer(p->next, next); 645 rcu_assign_pointer(p->next, next);
636 hlist_add_head(&p->mglist, &port->mglist); 646 hlist_add_head(&p->mglist, &port->mglist);
637 setup_timer(&p->timer, br_multicast_port_group_expired, 647 setup_timer(&p->timer, br_multicast_port_group_expired,
@@ -674,7 +684,7 @@ static int br_multicast_add_group(struct net_bridge *br,
674 break; 684 break;
675 } 685 }
676 686
677 p = br_multicast_new_port_group(port, group, *pp); 687 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
678 if (unlikely(!p)) 688 if (unlikely(!p))
679 goto err; 689 goto err;
680 rcu_assign_pointer(*pp, p); 690 rcu_assign_pointer(*pp, p);
@@ -692,7 +702,8 @@ err:
692 702
693static int br_ip4_multicast_add_group(struct net_bridge *br, 703static int br_ip4_multicast_add_group(struct net_bridge *br,
694 struct net_bridge_port *port, 704 struct net_bridge_port *port,
695 __be32 group) 705 __be32 group,
706 __u16 vid)
696{ 707{
697 struct br_ip br_group; 708 struct br_ip br_group;
698 709
@@ -701,6 +712,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
701 712
702 br_group.u.ip4 = group; 713 br_group.u.ip4 = group;
703 br_group.proto = htons(ETH_P_IP); 714 br_group.proto = htons(ETH_P_IP);
715 br_group.vid = vid;
704 716
705 return br_multicast_add_group(br, port, &br_group); 717 return br_multicast_add_group(br, port, &br_group);
706} 718}
@@ -708,7 +720,8 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
708#if IS_ENABLED(CONFIG_IPV6) 720#if IS_ENABLED(CONFIG_IPV6)
709static int br_ip6_multicast_add_group(struct net_bridge *br, 721static int br_ip6_multicast_add_group(struct net_bridge *br,
710 struct net_bridge_port *port, 722 struct net_bridge_port *port,
711 const struct in6_addr *group) 723 const struct in6_addr *group,
724 __u16 vid)
712{ 725{
713 struct br_ip br_group; 726 struct br_ip br_group;
714 727
@@ -717,6 +730,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
717 730
718 br_group.u.ip6 = *group; 731 br_group.u.ip6 = *group;
719 br_group.proto = htons(ETH_P_IPV6); 732 br_group.proto = htons(ETH_P_IPV6);
733 br_group.vid = vid;
720 734
721 return br_multicast_add_group(br, port, &br_group); 735 return br_multicast_add_group(br, port, &br_group);
722} 736}
@@ -893,10 +907,12 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
893 int type; 907 int type;
894 int err = 0; 908 int err = 0;
895 __be32 group; 909 __be32 group;
910 u16 vid = 0;
896 911
897 if (!pskb_may_pull(skb, sizeof(*ih))) 912 if (!pskb_may_pull(skb, sizeof(*ih)))
898 return -EINVAL; 913 return -EINVAL;
899 914
915 br_vlan_get_tag(skb, &vid);
900 ih = igmpv3_report_hdr(skb); 916 ih = igmpv3_report_hdr(skb);
901 num = ntohs(ih->ngrec); 917 num = ntohs(ih->ngrec);
902 len = sizeof(*ih); 918 len = sizeof(*ih);
@@ -928,7 +944,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
928 continue; 944 continue;
929 } 945 }
930 946
931 err = br_ip4_multicast_add_group(br, port, group); 947 err = br_ip4_multicast_add_group(br, port, group, vid);
932 if (err) 948 if (err)
933 break; 949 break;
934 } 950 }
@@ -947,10 +963,12 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
947 int len; 963 int len;
948 int num; 964 int num;
949 int err = 0; 965 int err = 0;
966 u16 vid = 0;
950 967
951 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 968 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
952 return -EINVAL; 969 return -EINVAL;
953 970
971 br_vlan_get_tag(skb, &vid);
954 icmp6h = icmp6_hdr(skb); 972 icmp6h = icmp6_hdr(skb);
955 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 973 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
956 len = sizeof(*icmp6h); 974 len = sizeof(*icmp6h);
@@ -988,7 +1006,8 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
988 continue; 1006 continue;
989 } 1007 }
990 1008
991 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); 1009 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
1010 vid);
992 if (!err) 1011 if (!err)
993 break; 1012 break;
994 } 1013 }
@@ -1072,6 +1091,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1072 unsigned long now = jiffies; 1091 unsigned long now = jiffies;
1073 __be32 group; 1092 __be32 group;
1074 int err = 0; 1093 int err = 0;
1094 u16 vid = 0;
1075 1095
1076 spin_lock(&br->multicast_lock); 1096 spin_lock(&br->multicast_lock);
1077 if (!netif_running(br->dev) || 1097 if (!netif_running(br->dev) ||
@@ -1106,7 +1126,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1106 if (!group) 1126 if (!group)
1107 goto out; 1127 goto out;
1108 1128
1109 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); 1129 br_vlan_get_tag(skb, &vid);
1130 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
1110 if (!mp) 1131 if (!mp)
1111 goto out; 1132 goto out;
1112 1133
@@ -1147,6 +1168,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1147 unsigned long now = jiffies; 1168 unsigned long now = jiffies;
1148 const struct in6_addr *group = NULL; 1169 const struct in6_addr *group = NULL;
1149 int err = 0; 1170 int err = 0;
1171 u16 vid = 0;
1150 1172
1151 spin_lock(&br->multicast_lock); 1173 spin_lock(&br->multicast_lock);
1152 if (!netif_running(br->dev) || 1174 if (!netif_running(br->dev) ||
@@ -1165,7 +1187,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1165 if (max_delay) 1187 if (max_delay)
1166 group = &mld->mld_mca; 1188 group = &mld->mld_mca;
1167 } else if (skb->len >= sizeof(*mld2q)) { 1189 } else if (skb->len >= sizeof(*mld2q)) {
1168 u16 mrc;
1169 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1190 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1170 err = -EINVAL; 1191 err = -EINVAL;
1171 goto out; 1192 goto out;
@@ -1173,14 +1194,14 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1173 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1194 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1174 if (!mld2q->mld2q_nsrcs) 1195 if (!mld2q->mld2q_nsrcs)
1175 group = &mld2q->mld2q_mca; 1196 group = &mld2q->mld2q_mca;
1176 mrc = ntohs(mld2q->mld2q_mrc); 1197 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
1177 max_delay = mrc ? MLDV2_MRC(mrc) : 1;
1178 } 1198 }
1179 1199
1180 if (!group) 1200 if (!group)
1181 goto out; 1201 goto out;
1182 1202
1183 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); 1203 br_vlan_get_tag(skb, &vid);
1204 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1184 if (!mp) 1205 if (!mp)
1185 goto out; 1206 goto out;
1186 1207
@@ -1286,7 +1307,8 @@ out:
1286 1307
1287static void br_ip4_multicast_leave_group(struct net_bridge *br, 1308static void br_ip4_multicast_leave_group(struct net_bridge *br,
1288 struct net_bridge_port *port, 1309 struct net_bridge_port *port,
1289 __be32 group) 1310 __be32 group,
1311 __u16 vid)
1290{ 1312{
1291 struct br_ip br_group; 1313 struct br_ip br_group;
1292 1314
@@ -1295,6 +1317,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1295 1317
1296 br_group.u.ip4 = group; 1318 br_group.u.ip4 = group;
1297 br_group.proto = htons(ETH_P_IP); 1319 br_group.proto = htons(ETH_P_IP);
1320 br_group.vid = vid;
1298 1321
1299 br_multicast_leave_group(br, port, &br_group); 1322 br_multicast_leave_group(br, port, &br_group);
1300} 1323}
@@ -1302,7 +1325,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1302#if IS_ENABLED(CONFIG_IPV6) 1325#if IS_ENABLED(CONFIG_IPV6)
1303static void br_ip6_multicast_leave_group(struct net_bridge *br, 1326static void br_ip6_multicast_leave_group(struct net_bridge *br,
1304 struct net_bridge_port *port, 1327 struct net_bridge_port *port,
1305 const struct in6_addr *group) 1328 const struct in6_addr *group,
1329 __u16 vid)
1306{ 1330{
1307 struct br_ip br_group; 1331 struct br_ip br_group;
1308 1332
@@ -1311,6 +1335,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1311 1335
1312 br_group.u.ip6 = *group; 1336 br_group.u.ip6 = *group;
1313 br_group.proto = htons(ETH_P_IPV6); 1337 br_group.proto = htons(ETH_P_IPV6);
1338 br_group.vid = vid;
1314 1339
1315 br_multicast_leave_group(br, port, &br_group); 1340 br_multicast_leave_group(br, port, &br_group);
1316} 1341}
@@ -1326,6 +1351,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1326 unsigned int len; 1351 unsigned int len;
1327 unsigned int offset; 1352 unsigned int offset;
1328 int err; 1353 int err;
1354 u16 vid = 0;
1329 1355
1330 /* We treat OOM as packet loss for now. */ 1356 /* We treat OOM as packet loss for now. */
1331 if (!pskb_may_pull(skb, sizeof(*iph))) 1357 if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1386,6 +1412,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1386 1412
1387 err = 0; 1413 err = 0;
1388 1414
1415 br_vlan_get_tag(skb2, &vid);
1389 BR_INPUT_SKB_CB(skb)->igmp = 1; 1416 BR_INPUT_SKB_CB(skb)->igmp = 1;
1390 ih = igmp_hdr(skb2); 1417 ih = igmp_hdr(skb2);
1391 1418
@@ -1393,7 +1420,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1393 case IGMP_HOST_MEMBERSHIP_REPORT: 1420 case IGMP_HOST_MEMBERSHIP_REPORT:
1394 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1421 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1395 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1422 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1396 err = br_ip4_multicast_add_group(br, port, ih->group); 1423 err = br_ip4_multicast_add_group(br, port, ih->group, vid);
1397 break; 1424 break;
1398 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1425 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1399 err = br_ip4_multicast_igmp3_report(br, port, skb2); 1426 err = br_ip4_multicast_igmp3_report(br, port, skb2);
@@ -1402,7 +1429,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1402 err = br_ip4_multicast_query(br, port, skb2); 1429 err = br_ip4_multicast_query(br, port, skb2);
1403 break; 1430 break;
1404 case IGMP_HOST_LEAVE_MESSAGE: 1431 case IGMP_HOST_LEAVE_MESSAGE:
1405 br_ip4_multicast_leave_group(br, port, ih->group); 1432 br_ip4_multicast_leave_group(br, port, ih->group, vid);
1406 break; 1433 break;
1407 } 1434 }
1408 1435
@@ -1427,6 +1454,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1427 unsigned int len; 1454 unsigned int len;
1428 int offset; 1455 int offset;
1429 int err; 1456 int err;
1457 u16 vid = 0;
1430 1458
1431 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1459 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1432 return -EINVAL; 1460 return -EINVAL;
@@ -1510,6 +1538,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1510 1538
1511 err = 0; 1539 err = 0;
1512 1540
1541 br_vlan_get_tag(skb, &vid);
1513 BR_INPUT_SKB_CB(skb)->igmp = 1; 1542 BR_INPUT_SKB_CB(skb)->igmp = 1;
1514 1543
1515 switch (icmp6_type) { 1544 switch (icmp6_type) {
@@ -1522,7 +1551,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1522 } 1551 }
1523 mld = (struct mld_msg *)skb_transport_header(skb2); 1552 mld = (struct mld_msg *)skb_transport_header(skb2);
1524 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1553 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1525 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1554 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
1526 break; 1555 break;
1527 } 1556 }
1528 case ICMPV6_MLD2_REPORT: 1557 case ICMPV6_MLD2_REPORT:
@@ -1539,7 +1568,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1539 goto out; 1568 goto out;
1540 } 1569 }
1541 mld = (struct mld_msg *)skb_transport_header(skb2); 1570 mld = (struct mld_msg *)skb_transport_header(skb2);
1542 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1571 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
1543 } 1572 }
1544 } 1573 }
1545 1574
@@ -1608,7 +1637,6 @@ void br_multicast_init(struct net_bridge *br)
1608 br_multicast_querier_expired, (unsigned long)br); 1637 br_multicast_querier_expired, (unsigned long)br);
1609 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1638 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1610 (unsigned long)br); 1639 (unsigned long)br);
1611 br_mdb_init();
1612} 1640}
1613 1641
1614void br_multicast_open(struct net_bridge *br) 1642void br_multicast_open(struct net_bridge *br)
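
The multicast side mirrors the FDB change: br_ip gains a vid, br_ip_equal() compares it, and the hashes fold it in via jhash_2words()/ipv6_addr_hash(), so the same group joined on two VLANs occupies two independent MDB entries. A stand-in for that bucket computation (mix2 is a generic 64-bit mixer, not the kernel's jhash):

    #include <stdint.h>

    #define MDB_MAX 256                    /* power of two, like mdb->max */

    /* Any decent two-word mixer works for the illustration; the point
     * is that the VLAN id participates in the bucket choice. */
    static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
    {
        uint64_t x = ((uint64_t)a << 32 | b) ^ seed;
        x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
        x ^= x >> 33;
        return (uint32_t)x;
    }

    /* Same group, different VLAN -> (usually) a different bucket, and
     * always a different entry, since br_ip_equal() checks vid first. */
    static unsigned int mdb_ip4_hash(uint32_t group, uint16_t vid,
                                     uint32_t secret)
    {
        return mix2(group, vid, secret) & (MDB_MAX - 1);
    }
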
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index dead9dfe865b..27aa3ee517ce 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,6 +16,7 @@
16#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
17#include <net/net_namespace.h> 17#include <net/net_namespace.h>
18#include <net/sock.h> 18#include <net/sock.h>
19#include <uapi/linux/if_bridge.h>
19 20
20#include "br_private.h" 21#include "br_private.h"
21#include "br_private_stp.h" 22#include "br_private_stp.h"
@@ -64,15 +65,21 @@ static int br_port_fill_attrs(struct sk_buff *skb,
64 * Create one netlink message for one interface 65 * Create one netlink message for one interface
65 * Contains port and master info as well as carrier and bridge state. 66 * Contains port and master info as well as carrier and bridge state.
66 */ 67 */
67static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port, 68static int br_fill_ifinfo(struct sk_buff *skb,
68 u32 pid, u32 seq, int event, unsigned int flags) 69 const struct net_bridge_port *port,
70 u32 pid, u32 seq, int event, unsigned int flags,
71 u32 filter_mask, const struct net_device *dev)
69{ 72{
70 const struct net_bridge *br = port->br; 73 const struct net_bridge *br;
71 const struct net_device *dev = port->dev;
72 struct ifinfomsg *hdr; 74 struct ifinfomsg *hdr;
73 struct nlmsghdr *nlh; 75 struct nlmsghdr *nlh;
74 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 76 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
75 77
78 if (port)
79 br = port->br;
80 else
81 br = netdev_priv(dev);
82
76 br_debug(br, "br_fill_info event %d port %s master %s\n", 83 br_debug(br, "br_fill_info event %d port %s master %s\n",
77 event, dev->name, br->dev->name); 84 event, dev->name, br->dev->name);
78 85
@@ -98,7 +105,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
98 nla_put_u32(skb, IFLA_LINK, dev->iflink))) 105 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
99 goto nla_put_failure; 106 goto nla_put_failure;
100 107
101 if (event == RTM_NEWLINK) { 108 if (event == RTM_NEWLINK && port) {
102 struct nlattr *nest 109 struct nlattr *nest
103 = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); 110 = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
104 111
@@ -107,6 +114,48 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
107 nla_nest_end(skb, nest); 114 nla_nest_end(skb, nest);
108 } 115 }
109 116
117 /* Check if the VID information is requested */
118 if (filter_mask & RTEXT_FILTER_BRVLAN) {
119 struct nlattr *af;
120 const struct net_port_vlans *pv;
121 struct bridge_vlan_info vinfo;
122 u16 vid;
123 u16 pvid;
124
125 if (port)
126 pv = nbp_get_vlan_info(port);
127 else
128 pv = br_get_vlan_info(br);
129
130 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
131 goto done;
132
133 af = nla_nest_start(skb, IFLA_AF_SPEC);
134 if (!af)
135 goto nla_put_failure;
136
137 pvid = br_get_pvid(pv);
138 for (vid = find_first_bit(pv->vlan_bitmap, VLAN_N_VID);
139 vid < VLAN_N_VID;
140 vid = find_next_bit(pv->vlan_bitmap,
141 VLAN_N_VID, vid+1)) {
142 vinfo.vid = vid;
143 vinfo.flags = 0;
144 if (vid == pvid)
145 vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
146
147 if (test_bit(vid, pv->untagged_bitmap))
148 vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
149
150 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
151 sizeof(vinfo), &vinfo))
152 goto nla_put_failure;
153 }
154
155 nla_nest_end(skb, af);
156 }
157
158done:
110 return nlmsg_end(skb, nlh); 159 return nlmsg_end(skb, nlh);
111 160
112nla_put_failure: 161nla_put_failure:
@@ -119,10 +168,14 @@ nla_put_failure:
119 */ 168 */
120void br_ifinfo_notify(int event, struct net_bridge_port *port) 169void br_ifinfo_notify(int event, struct net_bridge_port *port)
121{ 170{
122 struct net *net = dev_net(port->dev); 171 struct net *net;
123 struct sk_buff *skb; 172 struct sk_buff *skb;
124 int err = -ENOBUFS; 173 int err = -ENOBUFS;
125 174
175 if (!port)
176 return;
177
178 net = dev_net(port->dev);
126 br_debug(port->br, "port %u(%s) event %d\n", 179 br_debug(port->br, "port %u(%s) event %d\n",
127 (unsigned int)port->port_no, port->dev->name, event); 180 (unsigned int)port->port_no, port->dev->name, event);
128 181
@@ -130,7 +183,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
130 if (skb == NULL) 183 if (skb == NULL)
131 goto errout; 184 goto errout;
132 185
133 err = br_fill_ifinfo(skb, port, 0, 0, event, 0); 186 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
134 if (err < 0) { 187 if (err < 0) {
135 /* -EMSGSIZE implies BUG in br_nlmsg_size() */ 188 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
136 WARN_ON(err == -EMSGSIZE); 189 WARN_ON(err == -EMSGSIZE);
@@ -144,24 +197,85 @@ errout:
144 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 197 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
145} 198}
146 199
200
147/* 201/*
148 * Dump information about all ports, in response to GETLINK 202 * Dump information about all ports, in response to GETLINK
149 */ 203 */
150int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 204int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
151 struct net_device *dev) 205 struct net_device *dev, u32 filter_mask)
152{ 206{
153 int err = 0; 207 int err = 0;
154 struct net_bridge_port *port = br_port_get_rcu(dev); 208 struct net_bridge_port *port = br_port_get_rcu(dev);
155 209
156 /* not a bridge port */ 210 /* not a bridge port and no vlan info requested */
157 if (!port) 211 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
158 goto out; 212 goto out;
159 213
160 err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI); 214 err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
215 filter_mask, dev);
161out: 216out:
162 return err; 217 return err;
163} 218}
164 219
220static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
221 [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
222 [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
223 [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
224 .len = sizeof(struct bridge_vlan_info), },
225};
226
227static int br_afspec(struct net_bridge *br,
228 struct net_bridge_port *p,
229 struct nlattr *af_spec,
230 int cmd)
231{
232 struct nlattr *tb[IFLA_BRIDGE_MAX+1];
233 int err = 0;
234
235 err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
236 if (err)
237 return err;
238
239 if (tb[IFLA_BRIDGE_VLAN_INFO]) {
240 struct bridge_vlan_info *vinfo;
241
242 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
243
244 if (vinfo->vid >= VLAN_N_VID)
245 return -EINVAL;
246
247 switch (cmd) {
248 case RTM_SETLINK:
249 if (p) {
250 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
251 if (err)
252 break;
253
254 if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
255 err = br_vlan_add(p->br, vinfo->vid,
256 vinfo->flags);
257 } else
258 err = br_vlan_add(br, vinfo->vid, vinfo->flags);
259
260 if (err)
261 break;
262
263 break;
264
265 case RTM_DELLINK:
266 if (p) {
267 nbp_vlan_delete(p, vinfo->vid);
268 if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
269 br_vlan_delete(p->br, vinfo->vid);
270 } else
271 br_vlan_delete(br, vinfo->vid);
272 break;
273 }
274 }
275
276 return err;
277}
278
165static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { 279static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
166 [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, 280 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
167 [IFLA_BRPORT_COST] = { .type = NLA_U32 }, 281 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
@@ -181,8 +295,11 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state)
181 if (p->br->stp_enabled == BR_KERNEL_STP) 295 if (p->br->stp_enabled == BR_KERNEL_STP)
182 return -EBUSY; 296 return -EBUSY;
183 297
298 /* if device is not up, change is not allowed
299 * if link is not present, only allowable state is disabled
300 */
184 if (!netif_running(p->dev) || 301 if (!netif_running(p->dev) ||
185 (!netif_carrier_ok(p->dev) && state != BR_STATE_DISABLED)) 302 (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
186 return -ENETDOWN; 303 return -ENETDOWN;
187 304
188 p->state = state; 305 p->state = state;
@@ -238,6 +355,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
238{ 355{
239 struct ifinfomsg *ifm; 356 struct ifinfomsg *ifm;
240 struct nlattr *protinfo; 357 struct nlattr *protinfo;
358 struct nlattr *afspec;
241 struct net_bridge_port *p; 359 struct net_bridge_port *p;
242 struct nlattr *tb[IFLA_BRPORT_MAX + 1]; 360 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
243 int err; 361 int err;
@@ -245,38 +363,76 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
245 ifm = nlmsg_data(nlh); 363 ifm = nlmsg_data(nlh);
246 364
247 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); 365 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
248 if (!protinfo) 366 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
367 if (!protinfo && !afspec)
249 return 0; 368 return 0;
250 369
251 p = br_port_get_rtnl(dev); 370 p = br_port_get_rtnl(dev);
252 if (!p) 371 /* We want to accept dev as bridge itself if the AF_SPEC
372 * is set to see if someone is setting vlan info on the bridge
373 */
374 if (!p && ((dev->priv_flags & IFF_EBRIDGE) && !afspec))
253 return -EINVAL; 375 return -EINVAL;
254 376
255 if (protinfo->nla_type & NLA_F_NESTED) { 377 if (p && protinfo) {
256 err = nla_parse_nested(tb, IFLA_BRPORT_MAX, 378 if (protinfo->nla_type & NLA_F_NESTED) {
257 protinfo, ifla_brport_policy); 379 err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
380 protinfo, ifla_brport_policy);
381 if (err)
382 return err;
383
384 spin_lock_bh(&p->br->lock);
385 err = br_setport(p, tb);
386 spin_unlock_bh(&p->br->lock);
387 } else {
388 /* Binary compatibility with old RSTP */
389 if (nla_len(protinfo) < sizeof(u8))
390 return -EINVAL;
391
392 spin_lock_bh(&p->br->lock);
393 err = br_set_port_state(p, nla_get_u8(protinfo));
394 spin_unlock_bh(&p->br->lock);
395 }
258 if (err) 396 if (err)
259 return err; 397 goto out;
260 398 }
261 spin_lock_bh(&p->br->lock);
262 err = br_setport(p, tb);
263 spin_unlock_bh(&p->br->lock);
264 } else {
265 /* Binary compatibility with old RSTP */
266 if (nla_len(protinfo) < sizeof(u8))
267 return -EINVAL;
268 399
269 spin_lock_bh(&p->br->lock); 400 if (afspec) {
270 err = br_set_port_state(p, nla_get_u8(protinfo)); 401 err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
271 spin_unlock_bh(&p->br->lock); 402 afspec, RTM_SETLINK);
272 } 403 }
273 404
274 if (err == 0) 405 if (err == 0)
275 br_ifinfo_notify(RTM_NEWLINK, p); 406 br_ifinfo_notify(RTM_NEWLINK, p);
276 407
408out:
277 return err; 409 return err;
278} 410}
279 411
412/* Delete port information */
413int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
414{
415 struct ifinfomsg *ifm;
416 struct nlattr *afspec;
417 struct net_bridge_port *p;
418 int err;
419
420 ifm = nlmsg_data(nlh);
421
422 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
423 if (!afspec)
424 return 0;
425
426 p = br_port_get_rtnl(dev);
427 /* We want to accept dev as bridge itself as well */
428 if (!p && !(dev->priv_flags & IFF_EBRIDGE))
429 return -EINVAL;
430
431 err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
432 afspec, RTM_DELLINK);
433
434 return err;
435}
280static int br_validate(struct nlattr *tb[], struct nlattr *data[]) 436static int br_validate(struct nlattr *tb[], struct nlattr *data[])
281{ 437{
282 if (tb[IFLA_ADDRESS]) { 438 if (tb[IFLA_ADDRESS]) {
@@ -289,6 +445,29 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
289 return 0; 445 return 0;
290} 446}
291 447
448static size_t br_get_link_af_size(const struct net_device *dev)
449{
450 struct net_port_vlans *pv;
451
452 if (br_port_exists(dev))
453 pv = nbp_get_vlan_info(br_port_get_rcu(dev));
454 else if (dev->priv_flags & IFF_EBRIDGE)
455 pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
456 else
457 return 0;
458
459 if (!pv)
460 return 0;
461
462 /* Each VLAN is returned in bridge_vlan_info along with flags */
463 return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
464}
465
466static struct rtnl_af_ops br_af_ops = {
467 .family = AF_BRIDGE,
468 .get_link_af_size = br_get_link_af_size,
469};
470
292struct rtnl_link_ops br_link_ops __read_mostly = { 471struct rtnl_link_ops br_link_ops __read_mostly = {
293 .kind = "bridge", 472 .kind = "bridge",
294 .priv_size = sizeof(struct net_bridge), 473 .priv_size = sizeof(struct net_bridge),
@@ -299,11 +478,29 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
299 478
300int __init br_netlink_init(void) 479int __init br_netlink_init(void)
301{ 480{
302 return rtnl_link_register(&br_link_ops); 481 int err;
482
483 br_mdb_init();
484 err = rtnl_af_register(&br_af_ops);
485 if (err)
486 goto out;
487
488 err = rtnl_link_register(&br_link_ops);
489 if (err)
490 goto out_af;
491
492 return 0;
493
494out_af:
495 rtnl_af_unregister(&br_af_ops);
496out:
497 br_mdb_uninit();
498 return err;
303} 499}
304 500
305void __exit br_netlink_fini(void) 501void __exit br_netlink_fini(void)
306{ 502{
503 br_mdb_uninit();
504 rtnl_af_unregister(&br_af_ops);
307 rtnl_link_unregister(&br_link_ops); 505 rtnl_link_unregister(&br_link_ops);
308 rtnl_unregister_all(PF_BRIDGE);
309} 506}
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index a76b62135558..1644b3e1f947 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,7 +82,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
82 break; 82 break;
83 83
84 case NETDEV_UP: 84 case NETDEV_UP:
85 if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP)) { 85 if (netif_running(br->dev) && netif_oper_up(dev)) {
86 spin_lock_bh(&br->lock); 86 spin_lock_bh(&br->lock);
87 br_stp_enable_port(p); 87 br_stp_enable_port(p);
88 spin_unlock_bh(&br->lock); 88 spin_unlock_bh(&br->lock);
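
The repeated netif_carrier_ok() to netif_oper_up() substitution throughout this series makes STP react to RFC 2863 operational state rather than the raw carrier bit, so a port that has carrier but is dormant (say, waiting on 802.1X) is not moved into forwarding. Roughly what the helper checks, with operstate values as in uapi/linux/if.h:

    #include <stdbool.h>

    enum {
        IF_OPER_UNKNOWN,        /* 0: drivers that never report operstate */
        IF_OPER_NOTPRESENT,
        IF_OPER_DOWN,
        IF_OPER_LOWERLAYERDOWN,
        IF_OPER_TESTING,
        IF_OPER_DORMANT,        /* carrier present, protocol not yet up */
        IF_OPER_UP,
    };

    /* Sketch of the netif_oper_up() test: up, or unknown for drivers
     * predating operstate reporting. Carrier alone is not enough. */
    static bool oper_up(int operstate)
    {
        return operstate == IF_OPER_UP || operstate == IF_OPER_UNKNOWN;
    }
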
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index f21a739a6186..6d314c4e6bcb 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -18,6 +18,7 @@
18#include <linux/netpoll.h> 18#include <linux/netpoll.h>
19#include <linux/u64_stats_sync.h> 19#include <linux/u64_stats_sync.h>
20#include <net/route.h> 20#include <net/route.h>
21#include <linux/if_vlan.h>
21 22
22#define BR_HASH_BITS 8 23#define BR_HASH_BITS 8
23#define BR_HASH_SIZE (1 << BR_HASH_BITS) 24#define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -26,6 +27,7 @@
26 27
27#define BR_PORT_BITS 10 28#define BR_PORT_BITS 10
28#define BR_MAX_PORTS (1<<BR_PORT_BITS) 29#define BR_MAX_PORTS (1<<BR_PORT_BITS)
30#define BR_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
29 31
30#define BR_VERSION "2.3" 32#define BR_VERSION "2.3"
31 33
@@ -61,6 +63,20 @@ struct br_ip
61#endif 63#endif
62 } u; 64 } u;
63 __be16 proto; 65 __be16 proto;
66 __u16 vid;
67};
68
69struct net_port_vlans {
70 u16 port_idx;
71 u16 pvid;
72 union {
73 struct net_bridge_port *port;
74 struct net_bridge *br;
75 } parent;
76 struct rcu_head rcu;
77 unsigned long vlan_bitmap[BR_VLAN_BITMAP_LEN];
78 unsigned long untagged_bitmap[BR_VLAN_BITMAP_LEN];
79 u16 num_vlans;
64}; 80};
65 81
66struct net_bridge_fdb_entry 82struct net_bridge_fdb_entry
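
struct net_port_vlans packs the per-port VLAN state into two fixed bitmaps of BR_VLAN_BITMAP_LEN longs, and every "all configured vlans" loop in this patch walks them with find_first_bit()/find_next_bit(), whose size argument counts bits (VLAN_N_VID), not longs. A user-space rendition of that walk, with a naive bit scan standing in for the kernel helpers:

    #include <stdio.h>

    #define VLAN_N_VID 4096
    #define BPL (8 * sizeof(unsigned long))
    #define BR_VLAN_BITMAP_LEN ((VLAN_N_VID + BPL - 1) / BPL)

    static void set_vid(unsigned long *map, unsigned int vid)
    {
        map[vid / BPL] |= 1UL << (vid % BPL);
    }

    /* Naive stand-in for find_next_bit(): returns size when exhausted,
     * which is exactly how the loops above terminate. */
    static unsigned int next_bit(const unsigned long *map, unsigned int size,
                                 unsigned int from)
    {
        for (; from < size; from++)
            if (map[from / BPL] >> (from % BPL) & 1)
                return from;
        return size;
    }

    int main(void)
    {
        unsigned long vlan_bitmap[BR_VLAN_BITMAP_LEN] = { 0 };

        set_vid(vlan_bitmap, 10);
        set_vid(vlan_bitmap, 20);

        /* The loop shape used by br_fdb_add()/br_fdb_delete() above. */
        for (unsigned int vid = next_bit(vlan_bitmap, VLAN_N_VID, 0);
             vid < VLAN_N_VID;
             vid = next_bit(vlan_bitmap, VLAN_N_VID, vid + 1))
            printf("configured vid %u\n", vid);
        return 0;
    }
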
@@ -74,6 +90,7 @@ struct net_bridge_fdb_entry
74 mac_addr addr; 90 mac_addr addr;
75 unsigned char is_local; 91 unsigned char is_local;
76 unsigned char is_static; 92 unsigned char is_static;
93 __u16 vlan_id;
77}; 94};
78 95
79struct net_bridge_port_group { 96struct net_bridge_port_group {
@@ -83,6 +100,7 @@ struct net_bridge_port_group {
83 struct rcu_head rcu; 100 struct rcu_head rcu;
84 struct timer_list timer; 101 struct timer_list timer;
85 struct br_ip addr; 102 struct br_ip addr;
103 unsigned char state;
86}; 104};
87 105
88struct net_bridge_mdb_entry 106struct net_bridge_mdb_entry
@@ -155,6 +173,9 @@ struct net_bridge_port
155#ifdef CONFIG_NET_POLL_CONTROLLER 173#ifdef CONFIG_NET_POLL_CONTROLLER
156 struct netpoll *np; 174 struct netpoll *np;
157#endif 175#endif
176#ifdef CONFIG_BRIDGE_VLAN_FILTERING
177 struct net_port_vlans __rcu *vlan_info;
178#endif
158}; 179};
159 180
160#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 181#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
@@ -196,9 +217,6 @@ struct net_bridge
196 bool nf_call_ip6tables; 217 bool nf_call_ip6tables;
197 bool nf_call_arptables; 218 bool nf_call_arptables;
198#endif 219#endif
199 unsigned long flags;
200#define BR_SET_MAC_ADDR 0x00000001
201
202 u16 group_fwd_mask; 220 u16 group_fwd_mask;
203 221
204 /* STP */ 222 /* STP */
@@ -259,6 +277,10 @@ struct net_bridge
259 struct timer_list topology_change_timer; 277 struct timer_list topology_change_timer;
260 struct timer_list gc_timer; 278 struct timer_list gc_timer;
261 struct kobject *ifobj; 279 struct kobject *ifobj;
280#ifdef CONFIG_BRIDGE_VLAN_FILTERING
281 u8 vlan_enabled;
282 struct net_port_vlans __rcu *vlan_info;
283#endif
262}; 284};
263 285
264struct br_input_skb_cb { 286struct br_input_skb_cb {
@@ -354,18 +376,22 @@ extern void br_fdb_cleanup(unsigned long arg);
354extern void br_fdb_delete_by_port(struct net_bridge *br, 376extern void br_fdb_delete_by_port(struct net_bridge *br,
355 const struct net_bridge_port *p, int do_all); 377 const struct net_bridge_port *p, int do_all);
356extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 378extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
357 const unsigned char *addr); 379 const unsigned char *addr,
380 __u16 vid);
358extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr); 381extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
359extern int br_fdb_fillbuf(struct net_bridge *br, void *buf, 382extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
360 unsigned long count, unsigned long off); 383 unsigned long count, unsigned long off);
361extern int br_fdb_insert(struct net_bridge *br, 384extern int br_fdb_insert(struct net_bridge *br,
362 struct net_bridge_port *source, 385 struct net_bridge_port *source,
363 const unsigned char *addr); 386 const unsigned char *addr,
387 u16 vid);
364extern void br_fdb_update(struct net_bridge *br, 388extern void br_fdb_update(struct net_bridge *br,
365 struct net_bridge_port *source, 389 struct net_bridge_port *source,
366 const unsigned char *addr); 390 const unsigned char *addr,
391 u16 vid);
392extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
367 393
368extern int br_fdb_delete(struct ndmsg *ndm, 394extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
369 struct net_device *dev, 395 struct net_device *dev,
370 const unsigned char *addr); 396 const unsigned char *addr);
371extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], 397extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
@@ -443,8 +469,10 @@ extern void br_multicast_free_pg(struct rcu_head *head);
443extern struct net_bridge_port_group *br_multicast_new_port_group( 469extern struct net_bridge_port_group *br_multicast_new_port_group(
444 struct net_bridge_port *port, 470 struct net_bridge_port *port,
445 struct br_ip *group, 471 struct br_ip *group,
446 struct net_bridge_port_group *next); 472 struct net_bridge_port_group *next,
473 unsigned char state);
447extern void br_mdb_init(void); 474extern void br_mdb_init(void);
475extern void br_mdb_uninit(void);
448extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 476extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
449 struct br_ip *group, int type); 477 struct br_ip *group, int type);
450 478
@@ -523,6 +551,148 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
523{ 551{
524 return 0; 552 return 0;
525} 553}
554static inline void br_mdb_init(void)
555{
556}
557static inline void br_mdb_uninit(void)
558{
559}
560#endif
561
562/* br_vlan.c */
563#ifdef CONFIG_BRIDGE_VLAN_FILTERING
564extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
565 struct sk_buff *skb, u16 *vid);
566extern bool br_allowed_egress(struct net_bridge *br,
567 const struct net_port_vlans *v,
568 const struct sk_buff *skb);
569extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
570 const struct net_port_vlans *v,
571 struct sk_buff *skb);
572extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
573extern int br_vlan_delete(struct net_bridge *br, u16 vid);
574extern void br_vlan_flush(struct net_bridge *br);
575extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
576extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
577extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
578extern void nbp_vlan_flush(struct net_bridge_port *port);
579extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
580
581static inline struct net_port_vlans *br_get_vlan_info(
582 const struct net_bridge *br)
583{
584 return rcu_dereference_rtnl(br->vlan_info);
585}
586
587static inline struct net_port_vlans *nbp_get_vlan_info(
588 const struct net_bridge_port *p)
589{
590 return rcu_dereference_rtnl(p->vlan_info);
591}
592
593/* Since the bridge now depends on the 8021Q module, by the time the bridge
594 * sees the skb, the vlan tag will always be present if the frame was tagged.
595 */
596static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
597{
598 int err = 0;
599
600 if (vlan_tx_tag_present(skb))
601 *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
602 else {
603 *vid = 0;
604 err = -EINVAL;
605 }
606
607 return err;
608}
609
610static inline u16 br_get_pvid(const struct net_port_vlans *v)
611{
612 /* Return just the VID if it is set, or VLAN_N_VID (invalid vid) if
613 * vid wasn't set
614 */
615 smp_rmb();
616 return (v->pvid & VLAN_TAG_PRESENT) ?
617 (v->pvid & ~VLAN_TAG_PRESENT) :
618 VLAN_N_VID;
619}
620
621#else
622static inline bool br_allowed_ingress(struct net_bridge *br,
623 struct net_port_vlans *v,
624 struct sk_buff *skb,
625 u16 *vid)
626{
627 return true;
628}
629
630static inline bool br_allowed_egress(struct net_bridge *br,
631 const struct net_port_vlans *v,
632 const struct sk_buff *skb)
633{
634 return true;
635}
636
637static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
638 const struct net_port_vlans *v,
639 struct sk_buff *skb)
640{
641 return skb;
642}
643
644static inline int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
645{
646 return -EOPNOTSUPP;
647}
648
649static inline int br_vlan_delete(struct net_bridge *br, u16 vid)
650{
651 return -EOPNOTSUPP;
652}
653
654static inline void br_vlan_flush(struct net_bridge *br)
655{
656}
657
658static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
659{
660 return -EOPNOTSUPP;
661}
662
663static inline int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
664{
665 return -EOPNOTSUPP;
666}
667
668static inline void nbp_vlan_flush(struct net_bridge_port *port)
669{
670}
671
672static inline struct net_port_vlans *br_get_vlan_info(
673 const struct net_bridge *br)
674{
675 return NULL;
676}
677static inline struct net_port_vlans *nbp_get_vlan_info(
678 const struct net_bridge_port *p)
679{
680 return NULL;
681}
682
683static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
684{
685 return false;
686}
687
688static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
689{
690 return 0;
691}
692static inline u16 br_get_pvid(const struct net_port_vlans *v)
693{
694 return VLAN_N_VID; /* Returns invalid vid */
695}
526#endif 696#endif
527 697
528/* br_netfilter.c */ 698/* br_netfilter.c */
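br_vlan_get_tag() above only consults the hwaccel tag: the 16-bit TCI carries 3 priority bits, 1 DEI bit and the 12-bit VID, and VLAN_VID_MASK (0x0fff) strips the upper bits. A hypothetical caller, just to show the contract (the -EINVAL return means "frame was untagged, fall back to the pvid"):

/* Hypothetical caller -- not part of the patch. */
static u16 sketch_pick_vid(const struct sk_buff *skb, u16 pvid)
{
	u16 vid;

	if (br_vlan_get_tag(skb, &vid))	/* -EINVAL: no tag present */
		return pvid;		/* untagged traffic joins the pvid */

	return vid;			/* tagged: VID already masked */
}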
@@ -585,8 +755,9 @@ extern int br_netlink_init(void);
585extern void br_netlink_fini(void); 755extern void br_netlink_fini(void);
586extern void br_ifinfo_notify(int event, struct net_bridge_port *port); 756extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
587extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg); 757extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
758extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
588extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 759extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
589 struct net_device *dev); 760 struct net_device *dev, u32 filter_mask);
590 761
591#ifdef CONFIG_SYSFS 762#ifdef CONFIG_SYSFS
592/* br_sysfs_if.c */ 763/* br_sysfs_if.c */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 7f884e3fb955..8660ea3be705 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -16,6 +16,7 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pkt_sched.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/llc.h> 21#include <net/llc.h>
21#include <net/llc_pdu.h> 22#include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
40 41
41 skb->dev = p->dev; 42 skb->dev = p->dev;
42 skb->protocol = htons(ETH_P_802_2); 43 skb->protocol = htons(ETH_P_802_2);
44 skb->priority = TC_PRIO_CONTROL;
43 45
44 skb_reserve(skb, LLC_RESERVE); 46 skb_reserve(skb, LLC_RESERVE);
45 memcpy(__skb_put(skb, length), data, length); 47 memcpy(__skb_put(skb, length), data, length);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9d5a414a3943..0bdb4ebd362b 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -54,7 +54,7 @@ void br_stp_enable_bridge(struct net_bridge *br)
54 br_config_bpdu_generation(br); 54 br_config_bpdu_generation(br);
55 55
56 list_for_each_entry(p, &br->port_list, list) { 56 list_for_each_entry(p, &br->port_list, list) {
57 if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev)) 57 if (netif_running(p->dev) && netif_oper_up(p->dev))
58 br_stp_enable_port(p); 58 br_stp_enable_port(p);
59 59
60 } 60 }
@@ -216,7 +216,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
216 struct net_bridge_port *p; 216 struct net_bridge_port *p;
217 217
218 /* user has chosen a value so keep it */ 218 /* user has chosen a value so keep it */
219 if (br->flags & BR_SET_MAC_ADDR) 219 if (br->dev->addr_assign_type == NET_ADDR_SET)
220 return false; 220 return false;
221 221
222 list_for_each_entry(p, &br->port_list, list) { 222 list_for_each_entry(p, &br->port_list, list) {
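The private BR_SET_MAC_ADDR flag could be dropped because the core already records how a device obtained its address: dev_set_mac_address() marks the device NET_ADDR_SET, so the bridge can simply query addr_assign_type. A sketch of the check, assuming the usual values from include/linux/netdevice.h:

/* Illustrative only: "did userspace pin this MAC?" */
static bool sketch_mac_is_pinned(const struct net_device *dev)
{
	/* NET_ADDR_SET: address assigned via ioctl/netlink by userspace */
	return dev->addr_assign_type == NET_ADDR_SET;
}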
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5913a3a0047b..8baa9c08e1a4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -691,6 +691,24 @@ static ssize_t store_nf_call_arptables(
691static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR, 691static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
692 show_nf_call_arptables, store_nf_call_arptables); 692 show_nf_call_arptables, store_nf_call_arptables);
693#endif 693#endif
694#ifdef CONFIG_BRIDGE_VLAN_FILTERING
695static ssize_t show_vlan_filtering(struct device *d,
696 struct device_attribute *attr,
697 char *buf)
698{
699 struct net_bridge *br = to_bridge(d);
700 return sprintf(buf, "%d\n", br->vlan_enabled);
701}
702
703static ssize_t store_vlan_filtering(struct device *d,
704 struct device_attribute *attr,
705 const char *buf, size_t len)
706{
707 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
708}
709static DEVICE_ATTR(vlan_filtering, S_IRUGO | S_IWUSR,
710 show_vlan_filtering, store_vlan_filtering);
711#endif
694 712
695static struct attribute *bridge_attrs[] = { 713static struct attribute *bridge_attrs[] = {
696 &dev_attr_forward_delay.attr, 714 &dev_attr_forward_delay.attr,
@@ -732,6 +750,9 @@ static struct attribute *bridge_attrs[] = {
732 &dev_attr_nf_call_ip6tables.attr, 750 &dev_attr_nf_call_ip6tables.attr,
733 &dev_attr_nf_call_arptables.attr, 751 &dev_attr_nf_call_arptables.attr,
734#endif 752#endif
753#ifdef CONFIG_BRIDGE_VLAN_FILTERING
754 &dev_attr_vlan_filtering.attr,
755#endif
735 NULL 756 NULL
736}; 757};
737 758
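Once registered, the new attribute appears alongside the other bridge knobs; assuming the standard bridge sysfs layout, that is /sys/class/net/<bridge>/bridge/vlan_filtering. A minimal userspace sketch (hypothetical bridge name br0):

#include <fcntl.h>
#include <unistd.h>

/* Enable VLAN filtering on br0, equivalent to writing "1" from a shell. */
static int sketch_enable_vlan_filtering(void)
{
	int fd = open("/sys/class/net/br0/bridge/vlan_filtering", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}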
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
new file mode 100644
index 000000000000..93dde75923f0
--- /dev/null
+++ b/net/bridge/br_vlan.c
@@ -0,0 +1,415 @@
1#include <linux/kernel.h>
2#include <linux/netdevice.h>
3#include <linux/rtnetlink.h>
4#include <linux/slab.h>
5
6#include "br_private.h"
7
8static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
9{
10 if (v->pvid == vid)
11 return;
12
13 smp_wmb();
14 v->pvid = vid;
15}
16
17static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
18{
19 if (v->pvid != vid)
20 return;
21
22 smp_wmb();
23 v->pvid = 0;
24}
25
26static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
27{
28 if (flags & BRIDGE_VLAN_INFO_PVID)
29 __vlan_add_pvid(v, vid);
30
31 if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
32 set_bit(vid, v->untagged_bitmap);
33}
34
35static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
36{
37 struct net_bridge_port *p = NULL;
38 struct net_bridge *br;
39 struct net_device *dev;
40 int err;
41
42 if (test_bit(vid, v->vlan_bitmap)) {
43 __vlan_add_flags(v, vid, flags);
44 return 0;
45 }
46
47 if (vid) {
48 if (v->port_idx) {
49 p = v->parent.port;
50 br = p->br;
51 dev = p->dev;
52 } else {
53 br = v->parent.br;
54 dev = br->dev;
55 }
56
57 if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) {
58 /* Add VLAN to the device filter if it is supported.
 59 * Strictly speaking, this is not necessary now, since
60 * devices are made promiscuous by the bridge, but if
61 * that ever changes this code will allow tagged
62 * traffic to enter the bridge.
63 */
64 err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid);
65 if (err)
66 return err;
67 }
68
69 err = br_fdb_insert(br, p, dev->dev_addr, vid);
70 if (err) {
 71 br_err(br, "failed to insert local address into bridge "
72 "forwarding table\n");
73 goto out_filt;
74 }
75
76 }
77
78 set_bit(vid, v->vlan_bitmap);
79 v->num_vlans++;
80 __vlan_add_flags(v, vid, flags);
81
82 return 0;
83
84out_filt:
85 if (p && (dev->features & NETIF_F_HW_VLAN_FILTER))
86 dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
87 return err;
88}
89
90static int __vlan_del(struct net_port_vlans *v, u16 vid)
91{
92 if (!test_bit(vid, v->vlan_bitmap))
93 return -EINVAL;
94
95 __vlan_delete_pvid(v, vid);
96 clear_bit(vid, v->untagged_bitmap);
97
98 if (v->port_idx && vid) {
99 struct net_device *dev = v->parent.port->dev;
100
101 if (dev->features & NETIF_F_HW_VLAN_FILTER)
102 dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
103 }
104
105 clear_bit(vid, v->vlan_bitmap);
106 v->num_vlans--;
107 if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
108 if (v->port_idx)
109 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
110 else
111 rcu_assign_pointer(v->parent.br->vlan_info, NULL);
112 kfree_rcu(v, rcu);
113 }
114 return 0;
115}
116
117static void __vlan_flush(struct net_port_vlans *v)
118{
119 smp_wmb();
120 v->pvid = 0;
121 bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
122 if (v->port_idx)
123 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
124 else
125 rcu_assign_pointer(v->parent.br->vlan_info, NULL);
126 kfree_rcu(v, rcu);
127}
128
129/* Strip the tag from the packet. Will return skb with tci set to 0. */
130static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
131{
132 if (skb->protocol != htons(ETH_P_8021Q)) {
133 skb->vlan_tci = 0;
134 return skb;
135 }
136
137 skb->vlan_tci = 0;
138 skb = vlan_untag(skb);
139 if (skb)
140 skb->vlan_tci = 0;
141
142 return skb;
143}
144
145struct sk_buff *br_handle_vlan(struct net_bridge *br,
146 const struct net_port_vlans *pv,
147 struct sk_buff *skb)
148{
149 u16 vid;
150
151 if (!br->vlan_enabled)
152 goto out;
153
154 /* At this point, we know that the frame was filtered and contains
155 * a valid vlan id. If the vlan id is set in the untagged bitmap,
 156 * send untagged; otherwise, send tagged.
157 */
158 br_vlan_get_tag(skb, &vid);
159 if (test_bit(vid, pv->untagged_bitmap))
160 skb = br_vlan_untag(skb);
161 else {
162 /* Egress policy says "send tagged". If output device
163 * is the bridge, we need to add the VLAN header
164 * ourselves since we'll be going through the RX path.
165 * Sending to ports puts the frame on the TX path and
166 * we let dev_hard_start_xmit() add the header.
167 */
168 if (skb->protocol != htons(ETH_P_8021Q) &&
169 pv->port_idx == 0) {
170 /* vlan_put_tag expects skb->data to point to
171 * mac header.
172 */
173 skb_push(skb, ETH_HLEN);
174 skb = __vlan_put_tag(skb, skb->vlan_tci);
175 if (!skb)
176 goto out;
177 /* put skb->data back to where it was */
178 skb_pull(skb, ETH_HLEN);
179 skb->vlan_tci = 0;
180 }
181 }
182
183out:
184 return skb;
185}
186
187/* Called under RCU */
188bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
189 struct sk_buff *skb, u16 *vid)
190{
191 /* If VLAN filtering is disabled on the bridge, all packets are
192 * permitted.
193 */
194 if (!br->vlan_enabled)
195 return true;
196
 197 /* If there are no vlans in the permitted list, all packets are
198 * rejected.
199 */
200 if (!v)
201 return false;
202
203 if (br_vlan_get_tag(skb, vid)) {
204 u16 pvid = br_get_pvid(v);
205
206 /* Frame did not have a tag. See if pvid is set
207 * on this port. That tells us which vlan untagged
208 * traffic belongs to.
209 */
210 if (pvid == VLAN_N_VID)
211 return false;
212
213 /* PVID is set on this port. Any untagged ingress
214 * frame is considered to belong to this vlan.
215 */
216 __vlan_hwaccel_put_tag(skb, pvid);
217 return true;
218 }
219
220 /* Frame had a valid vlan tag. See if vlan is allowed */
221 if (test_bit(*vid, v->vlan_bitmap))
222 return true;
223
224 return false;
225}
226
227/* Called under RCU. */
228bool br_allowed_egress(struct net_bridge *br,
229 const struct net_port_vlans *v,
230 const struct sk_buff *skb)
231{
232 u16 vid;
233
234 if (!br->vlan_enabled)
235 return true;
236
237 if (!v)
238 return false;
239
240 br_vlan_get_tag(skb, &vid);
241 if (test_bit(vid, v->vlan_bitmap))
242 return true;
243
244 return false;
245}
246
247/* Must be protected by RTNL */
248int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
249{
250 struct net_port_vlans *pv = NULL;
251 int err;
252
253 ASSERT_RTNL();
254
255 pv = rtnl_dereference(br->vlan_info);
256 if (pv)
257 return __vlan_add(pv, vid, flags);
258
 259 /* Create port vlan information
260 */
261 pv = kzalloc(sizeof(*pv), GFP_KERNEL);
262 if (!pv)
263 return -ENOMEM;
264
265 pv->parent.br = br;
266 err = __vlan_add(pv, vid, flags);
267 if (err)
268 goto out;
269
270 rcu_assign_pointer(br->vlan_info, pv);
271 return 0;
272out:
273 kfree(pv);
274 return err;
275}
276
277/* Must be protected by RTNL */
278int br_vlan_delete(struct net_bridge *br, u16 vid)
279{
280 struct net_port_vlans *pv;
281
282 ASSERT_RTNL();
283
284 pv = rtnl_dereference(br->vlan_info);
285 if (!pv)
286 return -EINVAL;
287
288 if (vid) {
 289 /* If the VID != 0, remove the fdb entry for this vid. VID 0 is
 290 * special in that it's the default and is always there in the fdb.
291 */
292 spin_lock_bh(&br->hash_lock);
293 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
294 spin_unlock_bh(&br->hash_lock);
295 }
296
297 __vlan_del(pv, vid);
298 return 0;
299}
300
301void br_vlan_flush(struct net_bridge *br)
302{
303 struct net_port_vlans *pv;
304
305 ASSERT_RTNL();
306 pv = rtnl_dereference(br->vlan_info);
307 if (!pv)
308 return;
309
310 __vlan_flush(pv);
311}
312
313int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
314{
315 if (!rtnl_trylock())
316 return restart_syscall();
317
318 if (br->vlan_enabled == val)
319 goto unlock;
320
321 br->vlan_enabled = val;
322
323unlock:
324 rtnl_unlock();
325 return 0;
326}
327
328/* Must be protected by RTNL */
329int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
330{
331 struct net_port_vlans *pv = NULL;
332 int err;
333
334 ASSERT_RTNL();
335
336 pv = rtnl_dereference(port->vlan_info);
337 if (pv)
338 return __vlan_add(pv, vid, flags);
339
 340 /* Create port vlan information
341 */
342 pv = kzalloc(sizeof(*pv), GFP_KERNEL);
343 if (!pv) {
344 err = -ENOMEM;
345 goto clean_up;
346 }
347
348 pv->port_idx = port->port_no;
349 pv->parent.port = port;
350 err = __vlan_add(pv, vid, flags);
351 if (err)
352 goto clean_up;
353
354 rcu_assign_pointer(port->vlan_info, pv);
355 return 0;
356
357clean_up:
358 kfree(pv);
359 return err;
360}
361
362/* Must be protected by RTNL */
363int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
364{
365 struct net_port_vlans *pv;
366
367 ASSERT_RTNL();
368
369 pv = rtnl_dereference(port->vlan_info);
370 if (!pv)
371 return -EINVAL;
372
373 if (vid) {
 374 /* If the VID != 0, remove the fdb entry for this vid. VID 0 is
 375 * special in that it's the default and is always there in the fdb.
376 */
377 spin_lock_bh(&port->br->hash_lock);
378 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
379 spin_unlock_bh(&port->br->hash_lock);
380 }
381
382 return __vlan_del(pv, vid);
383}
384
385void nbp_vlan_flush(struct net_bridge_port *port)
386{
387 struct net_port_vlans *pv;
388
389 ASSERT_RTNL();
390
391 pv = rtnl_dereference(port->vlan_info);
392 if (!pv)
393 return;
394
395 __vlan_flush(pv);
396}
397
398bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
399{
400 struct net_port_vlans *pv;
401 bool found = false;
402
403 rcu_read_lock();
404 pv = rcu_dereference(port->vlan_info);
405
406 if (!pv)
407 goto out;
408
409 if (test_bit(vid, pv->vlan_bitmap))
410 found = true;
411
412out:
413 rcu_read_unlock();
414 return found;
415}
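br_vlan.c follows the usual RTNL-writer/RCU-reader split: mutators run under rtnl_lock() and pair rtnl_dereference() with rcu_assign_pointer(), the fast path (nbp_vlan_find() above) uses rcu_read_lock()/rcu_dereference(), and teardown defers frees through kfree_rcu(). A generic sketch of the same pattern, independent of the bridge structures:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct sketch_info {
	int data;
	struct rcu_head rcu;
};

static struct sketch_info __rcu *sketch_ptr;

static int sketch_write(int data)	/* caller holds RTNL */
{
	struct sketch_info *old = rtnl_dereference(sketch_ptr);
	struct sketch_info *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->data = data;
	rcu_assign_pointer(sketch_ptr, new);	/* publish to readers */
	if (old)
		kfree_rcu(old, rcu);		/* free after grace period */
	return 0;
}

static int sketch_read(void)		/* any context, no RTNL needed */
{
	struct sketch_info *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(sketch_ptr);
	if (p)
		val = p->data;
	rcu_read_unlock();
	return val;
}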
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 3476ec469740..3bf43f7bb9d4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -70,8 +70,7 @@ static void ulog_send(unsigned int nlgroup)
70{ 70{
71 ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup]; 71 ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup];
72 72
73 if (timer_pending(&ub->timer)) 73 del_timer(&ub->timer);
74 del_timer(&ub->timer);
75 74
76 if (!ub->skb) 75 if (!ub->skb)
77 return; 76 return;
@@ -319,8 +318,7 @@ static void __exit ebt_ulog_fini(void)
319 xt_unregister_target(&ebt_ulog_tg_reg); 318 xt_unregister_target(&ebt_ulog_tg_reg);
320 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) { 319 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
321 ub = &ulog_buffers[i]; 320 ub = &ulog_buffers[i];
322 if (timer_pending(&ub->timer)) 321 del_timer(&ub->timer);
323 del_timer(&ub->timer);
324 spin_lock_bh(&ub->lock); 322 spin_lock_bh(&ub->lock);
325 if (ub->skb) { 323 if (ub->skb) {
326 kfree_skb(ub->skb); 324 kfree_skb(ub->skb);
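The timer_pending() tests removed above were redundant: del_timer() is specified to do nothing and return 0 when the timer is not queued, so calling it unconditionally is always safe (del_timer_sync() would additionally wait for a concurrently running handler). Reduced to a sketch:

#include <linux/timer.h>

/* Illustrative: del_timer() is a no-op on an inactive timer. */
static void sketch_stop_timer(struct timer_list *t)
{
	int was_pending = del_timer(t);	/* 1 if it was queued, else 0 */

	(void)was_pending;	/* no timer_pending() guard required */
}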
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5fe2ff3b01ef..8d493c91a562 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1472,16 +1472,17 @@ static int do_ebt_set_ctl(struct sock *sk,
1472 int cmd, void __user *user, unsigned int len) 1472 int cmd, void __user *user, unsigned int len)
1473{ 1473{
1474 int ret; 1474 int ret;
1475 struct net *net = sock_net(sk);
1475 1476
1476 if (!capable(CAP_NET_ADMIN)) 1477 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1477 return -EPERM; 1478 return -EPERM;
1478 1479
1479 switch(cmd) { 1480 switch(cmd) {
1480 case EBT_SO_SET_ENTRIES: 1481 case EBT_SO_SET_ENTRIES:
1481 ret = do_replace(sock_net(sk), user, len); 1482 ret = do_replace(net, user, len);
1482 break; 1483 break;
1483 case EBT_SO_SET_COUNTERS: 1484 case EBT_SO_SET_COUNTERS:
1484 ret = update_counters(sock_net(sk), user, len); 1485 ret = update_counters(net, user, len);
1485 break; 1486 break;
1486 default: 1487 default:
1487 ret = -EINVAL; 1488 ret = -EINVAL;
@@ -1494,14 +1495,15 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1494 int ret; 1495 int ret;
1495 struct ebt_replace tmp; 1496 struct ebt_replace tmp;
1496 struct ebt_table *t; 1497 struct ebt_table *t;
1498 struct net *net = sock_net(sk);
1497 1499
1498 if (!capable(CAP_NET_ADMIN)) 1500 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1499 return -EPERM; 1501 return -EPERM;
1500 1502
1501 if (copy_from_user(&tmp, user, sizeof(tmp))) 1503 if (copy_from_user(&tmp, user, sizeof(tmp)))
1502 return -EFAULT; 1504 return -EFAULT;
1503 1505
1504 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex); 1506 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1505 if (!t) 1507 if (!t)
1506 return ret; 1508 return ret;
1507 1509
@@ -2279,16 +2281,17 @@ static int compat_do_ebt_set_ctl(struct sock *sk,
2279 int cmd, void __user *user, unsigned int len) 2281 int cmd, void __user *user, unsigned int len)
2280{ 2282{
2281 int ret; 2283 int ret;
2284 struct net *net = sock_net(sk);
2282 2285
2283 if (!capable(CAP_NET_ADMIN)) 2286 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2284 return -EPERM; 2287 return -EPERM;
2285 2288
2286 switch (cmd) { 2289 switch (cmd) {
2287 case EBT_SO_SET_ENTRIES: 2290 case EBT_SO_SET_ENTRIES:
2288 ret = compat_do_replace(sock_net(sk), user, len); 2291 ret = compat_do_replace(net, user, len);
2289 break; 2292 break;
2290 case EBT_SO_SET_COUNTERS: 2293 case EBT_SO_SET_COUNTERS:
2291 ret = compat_update_counters(sock_net(sk), user, len); 2294 ret = compat_update_counters(net, user, len);
2292 break; 2295 break;
2293 default: 2296 default:
2294 ret = -EINVAL; 2297 ret = -EINVAL;
@@ -2302,8 +2305,9 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2302 int ret; 2305 int ret;
2303 struct compat_ebt_replace tmp; 2306 struct compat_ebt_replace tmp;
2304 struct ebt_table *t; 2307 struct ebt_table *t;
2308 struct net *net = sock_net(sk);
2305 2309
2306 if (!capable(CAP_NET_ADMIN)) 2310 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2307 return -EPERM; 2311 return -EPERM;
2308 2312
2309 /* try real handler in case userland supplied needed padding */ 2313 /* try real handler in case userland supplied needed padding */
@@ -2314,7 +2318,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2314 if (copy_from_user(&tmp, user, sizeof(tmp))) 2318 if (copy_from_user(&tmp, user, sizeof(tmp)))
2315 return -EFAULT; 2319 return -EFAULT;
2316 2320
2317 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex); 2321 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2318 if (!t) 2322 if (!t)
2319 return ret; 2323 return ret;
2320 2324
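The switch from capable() to ns_capable() changes where the privilege is evaluated: capable(CAP_NET_ADMIN) tests against the initial user namespace, while ns_capable(net->user_ns, CAP_NET_ADMIN) tests against the user namespace owning the socket's network namespace, letting a container's root manage its own ebtables. The pattern reduced to its core:

/* Sketch of the per-namespace permission check used above. */
static int sketch_perm_check(struct sock *sk)
{
	struct net *net = sock_net(sk);	/* netns this socket lives in */

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;		/* not privileged in that ns */

	return 0;
}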
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index ba9cfd47778a..f1dbddb95a6c 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -402,7 +402,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
402 402
403 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); 403 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
404 if (phyinfo == NULL) { 404 if (phyinfo == NULL) {
 405 pr_err("ERROR: Link Layer Device dissapeared" 405 pr_err("ERROR: Link Layer Device disappeared "
406 "while connecting\n"); 406 "while connecting\n");
407 goto unlock; 407 goto unlock;
408 } 408 }
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 03200699d274..a15c0e0d1fc7 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -16,10 +16,11 @@ menuconfig CAN
16 If you want CAN support you should say Y here and also to the 16 If you want CAN support you should say Y here and also to the
17 specific driver for your controller(s) below. 17 specific driver for your controller(s) below.
18 18
19if CAN
20
19config CAN_RAW 21config CAN_RAW
20 tristate "Raw CAN Protocol (raw access with CAN-ID filtering)" 22 tristate "Raw CAN Protocol (raw access with CAN-ID filtering)"
21 depends on CAN 23 default y
22 default N
23 ---help--- 24 ---help---
24 The raw CAN protocol option offers access to the CAN bus via 25 The raw CAN protocol option offers access to the CAN bus via
25 the BSD socket API. You probably want to use the raw socket in 26 the BSD socket API. You probably want to use the raw socket in
@@ -29,8 +30,7 @@ config CAN_RAW
29 30
30config CAN_BCM 31config CAN_BCM
31 tristate "Broadcast Manager CAN Protocol (with content filtering)" 32 tristate "Broadcast Manager CAN Protocol (with content filtering)"
32 depends on CAN 33 default y
33 default N
34 ---help--- 34 ---help---
35 The Broadcast Manager offers content filtering, timeout monitoring, 35 The Broadcast Manager offers content filtering, timeout monitoring,
36 sending of RTR frames, and cyclic CAN messages without permanent user 36 sending of RTR frames, and cyclic CAN messages without permanent user
@@ -42,8 +42,7 @@ config CAN_BCM
42 42
43config CAN_GW 43config CAN_GW
44 tristate "CAN Gateway/Router (with netlink configuration)" 44 tristate "CAN Gateway/Router (with netlink configuration)"
45 depends on CAN 45 default y
46 default N
47 ---help--- 46 ---help---
48 The CAN Gateway/Router is used to route (and modify) CAN frames. 47 The CAN Gateway/Router is used to route (and modify) CAN frames.
49 It is based on the PF_CAN core infrastructure for msg filtering and 48 It is based on the PF_CAN core infrastructure for msg filtering and
@@ -53,3 +52,5 @@ config CAN_GW
53 by the netlink configuration interface known e.g. from iptables. 52 by the netlink configuration interface known e.g. from iptables.
54 53
55source "drivers/net/can/Kconfig" 54source "drivers/net/can/Kconfig"
55
56endif
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 969b7cdff59d..5dcb20076f39 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -54,6 +54,7 @@
54#include <linux/skbuff.h> 54#include <linux/skbuff.h>
55#include <linux/can.h> 55#include <linux/can.h>
56#include <linux/can/core.h> 56#include <linux/can/core.h>
57#include <linux/can/skb.h>
57#include <linux/can/bcm.h> 58#include <linux/can/bcm.h>
58#include <linux/slab.h> 59#include <linux/slab.h>
59#include <net/sock.h> 60#include <net/sock.h>
@@ -256,10 +257,13 @@ static void bcm_can_tx(struct bcm_op *op)
256 return; 257 return;
257 } 258 }
258 259
259 skb = alloc_skb(CFSIZ, gfp_any()); 260 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
260 if (!skb) 261 if (!skb)
261 goto out; 262 goto out;
262 263
264 can_skb_reserve(skb);
265 can_skb_prv(skb)->ifindex = dev->ifindex;
266
263 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 267 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
264 268
265 /* send with loopback */ 269 /* send with loopback */
@@ -1199,11 +1203,12 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1199 if (!ifindex) 1203 if (!ifindex)
1200 return -ENODEV; 1204 return -ENODEV;
1201 1205
1202 skb = alloc_skb(CFSIZ, GFP_KERNEL); 1206 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
1203
1204 if (!skb) 1207 if (!skb)
1205 return -ENOMEM; 1208 return -ENOMEM;
1206 1209
1210 can_skb_reserve(skb);
1211
1207 err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ); 1212 err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1208 if (err < 0) { 1213 if (err < 0) {
1209 kfree_skb(skb); 1214 kfree_skb(skb);
@@ -1216,6 +1221,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1216 return -ENODEV; 1221 return -ENODEV;
1217 } 1222 }
1218 1223
1224 can_skb_prv(skb)->ifindex = dev->ifindex;
1219 skb->dev = dev; 1225 skb->dev = dev;
1220 skb->sk = sk; 1226 skb->sk = sk;
1221 err = can_send(skb, 1); /* send with loopback */ 1227 err = can_send(skb, 1); /* send with loopback */
@@ -1627,7 +1633,7 @@ static void __exit bcm_module_exit(void)
1627 can_proto_unregister(&bcm_can_proto); 1633 can_proto_unregister(&bcm_can_proto);
1628 1634
1629 if (proc_dir) 1635 if (proc_dir)
1630 proc_net_remove(&init_net, "can-bcm"); 1636 remove_proc_entry("can-bcm", init_net.proc_net);
1631} 1637}
1632 1638
1633module_init(bcm_module_init); 1639module_init(bcm_module_init);
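Every allocation touched above follows one pattern: reserve room for struct can_skb_priv at the head of each outgoing CAN skb and record the originating ifindex there, which is what later lets can-gw recognize frames about to be routed back out their source interface. Paraphrased from the new <linux/can/skb.h> (illustrative, not the verbatim header):

#include <linux/skbuff.h>

struct sketch_can_skb_priv {
	int ifindex;	/* interface the frame entered the stack on */
};

static inline struct sketch_can_skb_priv *sketch_can_skb_prv(struct sk_buff *skb)
{
	return (struct sketch_can_skb_priv *)skb->head;	/* sits before data */
}

static inline void sketch_can_skb_reserve(struct sk_buff *skb)
{
	skb_reserve(skb, sizeof(struct sketch_can_skb_priv));
}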
diff --git a/net/can/gw.c b/net/can/gw.c
index 574dda78eb0f..c185fcd5e828 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -42,6 +42,7 @@
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/kernel.h>
45#include <linux/list.h> 46#include <linux/list.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <linux/rcupdate.h> 48#include <linux/rcupdate.h>
@@ -52,19 +53,31 @@
52#include <linux/skbuff.h> 53#include <linux/skbuff.h>
53#include <linux/can.h> 54#include <linux/can.h>
54#include <linux/can/core.h> 55#include <linux/can/core.h>
56#include <linux/can/skb.h>
55#include <linux/can/gw.h> 57#include <linux/can/gw.h>
56#include <net/rtnetlink.h> 58#include <net/rtnetlink.h>
57#include <net/net_namespace.h> 59#include <net/net_namespace.h>
58#include <net/sock.h> 60#include <net/sock.h>
59 61
60#define CAN_GW_VERSION "20101209" 62#define CAN_GW_VERSION "20130117"
61static __initconst const char banner[] = 63#define CAN_GW_NAME "can-gw"
62 KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
63 64
64MODULE_DESCRIPTION("PF_CAN netlink gateway"); 65MODULE_DESCRIPTION("PF_CAN netlink gateway");
65MODULE_LICENSE("Dual BSD/GPL"); 66MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); 67MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
67MODULE_ALIAS("can-gw"); 68MODULE_ALIAS(CAN_GW_NAME);
69
70#define CGW_MIN_HOPS 1
71#define CGW_MAX_HOPS 6
72#define CGW_DEFAULT_HOPS 1
73
74static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
75module_param(max_hops, uint, S_IRUGO);
76MODULE_PARM_DESC(max_hops,
77 "maximum " CAN_GW_NAME " routing hops for CAN frames "
78 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
79 __stringify(CGW_MAX_HOPS) " hops, "
80 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
68 81
69static HLIST_HEAD(cgw_list); 82static HLIST_HEAD(cgw_list);
70static struct notifier_block notifier; 83static struct notifier_block notifier;
@@ -118,6 +131,7 @@ struct cgw_job {
118 struct rcu_head rcu; 131 struct rcu_head rcu;
119 u32 handled_frames; 132 u32 handled_frames;
120 u32 dropped_frames; 133 u32 dropped_frames;
134 u32 deleted_frames;
121 struct cf_mod mod; 135 struct cf_mod mod;
122 union { 136 union {
123 /* CAN frame data source */ 137 /* CAN frame data source */
@@ -338,15 +352,38 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
338 struct sk_buff *nskb; 352 struct sk_buff *nskb;
339 int modidx = 0; 353 int modidx = 0;
340 354
341 /* do not handle already routed frames - see comment below */ 355 /*
342 if (skb_mac_header_was_set(skb)) 356 * Do not handle CAN frames routed more than 'max_hops' times.
 357 * In general we should never reach this limit; it exists as
 358 * protection against misconfiguration (e.g. circular CAN routes).
359 *
360 * The Controller Area Network controllers only accept CAN frames with
361 * correct CRCs - which are not visible in the controller registers.
362 * According to skbuff.h documentation the csum_start element for IP
 363 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
364 * Only CAN skbs can be processed here which already have this property.
365 */
366
367#define cgw_hops(skb) ((skb)->csum_start)
368
369 BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);
370
371 if (cgw_hops(skb) >= max_hops) {
372 /* indicate deleted frames due to misconfiguration */
373 gwj->deleted_frames++;
343 return; 374 return;
375 }
344 376
345 if (!(gwj->dst.dev->flags & IFF_UP)) { 377 if (!(gwj->dst.dev->flags & IFF_UP)) {
346 gwj->dropped_frames++; 378 gwj->dropped_frames++;
347 return; 379 return;
348 } 380 }
349 381
382 /* is sending the skb back to the incoming interface not allowed? */
383 if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
384 can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
385 return;
386
350 /* 387 /*
351 * clone the given skb, which has not been done in can_rcv() 388 * clone the given skb, which has not been done in can_rcv()
352 * 389 *
@@ -363,15 +400,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
363 return; 400 return;
364 } 401 }
365 402
366 /* 403 /* put the incremented hop counter in the cloned skb */
367 * Mark routed frames by setting some mac header length which is 404 cgw_hops(nskb) = cgw_hops(skb) + 1;
368 * not relevant for the CAN frames located in the skb->data section.
369 *
370 * As dev->header_ops is not set in CAN netdevices no one is ever
371 * accessing the various header offsets in the CAN skbuffs anyway.
372 * E.g. using the packet socket to read CAN frames is still working.
373 */
374 skb_set_mac_header(nskb, 8);
375 nskb->dev = gwj->dst.dev; 405 nskb->dev = gwj->dst.dev;
376 406
377 /* pointer to modifiable CAN frame */ 407 /* pointer to modifiable CAN frame */
@@ -472,6 +502,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
472 goto cancel; 502 goto cancel;
473 } 503 }
474 504
505 if (gwj->deleted_frames) {
506 if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
507 goto cancel;
508 }
509
475 /* check non default settings of attributes */ 510 /* check non default settings of attributes */
476 511
477 if (gwj->mod.modtype.and) { 512 if (gwj->mod.modtype.and) {
@@ -771,6 +806,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
771 806
772 gwj->handled_frames = 0; 807 gwj->handled_frames = 0;
773 gwj->dropped_frames = 0; 808 gwj->dropped_frames = 0;
809 gwj->deleted_frames = 0;
774 gwj->flags = r->flags; 810 gwj->flags = r->flags;
775 gwj->gwtype = r->gwtype; 811 gwj->gwtype = r->gwtype;
776 812
@@ -895,7 +931,11 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
895 931
896static __init int cgw_module_init(void) 932static __init int cgw_module_init(void)
897{ 933{
898 printk(banner); 934 /* sanitize given module parameter */
935 max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
936
937 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
938 max_hops);
899 939
900 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 940 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
901 0, 0, NULL); 941 0, 0, NULL);
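Because the parameter is S_IRUGO (read-only at runtime), max_hops can only be chosen at load time, e.g. modprobe can-gw max_hops=2, and clamp_t() then forces any out-of-range value back into 1..6. As a worked example of clamp_t() semantics:

#include <linux/kernel.h>

/* clamp_t(type, val, lo, hi) == min(max(val, lo), hi) in the given type. */
static unsigned int sketch_sanitize_hops(unsigned int requested)
{
	return clamp_t(unsigned int, requested, 1, 6);	/* 0 -> 1, 99 -> 6 */
}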
diff --git a/net/can/proc.c b/net/can/proc.c
index ae566902d2bf..497335892146 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -531,5 +531,5 @@ void can_remove_proc(void)
531 can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF); 531 can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF);
532 532
533 if (can_dir) 533 if (can_dir)
534 proc_net_remove(&init_net, "can"); 534 remove_proc_entry("can", init_net.proc_net);
535} 535}
diff --git a/net/can/raw.c b/net/can/raw.c
index 5b0e3e330d97..c1764e41ddaf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -50,6 +50,7 @@
50#include <linux/skbuff.h> 50#include <linux/skbuff.h>
51#include <linux/can.h> 51#include <linux/can.h>
52#include <linux/can/core.h> 52#include <linux/can/core.h>
53#include <linux/can/skb.h>
53#include <linux/can/raw.h> 54#include <linux/can/raw.h>
54#include <net/sock.h> 55#include <net/sock.h>
55#include <net/net_namespace.h> 56#include <net/net_namespace.h>
@@ -699,11 +700,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
699 if (!dev) 700 if (!dev)
700 return -ENXIO; 701 return -ENXIO;
701 702
702 skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, 703 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
703 &err); 704 msg->msg_flags & MSG_DONTWAIT, &err);
704 if (!skb) 705 if (!skb)
705 goto put_dev; 706 goto put_dev;
706 707
708 can_skb_reserve(skb);
709 can_skb_prv(skb)->ifindex = dev->ifindex;
710
707 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 711 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
708 if (err < 0) 712 if (err < 0)
709 goto free_skb; 713 goto free_skb;
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index cc04dd667a10..e50cc69ae8ca 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -1,6 +1,6 @@
1config CEPH_LIB 1config CEPH_LIB
2 tristate "Ceph core library (EXPERIMENTAL)" 2 tristate "Ceph core library"
3 depends on INET && EXPERIMENTAL 3 depends on INET
4 select LIBCRC32C 4 select LIBCRC32C
5 select CRYPTO_AES 5 select CRYPTO_AES
6 select CRYPTO 6 select CRYPTO
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a8020293f342..ee71ea26777a 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
305 305
306 /* start with defaults */ 306 /* start with defaults */
307 opt->flags = CEPH_OPT_DEFAULT; 307 opt->flags = CEPH_OPT_DEFAULT;
308 opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
309 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 308 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
310 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ 309 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
311 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ 310 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
391 390
392 /* misc */ 391 /* misc */
393 case Opt_osdtimeout: 392 case Opt_osdtimeout:
394 opt->osd_timeout = intval; 393 pr_warning("ignoring deprecated osdtimeout option\n");
395 break; 394 break;
396 case Opt_osdkeepalivetimeout: 395 case Opt_osdkeepalivetimeout:
397 opt->osd_keepalive_timeout = intval; 396 opt->osd_keepalive_timeout = intval;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3ef1759403b4..5ccf87ed8d68 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
506{ 506{
507 /* reset connection, out_queue, msg_ and connect_seq */ 507 /* reset connection, out_queue, msg_ and connect_seq */
508 /* discard existing out_queue and msg_seq */ 508 /* discard existing out_queue and msg_seq */
509 dout("reset_connection %p\n", con);
509 ceph_msg_remove_list(&con->out_queue); 510 ceph_msg_remove_list(&con->out_queue);
510 ceph_msg_remove_list(&con->out_sent); 511 ceph_msg_remove_list(&con->out_sent);
511 512
@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
561 mutex_lock(&con->mutex); 562 mutex_lock(&con->mutex);
562 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 563 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
563 564
564 BUG_ON(con->state != CON_STATE_CLOSED); 565 WARN_ON(con->state != CON_STATE_CLOSED);
565 con->state = CON_STATE_PREOPEN; 566 con->state = CON_STATE_PREOPEN;
566 567
567 con->peer_name.type = (__u8) entity_type; 568 con->peer_name.type = (__u8) entity_type;
@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
1506 return 0; 1507 return 0;
1507} 1508}
1508 1509
1509static void fail_protocol(struct ceph_connection *con)
1510{
1511 reset_connection(con);
1512 BUG_ON(con->state != CON_STATE_NEGOTIATING);
1513 con->state = CON_STATE_CLOSED;
1514}
1515
1516static int process_connect(struct ceph_connection *con) 1510static int process_connect(struct ceph_connection *con)
1517{ 1511{
1518 u64 sup_feat = con->msgr->supported_features; 1512 u64 sup_feat = con->msgr->supported_features;
@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
1530 ceph_pr_addr(&con->peer_addr.in_addr), 1524 ceph_pr_addr(&con->peer_addr.in_addr),
1531 sup_feat, server_feat, server_feat & ~sup_feat); 1525 sup_feat, server_feat, server_feat & ~sup_feat);
1532 con->error_msg = "missing required protocol features"; 1526 con->error_msg = "missing required protocol features";
1533 fail_protocol(con); 1527 reset_connection(con);
1534 return -1; 1528 return -1;
1535 1529
1536 case CEPH_MSGR_TAG_BADPROTOVER: 1530 case CEPH_MSGR_TAG_BADPROTOVER:
@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
1541 le32_to_cpu(con->out_connect.protocol_version), 1535 le32_to_cpu(con->out_connect.protocol_version),
1542 le32_to_cpu(con->in_reply.protocol_version)); 1536 le32_to_cpu(con->in_reply.protocol_version));
1543 con->error_msg = "protocol version mismatch"; 1537 con->error_msg = "protocol version mismatch";
1544 fail_protocol(con); 1538 reset_connection(con);
1545 return -1; 1539 return -1;
1546 1540
1547 case CEPH_MSGR_TAG_BADAUTHORIZER: 1541 case CEPH_MSGR_TAG_BADAUTHORIZER:
@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
1631 ceph_pr_addr(&con->peer_addr.in_addr), 1625 ceph_pr_addr(&con->peer_addr.in_addr),
1632 req_feat, server_feat, req_feat & ~server_feat); 1626 req_feat, server_feat, req_feat & ~server_feat);
1633 con->error_msg = "missing required protocol features"; 1627 con->error_msg = "missing required protocol features";
1634 fail_protocol(con); 1628 reset_connection(con);
1635 return -1; 1629 return -1;
1636 } 1630 }
1637 1631
1638 BUG_ON(con->state != CON_STATE_NEGOTIATING); 1632 WARN_ON(con->state != CON_STATE_NEGOTIATING);
1639 con->state = CON_STATE_OPEN; 1633 con->state = CON_STATE_OPEN;
1640 1634
1641 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 1635 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
@@ -2132,7 +2126,6 @@ more:
2132 if (ret < 0) 2126 if (ret < 0)
2133 goto out; 2127 goto out;
2134 2128
2135 BUG_ON(con->state != CON_STATE_CONNECTING);
2136 con->state = CON_STATE_NEGOTIATING; 2129 con->state = CON_STATE_NEGOTIATING;
2137 2130
2138 /* 2131 /*
@@ -2160,7 +2153,7 @@ more:
2160 goto more; 2153 goto more;
2161 } 2154 }
2162 2155
2163 BUG_ON(con->state != CON_STATE_OPEN); 2156 WARN_ON(con->state != CON_STATE_OPEN);
2164 2157
2165 if (con->in_base_pos < 0) { 2158 if (con->in_base_pos < 0) {
2166 /* 2159 /*
@@ -2244,22 +2237,62 @@ bad_tag:
2244 2237
2245 2238
2246/* 2239/*
2247 * Atomically queue work on a connection. Bump @con reference to 2240 * Atomically queue work on a connection after the specified delay.
2248 * avoid races with connection teardown. 2241 * Bump @con reference to avoid races with connection teardown.
2242 * Returns 0 if work was queued, or an error code otherwise.
2249 */ 2243 */
2250static void queue_con(struct ceph_connection *con) 2244static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2251{ 2245{
2252 if (!con->ops->get(con)) { 2246 if (!con->ops->get(con)) {
2253 dout("queue_con %p ref count 0\n", con); 2247 dout("%s %p ref count 0\n", __func__, con);
2254 return; 2248
2249 return -ENOENT;
2255 } 2250 }
2256 2251
2257 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { 2252 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2258 dout("queue_con %p - already queued\n", con); 2253 dout("%s %p - already queued\n", __func__, con);
2259 con->ops->put(con); 2254 con->ops->put(con);
2260 } else { 2255
2261 dout("queue_con %p\n", con); 2256 return -EBUSY;
2257 }
2258
2259 dout("%s %p %lu\n", __func__, con, delay);
2260
2261 return 0;
2262}
2263
2264static void queue_con(struct ceph_connection *con)
2265{
2266 (void) queue_con_delay(con, 0);
2267}
2268
2269static bool con_sock_closed(struct ceph_connection *con)
2270{
2271 if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
2272 return false;
2273
2274#define CASE(x) \
2275 case CON_STATE_ ## x: \
2276 con->error_msg = "socket closed (con state " #x ")"; \
2277 break;
2278
2279 switch (con->state) {
2280 CASE(CLOSED);
2281 CASE(PREOPEN);
2282 CASE(CONNECTING);
2283 CASE(NEGOTIATING);
2284 CASE(OPEN);
2285 CASE(STANDBY);
2286 default:
2287 pr_warning("%s con %p unrecognized state %lu\n",
2288 __func__, con, con->state);
2289 con->error_msg = "unrecognized con state";
2290 BUG();
2291 break;
2262 } 2292 }
2293#undef CASE
2294
2295 return true;
2263} 2296}
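The CASE() helper above leans on two preprocessor features: token pasting (##) builds the CON_STATE_* enumerator, and stringizing (#) embeds the bare state name in the error message; the #undef then keeps the shorthand local to the function. The same trick in a standalone sketch:

enum sketch_state { SKETCH_CLOSED, SKETCH_OPEN };

static const char *sketch_state_name(enum sketch_state s)
{
#define CASE(x) case SKETCH_ ## x: return "state " #x
	switch (s) {
	CASE(CLOSED);	/* expands to: case SKETCH_CLOSED: return "state " "CLOSED"; */
	CASE(OPEN);
	}
#undef CASE
	return "unknown";
}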
2264 2297
2265/* 2298/*
@@ -2273,35 +2306,16 @@ static void con_work(struct work_struct *work)
2273 2306
2274 mutex_lock(&con->mutex); 2307 mutex_lock(&con->mutex);
2275restart: 2308restart:
2276 if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) { 2309 if (con_sock_closed(con))
2277 switch (con->state) {
2278 case CON_STATE_CONNECTING:
2279 con->error_msg = "connection failed";
2280 break;
2281 case CON_STATE_NEGOTIATING:
2282 con->error_msg = "negotiation failed";
2283 break;
2284 case CON_STATE_OPEN:
2285 con->error_msg = "socket closed";
2286 break;
2287 default:
2288 dout("unrecognized con state %d\n", (int)con->state);
2289 con->error_msg = "unrecognized con state";
2290 BUG();
2291 }
2292 goto fault; 2310 goto fault;
2293 }
2294 2311
2295 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) { 2312 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
2296 dout("con_work %p backing off\n", con); 2313 dout("con_work %p backing off\n", con);
2297 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2314 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2298 round_jiffies_relative(con->delay))) { 2315 if (ret) {
2299 dout("con_work %p backoff %lu\n", con, con->delay);
2300 mutex_unlock(&con->mutex);
2301 return;
2302 } else {
2303 dout("con_work %p FAILED to back off %lu\n", con, 2316 dout("con_work %p FAILED to back off %lu\n", con,
2304 con->delay); 2317 con->delay);
2318 BUG_ON(ret == -ENOENT);
2305 set_bit(CON_FLAG_BACKOFF, &con->flags); 2319 set_bit(CON_FLAG_BACKOFF, &con->flags);
2306 } 2320 }
2307 goto done; 2321 goto done;
@@ -2356,12 +2370,12 @@ fault:
2356static void ceph_fault(struct ceph_connection *con) 2370static void ceph_fault(struct ceph_connection *con)
2357 __releases(con->mutex) 2371 __releases(con->mutex)
2358{ 2372{
2359 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2373 pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2360 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2374 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2361 dout("fault %p state %lu to peer %s\n", 2375 dout("fault %p state %lu to peer %s\n",
2362 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2376 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2363 2377
2364 BUG_ON(con->state != CON_STATE_CONNECTING && 2378 WARN_ON(con->state != CON_STATE_CONNECTING &&
2365 con->state != CON_STATE_NEGOTIATING && 2379 con->state != CON_STATE_NEGOTIATING &&
2366 con->state != CON_STATE_OPEN); 2380 con->state != CON_STATE_OPEN);
2367 2381
@@ -2398,24 +2412,8 @@ static void ceph_fault(struct ceph_connection *con)
2398 con->delay = BASE_DELAY_INTERVAL; 2412 con->delay = BASE_DELAY_INTERVAL;
2399 else if (con->delay < MAX_DELAY_INTERVAL) 2413 else if (con->delay < MAX_DELAY_INTERVAL)
2400 con->delay *= 2; 2414 con->delay *= 2;
2401 con->ops->get(con); 2415 set_bit(CON_FLAG_BACKOFF, &con->flags);
2402 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2416 queue_con(con);
2403 round_jiffies_relative(con->delay))) {
2404 dout("fault queued %p delay %lu\n", con, con->delay);
2405 } else {
2406 con->ops->put(con);
2407 dout("fault failed to queue %p delay %lu, backoff\n",
2408 con, con->delay);
2409 /*
2410 * In many cases we see a socket state change
2411 * while con_work is running and end up
2412 * queuing (non-delayed) work, such that we
2413 * can't backoff with a delay. Set a flag so
2414 * that when con_work restarts we schedule the
2415 * delay then.
2416 */
2417 set_bit(CON_FLAG_BACKOFF, &con->flags);
2418 }
2419 } 2417 }
2420 2418
2421out_unlock: 2419out_unlock:
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c1d756cc7448..eb9a44478764 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
221 kref_init(&req->r_kref); 221 kref_init(&req->r_kref);
222 init_completion(&req->r_completion); 222 init_completion(&req->r_completion);
223 init_completion(&req->r_safe_completion); 223 init_completion(&req->r_safe_completion);
224 RB_CLEAR_NODE(&req->r_node);
224 INIT_LIST_HEAD(&req->r_unsafe_item); 225 INIT_LIST_HEAD(&req->r_unsafe_item);
225 INIT_LIST_HEAD(&req->r_linger_item); 226 INIT_LIST_HEAD(&req->r_linger_item);
226 INIT_LIST_HEAD(&req->r_linger_osd); 227 INIT_LIST_HEAD(&req->r_linger_osd);
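RB_CLEAR_NODE() above puts an rbtree node into a recognizable "not linked" state (the parent pointer refers to the node itself), so later teardown can use RB_EMPTY_NODE() to decide whether rb_erase() is needed; without the initialization, a never-inserted node is indistinguishable from a linked one. The idiom in generic form:

#include <linux/rbtree.h>

struct sketch_req {
	struct rb_node node;
};

static void sketch_req_init(struct sketch_req *req)
{
	RB_CLEAR_NODE(&req->node);	/* mark "not in any tree" */
}

static void sketch_req_remove(struct rb_root *root, struct sketch_req *req)
{
	if (!RB_EMPTY_NODE(&req->node)) {	/* only erase if linked */
		rb_erase(&req->node, root);
		RB_CLEAR_NODE(&req->node);
	}
}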
@@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
580 581
581 dout("__kick_osd_requests osd%d\n", osd->o_osd); 582 dout("__kick_osd_requests osd%d\n", osd->o_osd);
582 err = __reset_osd(osdc, osd); 583 err = __reset_osd(osdc, osd);
583 if (err == -EAGAIN) 584 if (err)
584 return; 585 return;
585 586
586 list_for_each_entry(req, &osd->o_requests, r_osd_item) { 587 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
@@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
607 } 608 }
608} 609}
609 610
610static void kick_osd_requests(struct ceph_osd_client *osdc,
611 struct ceph_osd *kickosd)
612{
613 mutex_lock(&osdc->request_mutex);
614 __kick_osd_requests(osdc, kickosd);
615 mutex_unlock(&osdc->request_mutex);
616}
617
618/* 611/*
619 * If the osd connection drops, we need to resubmit all requests. 612 * If the osd connection drops, we need to resubmit all requests.
620 */ 613 */
@@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
628 dout("osd_reset osd%d\n", osd->o_osd); 621 dout("osd_reset osd%d\n", osd->o_osd);
629 osdc = osd->o_osdc; 622 osdc = osd->o_osdc;
630 down_read(&osdc->map_sem); 623 down_read(&osdc->map_sem);
631 kick_osd_requests(osdc, osd); 624 mutex_lock(&osdc->request_mutex);
625 __kick_osd_requests(osdc, osd);
626 mutex_unlock(&osdc->request_mutex);
632 send_queued(osdc); 627 send_queued(osdc);
633 up_read(&osdc->map_sem); 628 up_read(&osdc->map_sem);
634} 629}
@@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
647 atomic_set(&osd->o_ref, 1); 642 atomic_set(&osd->o_ref, 1);
648 osd->o_osdc = osdc; 643 osd->o_osdc = osdc;
649 osd->o_osd = onum; 644 osd->o_osd = onum;
645 RB_CLEAR_NODE(&osd->o_node);
650 INIT_LIST_HEAD(&osd->o_requests); 646 INIT_LIST_HEAD(&osd->o_requests);
651 INIT_LIST_HEAD(&osd->o_linger_requests); 647 INIT_LIST_HEAD(&osd->o_linger_requests);
652 INIT_LIST_HEAD(&osd->o_osd_lru); 648 INIT_LIST_HEAD(&osd->o_osd_lru);
@@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
750 if (list_empty(&osd->o_requests) && 746 if (list_empty(&osd->o_requests) &&
751 list_empty(&osd->o_linger_requests)) { 747 list_empty(&osd->o_linger_requests)) {
752 __remove_osd(osdc, osd); 748 __remove_osd(osdc, osd);
749 ret = -ENODEV;
753 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], 750 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
754 &osd->o_con.peer_addr, 751 &osd->o_con.peer_addr,
755 sizeof(osd->o_con.peer_addr)) == 0 && 752 sizeof(osd->o_con.peer_addr)) == 0 &&
@@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
876 req->r_osd = NULL; 873 req->r_osd = NULL;
877 } 874 }
878 875
876 list_del_init(&req->r_req_lru_item);
879 ceph_osdc_put_request(req); 877 ceph_osdc_put_request(req);
880 878
881 list_del_init(&req->r_req_lru_item);
882 if (osdc->num_requests == 0) { 879 if (osdc->num_requests == 0) {
883 dout(" no requests, canceling timeout\n"); 880 dout(" no requests, canceling timeout\n");
884 __cancel_osd_timeout(osdc); 881 __cancel_osd_timeout(osdc);
@@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
910 struct ceph_osd_request *req) 907 struct ceph_osd_request *req)
911{ 908{
912 dout("__unregister_linger_request %p\n", req); 909 dout("__unregister_linger_request %p\n", req);
910 list_del_init(&req->r_linger_item);
913 if (req->r_osd) { 911 if (req->r_osd) {
914 list_del_init(&req->r_linger_item);
915 list_del_init(&req->r_linger_osd); 912 list_del_init(&req->r_linger_osd);
916 913
917 if (list_empty(&req->r_osd->o_requests) && 914 if (list_empty(&req->r_osd->o_requests) &&
@@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
 {
 	struct ceph_osd_client *osdc =
 		container_of(work, struct ceph_osd_client, timeout_work.work);
-	struct ceph_osd_request *req, *last_req = NULL;
+	struct ceph_osd_request *req;
 	struct ceph_osd *osd;
-	unsigned long timeout = osdc->client->options->osd_timeout * HZ;
 	unsigned long keepalive =
 		osdc->client->options->osd_keepalive_timeout * HZ;
-	unsigned long last_stamp = 0;
 	struct list_head slow_osds;
 	dout("timeout\n");
 	down_read(&osdc->map_sem);
@@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
 	mutex_lock(&osdc->request_mutex);
 
 	/*
-	 * reset osds that appear to be _really_ unresponsive.  this
-	 * is a failsafe measure.. we really shouldn't be getting to
-	 * this point if the system is working properly.  the monitors
-	 * should mark the osd as failed and we should find out about
-	 * it from an updated osd map.
-	 */
-	while (timeout && !list_empty(&osdc->req_lru)) {
-		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
-				 r_req_lru_item);
-
-		/* hasn't been long enough since we sent it? */
-		if (time_before(jiffies, req->r_stamp + timeout))
-			break;
-
-		/* hasn't been long enough since it was acked? */
-		if (req->r_request->ack_stamp == 0 ||
-		    time_before(jiffies, req->r_request->ack_stamp + timeout))
-			break;
-
-		BUG_ON(req == last_req && req->r_stamp == last_stamp);
-		last_req = req;
-		last_stamp = req->r_stamp;
-
-		osd = req->r_osd;
-		BUG_ON(!osd);
-		pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
-			   req->r_tid, osd->o_osd);
-		__kick_osd_requests(osdc, osd);
-	}
-
-	/*
 	 * ping osds that are a bit slow.  this ensures that if there
 	 * is a break in the TCP connection we will notice, and reopen
 	 * a connection with that osd (from the fault callback).
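Aside: with the failsafe reset loop gone, the keepalive scan below this hunk is what notices a wedged OSD connection. A sketch of the timing test that scan relies on, with illustrative names and an assumed 5-second interval; time_before() is used rather than a plain compare because jiffies wraps:

	unsigned long keepalive = 5 * HZ;	/* assumed interval, not the real option */

	if (time_before(jiffies, req->r_stamp + keepalive))
		return;				/* pinged recently enough */
	ceph_con_keepalive(&req->r_osd->o_con);	/* nudge the slow osd */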
@@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
- * Caller should hold map_sem for read and request_mutex.
+ * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
@@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 	for (p = rb_first(&osdc->requests); p; ) {
 		req = rb_entry(p, struct ceph_osd_request, r_node);
 		p = rb_next(p);
+
+		/*
+		 * For linger requests that have not yet been
+		 * registered, move them to the linger list; they'll
+		 * be sent to the osd in the loop below.  Unregister
+		 * the request before re-registering it as a linger
+		 * request to ensure the __map_request() below
+		 * will decide it needs to be sent.
+		 */
+		if (req->r_linger && list_empty(&req->r_linger_item)) {
+			dout("%p tid %llu restart on osd%d\n",
+			     req, req->r_tid,
+			     req->r_osd ? req->r_osd->o_osd : -1);
+			__unregister_request(osdc, req);
+			__register_linger_request(osdc, req);
+			continue;
+		}
+
 		err = __map_request(osdc, req, force_resend);
 		if (err < 0)
 			continue; /* error */
@@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 				req->r_flags |= CEPH_OSD_FLAG_RETRY;
 			}
 		}
-		if (req->r_linger && list_empty(&req->r_linger_item)) {
-			/*
-			 * register as a linger so that we will
-			 * re-submit below and get a new tid
-			 */
-			dout("%p tid %llu restart on osd%d\n",
-			     req, req->r_tid,
-			     req->r_osd ? req->r_osd->o_osd : -1);
-			__register_linger_request(osdc, req);
-			__unregister_request(osdc, req);
-		}
 	}
 
 	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
@@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
 
 		err = __map_request(osdc, req, force_resend);
+		dout("__map_request returned %d\n", err);
 		if (err == 0)
 			continue; /* no change and no osd was specified */
 		if (err < 0)
@@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 
 		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
 		     req->r_osd ? req->r_osd->o_osd : -1);
-		__unregister_linger_request(osdc, req);
 		__register_request(osdc, req);
+		__unregister_linger_request(osdc, req);
 	}
 	mutex_unlock(&osdc->request_mutex);
 
@@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 		dout("%d requests for down osds, need new map\n", needmap);
 		ceph_monc_request_next_osdmap(&osdc->client->monc);
 	}
+	reset_changed_osds(osdc);
 }
 
 
@@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 			osdc->osdmap = newmap;
 		}
 		kick_requests(osdc, 0);
-		reset_changed_osds(osdc);
 	} else {
 		dout("ignoring incremental map %u len %d\n",
 		     epoch, maplen);
@@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
 	event->data = data;
 	event->osdc = osdc;
 	INIT_LIST_HEAD(&event->osd_node);
+	RB_CLEAR_NODE(&event->node);
 	kref_init(&event->kref);   /* one ref for us */
 	kref_get(&event->kref);    /* one ref for the caller */
 	init_completion(&event->completion);
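Aside: RB_CLEAR_NODE() initializes an rb_node to a recognizable "not in any tree" state, which is what lets teardown code guard rb_erase() with RB_EMPTY_NODE(). A minimal sketch of that pairing, with hypothetical names (not the osd_client teardown itself):

	#include <linux/rbtree.h>

	struct example_event {
		struct rb_node node;		/* RB_CLEAR_NODE()'d at init */
	};

	static void example_teardown(struct rb_root *tree,
				     struct example_event *ev)
	{
		/* safe even if ev was never inserted into the tree */
		if (!RB_EMPTY_NODE(&ev->node))
			rb_erase(&ev->node, tree);
	}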
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 5433fb0eb3c6..de73214b5d26 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
 	return NULL;
 }
 
+const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
+{
+	struct ceph_pg_pool_info *pi;
+
+	if (id == CEPH_NOPOOL)
+		return NULL;
+
+	if (WARN_ON_ONCE(id > (u64) INT_MAX))
+		return NULL;
+
+	pi = __lookup_pg_pool(&map->pg_pools, (int) id);
+
+	return pi ? pi->name : NULL;
+}
+EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
+
 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 {
 	struct rb_node *rbp;
@@ -645,10 +661,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	ceph_decode_32_safe(p, end, max, bad);
 	while (max--) {
 		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+		err = -ENOMEM;
 		pi = kzalloc(sizeof(*pi), GFP_NOFS);
 		if (!pi)
 			goto bad;
 		pi->id = ceph_decode_32(p);
+		err = -EINVAL;
 		ev = ceph_decode_8(p);  /* encoding version */
 		if (ev > CEPH_PG_POOL_VERSION) {
 			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
@@ -664,8 +682,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 		__insert_pg_pool(&map->pg_pools, pi);
 	}
 
-	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
-		goto bad;
+	if (version >= 5) {
+		err = __decode_pool_names(p, end, map);
+		if (err < 0) {
+			dout("fail to decode pool names");
+			goto bad;
+		}
+	}
 
 	ceph_decode_32_safe(p, end, map->pool_max, bad);
 
@@ -745,7 +768,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	return map;
 
 bad:
-	dout("osdmap_decode fail\n");
+	dout("osdmap_decode fail err %d\n", err);
 	ceph_osdmap_destroy(map);
 	return ERR_PTR(err);
 }
@@ -839,6 +862,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 			if (ev > CEPH_PG_POOL_VERSION) {
 				pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
 					   ev, CEPH_PG_POOL_VERSION);
+				err = -EINVAL;
 				goto bad;
 			}
 			pi = __lookup_pg_pool(&map->pg_pools, pool);
@@ -855,8 +879,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 			if (err < 0)
 				goto bad;
 		}
-		if (version >= 5 && __decode_pool_names(p, end, map) < 0)
-			goto bad;
+		if (version >= 5) {
+			err = __decode_pool_names(p, end, map);
+			if (err < 0)
+				goto bad;
+		}
 
 	/* old_pool */
 	ceph_decode_32_safe(p, end, len, bad);
@@ -932,15 +959,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		(void) __remove_pg_mapping(&map->pg_temp, pgid);
 
 		/* insert */
-		if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
-			err = -EINVAL;
+		err = -EINVAL;
+		if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
 			goto bad;
-		}
+		err = -ENOMEM;
 		pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
-		if (!pg) {
-			err = -ENOMEM;
+		if (!pg)
 			goto bad;
-		}
 		pg->pgid = pgid;
 		pg->len = pglen;
 		for (j = 0; j < pglen; j++)
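Aside: both decode paths now follow the same error-path convention — load err with the right code immediately before each test that can jump to the shared bad: label, so the label always returns a meaningful errno. Condensed to a sketch with hypothetical names:

	err = -EINVAL;
	if (len > max_len)			/* validation failure */
		goto bad;
	err = -ENOMEM;
	buf = kzalloc(len, GFP_NOFS);		/* allocation failure */
	if (!buf)
		goto bad;
	return buf;
bad:
	return ERR_PTR(err);			/* err is always set here */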
diff --git a/net/core/Makefile b/net/core/Makefile
index 674641b13aea..b33b996f5dd6 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -9,10 +9,11 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y		     += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
-			sock_diag.o
+			sock_diag.o dev_ioctl.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
+obj-$(CONFIG_PROC_FS) += net-procfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
 obj-$(CONFIG_NET_DMA) += user_dma.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 0337e2b76862..368f9c3f9dc6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 		skb_queue_walk(queue, skb) {
 			*peeked = skb->peeked;
 			if (flags & MSG_PEEK) {
-				if (*off >= skb->len) {
+				if (*off >= skb->len && skb->len) {
 					*off -= skb->len;
 					continue;
 				}
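Aside: the added `&& skb->len` closes a corner case — a zero-length datagram always satisfied `*off >= skb->len`, so peeking with an offset skipped it without ever advancing, and the call could spin forever. User space reaches this path roughly like so (illustrative; fd, buf and buflen assumed set up elsewhere):

	#include <sys/socket.h>

	int off = 0;	/* start peeking from the queue head */

	/* after this, each MSG_PEEK recv() advances the peek offset
	 * instead of re-reading the first datagram */
	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	recv(fd, buf, buflen, MSG_PEEK);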
diff --git a/net/core/dev.c b/net/core/dev.c
index d0cbc93fcf32..17bc535115d3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -97,8 +97,6 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
@@ -106,12 +104,10 @@
 #include <net/xfrm.h>
 #include <linux/highmem.h>
 #include <linux/init.h>
-#include <linux/kmod.h>
 #include <linux/module.h>
 #include <linux/netpoll.h>
 #include <linux/rcupdate.h>
 #include <linux/delay.h>
-#include <net/wext.h>
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
@@ -132,9 +128,7 @@
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/net_tstamp.h>
 #include <linux/static_key.h>
-#include <net/flow_keys.h>
 
 #include "net-sysfs.h"
 
@@ -144,41 +138,10 @@
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
 
-/*
- *	The list of packet types we will receive (as opposed to discard)
- *	and the routines to invoke.
- *
- *	Why 16. Because with 16 the only overlap we get on a hash of the
- *	low nibble of the protocol value is RARP/SNAP/X.25.
- *
- *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
- *	       sure which should go first, but I bet it won't make much
- *	       difference if we are running VLANs.  The good news is that
- *	       this protocol won't be in the list unless compiled in, so
- *	       the average user (w/out VLANs) will not be adversely affected.
- *	       --BLG
- *
- *		0800	IP
- *		8100    802.1Q VLAN
- *		0001	802.3
- *		0002	AX.25
- *		0004	802.2
- *		8035	RARP
- *		0005	SNAP
- *		0805	X.25
- *		0806	ARP
- *		8137	IPX
- *		0009	Localtalk
- *		86DD	IPv6
- */
-
-#define PTYPE_HASH_SIZE	(16)
-#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
-
 static DEFINE_SPINLOCK(ptype_lock);
 static DEFINE_SPINLOCK(offload_lock);
-static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-static struct list_head ptype_all __read_mostly;	/* Taps */
+struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
 /*
@@ -203,7 +166,7 @@ static struct list_head offload_base __read_mostly;
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-DEFINE_SEQLOCK(devnet_rename_seq);
+seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
 {
@@ -1093,10 +1056,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
 	if (dev->flags & IFF_UP)
 		return -EBUSY;
 
-	write_seqlock(&devnet_rename_seq);
+	write_seqcount_begin(&devnet_rename_seq);
 
 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return 0;
 	}
 
@@ -1104,7 +1067,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
 	err = dev_get_valid_name(net, dev, newname);
 	if (err < 0) {
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return err;
 	}
 
@@ -1112,11 +1075,11 @@ rollback:
 	ret = device_rename(&dev->dev, dev->name);
 	if (ret) {
 		memcpy(dev->name, oldname, IFNAMSIZ);
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return ret;
 	}
 
-	write_sequnlock(&devnet_rename_seq);
+	write_seqcount_end(&devnet_rename_seq);
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del_rcu(&dev->name_hlist);
@@ -1135,7 +1098,7 @@ rollback:
 	/* err >= 0 after dev_alloc_name() or stores the first errno */
 	if (err >= 0) {
 		err = ret;
-		write_seqlock(&devnet_rename_seq);
+		write_seqcount_begin(&devnet_rename_seq);
 		memcpy(dev->name, oldname, IFNAMSIZ);
 		goto rollback;
 	} else {
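Aside: devnet_rename_seq can be a bare seqcount because writers are already serialized — dev_change_name() runs under RTNL — so the spinlock half of the seqlock was unused. A reader wanting a stable copy of dev->name would retry in the usual way; a sketch with assumed locals:

	unsigned int seq;
	char name[IFNAMSIZ];

	do {
		seq = read_seqcount_begin(&devnet_rename_seq);
		strncpy(name, dev->name, IFNAMSIZ);
	} while (read_seqcount_retry(&devnet_rename_seq, seq));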
@@ -1227,36 +1190,6 @@ void netdev_notify_peers(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_notify_peers);
 
-/**
- *	dev_load 	- load a network module
- *	@net: the applicable net namespace
- *	@name: name of interface
- *
- *	If a network interface is not present and the process has suitable
- *	privileges this function loads the module. If module loading is not
- *	available in this kernel then it becomes a nop.
- */
-
-void dev_load(struct net *net, const char *name)
-{
-	struct net_device *dev;
-	int no_module;
-
-	rcu_read_lock();
-	dev = dev_get_by_name_rcu(net, name);
-	rcu_read_unlock();
-
-	no_module = !dev;
-	if (no_module && capable(CAP_NET_ADMIN))
-		no_module = request_module("netdev-%s", name);
-	if (no_module && capable(CAP_SYS_MODULE)) {
-		if (!request_module("%s", name))
-			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-				name);
-	}
-}
-EXPORT_SYMBOL(dev_load);
-
 static int __dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -1267,6 +1200,14 @@ static int __dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	/* Block netpoll from trying to do any rx path servicing.
+	 * If we don't do this there is a chance ndo_poll_controller
+	 * or ndo_poll may be running while we open the device
+	 */
+	ret = netpoll_rx_disable(dev);
+	if (ret)
+		return ret;
+
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
 	if (ret)
@@ -1280,6 +1221,8 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
+	netpoll_rx_enable(dev);
+
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
@@ -1371,9 +1314,16 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
+	/* Temporarily disable netpoll until the interface is down */
+	retval = netpoll_rx_disable(dev);
+	if (retval)
+		return retval;
+
 	list_add(&dev->unreg_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
+
+	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1409,14 +1359,22 @@ static int dev_close_many(struct list_head *head)
 */
 int dev_close(struct net_device *dev)
 {
+	int ret = 0;
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
+		/* Block netpoll rx while the interface is going down */
+		ret = netpoll_rx_disable(dev);
+		if (ret)
+			return ret;
+
 		list_add(&dev->unreg_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
+
+		netpoll_rx_enable(dev);
 	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dev_close);
 
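Aside: __dev_open(), __dev_close() and dev_close() now bracket the state change identically. The shape of the pattern, reduced to a sketch (do_transition() is a stand-in, not a real helper):

	ret = netpoll_rx_disable(dev);	/* quiesce netpoll rx servicing */
	if (ret)
		return ret;

	ret = do_transition(dev);	/* hypothetical open/close work */

	netpoll_rx_enable(dev);		/* always re-arm on the way out */
	return ret;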
@@ -1621,57 +1579,6 @@ static inline void net_timestamp_set(struct sk_buff *skb)
 			__net_timestamp(SKB);		\
 	}						\
 
-static int net_hwtstamp_validate(struct ifreq *ifr)
-{
-	struct hwtstamp_config cfg;
-	enum hwtstamp_tx_types tx_type;
-	enum hwtstamp_rx_filters rx_filter;
-	int tx_type_valid = 0;
-	int rx_filter_valid = 0;
-
-	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
-		return -EFAULT;
-
-	if (cfg.flags) /* reserved for future extensions */
-		return -EINVAL;
-
-	tx_type = cfg.tx_type;
-	rx_filter = cfg.rx_filter;
-
-	switch (tx_type) {
-	case HWTSTAMP_TX_OFF:
-	case HWTSTAMP_TX_ON:
-	case HWTSTAMP_TX_ONESTEP_SYNC:
-		tx_type_valid = 1;
-		break;
-	}
-
-	switch (rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-	case HWTSTAMP_FILTER_ALL:
-	case HWTSTAMP_FILTER_SOME:
-	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		rx_filter_valid = 1;
-		break;
-	}
-
-	if (!tx_type_valid || !rx_filter_valid)
-		return -ERANGE;
-
-	return 0;
-}
-
 static inline bool is_skb_forwardable(struct net_device *dev,
 				      struct sk_buff *skb)
 {
@@ -1857,6 +1764,228 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 	}
 }
 
+#ifdef CONFIG_XPS
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
+					int cpu, u16 index)
+{
+	struct xps_map *map = NULL;
+	int pos;
+
+	if (dev_maps)
+		map = xmap_dereference(dev_maps->cpu_map[cpu]);
+
+	for (pos = 0; map && pos < map->len; pos++) {
+		if (map->queues[pos] == index) {
+			if (map->len > 1) {
+				map->queues[pos] = map->queues[--map->len];
+			} else {
+				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
+				kfree_rcu(map, rcu);
+				map = NULL;
+			}
+			break;
+		}
+	}
+
+	return map;
+}
+
+static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+{
+	struct xps_dev_maps *dev_maps;
+	int cpu, i;
+	bool active = false;
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (!dev_maps)
+		goto out_no_maps;
+
+	for_each_possible_cpu(cpu) {
+		for (i = index; i < dev->num_tx_queues; i++) {
+			if (!remove_xps_queue(dev_maps, cpu, i))
+				break;
+		}
+		if (i == dev->num_tx_queues)
+			active = true;
+	}
+
+	if (!active) {
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
+		kfree_rcu(dev_maps, rcu);
+	}
+
+	for (i = index; i < dev->num_tx_queues; i++)
+		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+					     NUMA_NO_NODE);
+
+out_no_maps:
+	mutex_unlock(&xps_map_mutex);
+}
+
+static struct xps_map *expand_xps_map(struct xps_map *map,
+				      int cpu, u16 index)
+{
+	struct xps_map *new_map;
+	int alloc_len = XPS_MIN_MAP_ALLOC;
+	int i, pos;
+
+	for (pos = 0; map && pos < map->len; pos++) {
+		if (map->queues[pos] != index)
+			continue;
+		return map;
+	}
+
+	/* Need to add queue to this CPU's existing map */
+	if (map) {
+		if (pos < map->alloc_len)
+			return map;
+
+		alloc_len = map->alloc_len * 2;
+	}
+
+	/* Need to allocate new map to store queue on this CPU's map */
+	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
+			       cpu_to_node(cpu));
+	if (!new_map)
+		return NULL;
+
+	for (i = 0; i < pos; i++)
+		new_map->queues[i] = map->queues[i];
+	new_map->alloc_len = alloc_len;
+	new_map->len = pos;
+
+	return new_map;
+}
+
+int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+{
+	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
+	struct xps_map *map, *new_map;
+	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
+	int cpu, numa_node_id = -2;
+	bool active = false;
+
+	mutex_lock(&xps_map_mutex);
+
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	/* allocate memory for queue storage */
+	for_each_online_cpu(cpu) {
+		if (!cpumask_test_cpu(cpu, mask))
+			continue;
+
+		if (!new_dev_maps)
+			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
+		if (!new_dev_maps)
+			return -ENOMEM;
+
+		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+				 NULL;
+
+		map = expand_xps_map(map, cpu, index);
+		if (!map)
+			goto error;
+
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+	}
+
+	if (!new_dev_maps)
+		goto out_no_new_maps;
+
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
+			/* add queue to CPU maps */
+			int pos = 0;
+
+			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+			while ((pos < map->len) && (map->queues[pos] != index))
+				pos++;
+
+			if (pos == map->len)
+				map->queues[map->len++] = index;
+#ifdef CONFIG_NUMA
+			if (numa_node_id == -2)
+				numa_node_id = cpu_to_node(cpu);
+			else if (numa_node_id != cpu_to_node(cpu))
+				numa_node_id = -1;
+#endif
+		} else if (dev_maps) {
+			/* fill in the new device map from the old device map */
+			map = xmap_dereference(dev_maps->cpu_map[cpu]);
+			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+		}
+
+	}
+
+	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+
+	/* Cleanup old maps */
+	if (dev_maps) {
+		for_each_possible_cpu(cpu) {
+			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+			map = xmap_dereference(dev_maps->cpu_map[cpu]);
+			if (map && map != new_map)
+				kfree_rcu(map, rcu);
+		}
+
+		kfree_rcu(dev_maps, rcu);
+	}
+
+	dev_maps = new_dev_maps;
+	active = true;
+
+out_no_new_maps:
+	/* update Tx queue numa node */
+	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+				     (numa_node_id >= 0) ? numa_node_id :
+				     NUMA_NO_NODE);
+
+	if (!dev_maps)
+		goto out_no_maps;
+
+	/* removes queue from unused CPUs */
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
+			continue;
+
+		if (remove_xps_queue(dev_maps, cpu, index))
+			active = true;
+	}
+
+	/* free map if not active */
+	if (!active) {
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
+		kfree_rcu(dev_maps, rcu);
+	}
+
+out_no_maps:
+	mutex_unlock(&xps_map_mutex);
+
+	return 0;
+error:
+	/* remove any maps that we added */
+	for_each_possible_cpu(cpu) {
+		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+				 NULL;
+		if (new_map && new_map != map)
+			kfree(new_map);
+	}
+
+	mutex_unlock(&xps_map_mutex);
+
+	kfree(new_dev_maps);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(netif_set_xps_queue);
+
+#endif
 /*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
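Aside: netif_set_xps_queue() is the in-kernel counterpart of writing /sys/class/net/<dev>/queues/tx-<n>/xps_cpus. A sketch of a driver seeding a one-queue-per-cpu policy at setup time; the mapping choice and the omitted error handling are illustrative only:

	static void example_setup_xps(struct net_device *dev)
	{
		int i;

		for (i = 0; i < dev->real_num_tx_queues; i++) {
			cpumask_t mask;

			cpumask_clear(&mask);
			cpumask_set_cpu(i % num_online_cpus(), &mask);
			netif_set_xps_queue(dev, &mask, i);
		}
	}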
@@ -1880,8 +2009,12 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 		if (dev->num_tc)
 			netif_setup_tc(dev, txq);
 
-		if (txq < dev->real_num_tx_queues)
+		if (txq < dev->real_num_tx_queues) {
 			qdisc_reset_all_tx_gt(dev, txq);
+#ifdef CONFIG_XPS
+			netif_reset_xps_queues_gt(dev, txq);
+#endif
+		}
 	}
 
 	dev->real_num_tx_queues = txq;
@@ -2046,6 +2179,15 @@ int skb_checksum_help(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
+	/* Before computing a checksum, we should make sure no frag could
+	 * be modified by an external entity : checksum could be wrong.
+	 */
+	if (skb_has_shared_frag(skb)) {
+		ret = __skb_linearize(skb);
+		if (ret)
+			goto out;
+	}
+
 	offset = skb_checksum_start_offset(skb);
 	BUG_ON(offset >= skb_headlen(skb));
 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
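Aside: for context, the rest of skb_checksum_help() (outside this hunk) folds the freshly computed sum into place — roughly, assuming the 3.8-era layout of the function:

	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;	/* where the L4 header wants it */
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);

The linearization added above guarantees those bytes cannot be rewritten by user space between the skb_checksum() and the store.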
@@ -2069,25 +2211,19 @@ out:
 EXPORT_SYMBOL(skb_checksum_help);
 
 /**
- *	skb_gso_segment - Perform segmentation on skb.
+ *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
- *
- *	This function segments the given skb and returns a list of segments.
- *
- *	It may return NULL if the skb requires no segmentation.  This is
- *	only possible when GSO is used for verifying header integrity.
 */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb,
-	netdev_features_t features)
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
-	int vlan_depth = ETH_HLEN;
-	int err;
 
 	while (type == htons(ETH_P_8021Q)) {
+		int vlan_depth = ETH_HLEN;
 		struct vlan_hdr *vh;
 
 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -2098,22 +2234,14 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 		vlan_depth += VLAN_HLEN;
 	}
 
-	skb_reset_mac_header(skb);
-	skb->mac_len = skb->network_header - skb->mac_header;
 	__skb_pull(skb, skb->mac_len);
 
-	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-		skb_warn_bad_offload(skb);
-
-		if (skb_header_cloned(skb) &&
-		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-			return ERR_PTR(err);
-	}
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &offload_base, list) {
 		if (ptype->type == type && ptype->callbacks.gso_segment) {
 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+				int err;
+
 				err = ptype->callbacks.gso_send_check(skb);
 				segs = ERR_PTR(err);
 				if (err || skb_gso_ok(skb, features))
@@ -2131,7 +2259,50 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 
 	return segs;
 }
-EXPORT_SYMBOL(skb_gso_segment);
+EXPORT_SYMBOL(skb_mac_gso_segment);
+
+
+/* openvswitch calls this on rx path, so we need a different check.
+ */
+static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+{
+	if (tx_path)
+		return skb->ip_summed != CHECKSUM_PARTIAL;
+	else
+		return skb->ip_summed == CHECKSUM_NONE;
+}
+
+/**
+ *	__skb_gso_segment - Perform segmentation on skb.
+ *	@skb: buffer to segment
+ *	@features: features for the output path (see dev->features)
+ *	@tx_path: whether it is called in TX path
+ *
+ *	This function segments the given skb and returns a list of segments.
+ *
+ *	It may return NULL if the skb requires no segmentation.  This is
+ *	only possible when GSO is used for verifying header integrity.
+ */
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+				  netdev_features_t features, bool tx_path)
+{
+	if (unlikely(skb_needs_check(skb, tx_path))) {
+		int err;
+
+		skb_warn_bad_offload(skb);
+
+		if (skb_header_cloned(skb) &&
+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+			return ERR_PTR(err);
+	}
+
+	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+	skb_reset_mac_header(skb);
+	skb_reset_mac_len(skb);
+
+	return skb_mac_gso_segment(skb, features);
+}
+EXPORT_SYMBOL(__skb_gso_segment);
 
 /* Take action when hardware reception checksum errors are detected. */
 #ifdef CONFIG_BUG
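Aside: with segmentation split in two, the old skb_gso_segment() entry point presumably survives as a thin tx-path wrapper in netdevice.h, along these lines:

	static inline
	struct sk_buff *skb_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
	{
		return __skb_gso_segment(skb, features, true);	/* tx path */
	}

while an rx-path caller such as openvswitch would pass tx_path = false and get the CHECKSUM_NONE test instead.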
@@ -2410,126 +2581,28 @@ out:
 	return rc;
 }
 
-static u32 hashrnd __read_mostly;
-
-/*
- * Returns a Tx hash based on the given packet descriptor a Tx queues' number
- * to be used as a distribution range.
- */
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
-		  unsigned int num_tx_queues)
-{
-	u32 hash;
-	u16 qoffset = 0;
-	u16 qcount = num_tx_queues;
-
-	if (skb_rx_queue_recorded(skb)) {
-		hash = skb_get_rx_queue(skb);
-		while (unlikely(hash >= num_tx_queues))
-			hash -= num_tx_queues;
-		return hash;
-	}
-
-	if (dev->num_tc) {
-		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-		qoffset = dev->tc_to_txq[tc].offset;
-		qcount = dev->tc_to_txq[tc].count;
-	}
-
-	if (skb->sk && skb->sk->sk_hash)
-		hash = skb->sk->sk_hash;
-	else
-		hash = (__force u16) skb->protocol;
-	hash = jhash_1word(hash, hashrnd);
-
-	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
-}
-EXPORT_SYMBOL(__skb_tx_hash);
-
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
-	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
-				     dev->name, queue_index,
-				     dev->real_num_tx_queues);
-		return 0;
-	}
-	return queue_index;
-}
-
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
-{
-#ifdef CONFIG_XPS
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	int queue_index = -1;
-
-	rcu_read_lock();
-	dev_maps = rcu_dereference(dev->xps_maps);
-	if (dev_maps) {
-		map = rcu_dereference(
-		    dev_maps->cpu_map[raw_smp_processor_id()]);
-		if (map) {
-			if (map->len == 1)
-				queue_index = map->queues[0];
-			else {
-				u32 hash;
-				if (skb->sk && skb->sk->sk_hash)
-					hash = skb->sk->sk_hash;
-				else
-					hash = (__force u16) skb->protocol ^
-					    skb->rxhash;
-				hash = jhash_1word(hash, hashrnd);
-				queue_index = map->queues[
-				    ((u64)hash * map->len) >> 32];
-			}
-			if (unlikely(queue_index >= dev->real_num_tx_queues))
-				queue_index = -1;
-		}
-	}
-	rcu_read_unlock();
-
-	return queue_index;
-#else
-	return -1;
-#endif
-}
-
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
-{
-	int queue_index;
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	if (dev->real_num_tx_queues == 1)
-		queue_index = 0;
-	else if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
-	} else {
-		struct sock *sk = skb->sk;
-		queue_index = sk_tx_queue_get(sk);
-
-		if (queue_index < 0 || skb->ooo_okay ||
-		    queue_index >= dev->real_num_tx_queues) {
-			int old_index = queue_index;
-
-			queue_index = get_xps_queue(dev, skb);
-			if (queue_index < 0)
-				queue_index = skb_tx_hash(dev, skb);
-
-			if (queue_index != old_index && sk) {
-				struct dst_entry *dst =
-				    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-				if (dst && skb_dst(skb) == dst)
-					sk_tx_queue_set(sk, queue_index);
-			}
-		}
-	}
-
-	skb_set_queue_mapping(skb, queue_index);
-	return netdev_get_tx_queue(dev, queue_index);
-}
+static void qdisc_pkt_len_init(struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+
+	/* To get more precise estimation of bytes sent on wire,
+	 * we add to pkt_len the headers size of all segments
+	 */
+	if (shinfo->gso_size) {
+		unsigned int hdr_len;
+
+		/* mac layer + network layer */
+		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+		/* + transport layer */
+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+			hdr_len += tcp_hdrlen(skb);
+		else
+			hdr_len += sizeof(struct udphdr);
+		qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
+	}
+}
 
 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
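Aside: a worked example of the estimate above, with assumed numbers — a TCP GSO skb with skb->len = 4380, gso_segs = 3 and 14 + 20 + 20 = 54 bytes of Ethernet, IPv4 and TCP headers is charged pkt_len = 4380 + (3 - 1) * 54 = 4488 bytes, since the second and third segments each repeat the 54-byte header on the wire.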
@@ -2540,7 +2613,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	bool contended;
 	int rc;
 
-	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	qdisc_pkt_len_init(skb);
 	qdisc_calculate_pkt_len(skb, q);
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
@@ -2663,6 +2736,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
+	skb_reset_mac_header(skb);
+
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
@@ -2757,41 +2832,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
-/*
- * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
- * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
- * if hash is a canonical 4-tuple hash over transport ports.
- */
-void __skb_get_rxhash(struct sk_buff *skb)
-{
-	struct flow_keys keys;
-	u32 hash;
-
-	if (!skb_flow_dissect(skb, &keys))
-		return;
-
-	if (keys.ports)
-		skb->l4_rxhash = 1;
-
-	/* get a consistent hash (same value on both flow directions) */
-	if (((__force u32)keys.dst < (__force u32)keys.src) ||
-	    (((__force u32)keys.dst == (__force u32)keys.src) &&
-	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
-		swap(keys.dst, keys.src);
-		swap(keys.port16[0], keys.port16[1]);
-	}
-
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src,
-			    (__force u32)keys.ports, hashrnd);
-	if (!hash)
-		hash = 1;
-
-	skb->rxhash = hash;
-}
-EXPORT_SYMBOL(__skb_get_rxhash);
-
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
@@ -3318,7 +3358,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
 	}
 }
 
-static int __netif_receive_skb(struct sk_buff *skb)
+static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
 	struct packet_type *ptype, *pt_prev;
 	rx_handler_func_t *rx_handler;
@@ -3327,24 +3367,11 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
-	unsigned long pflags = current->flags;
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
 
-	/*
-	 * PFMEMALLOC skbs are special, they should
-	 * - be delivered to SOCK_MEMALLOC sockets only
-	 * - stay away from userspace
-	 * - have bounded memory usage
-	 *
-	 * Use PF_MEMALLOC as this saves us from propagating the allocation
-	 * context down to all allocation sites.
-	 */
-	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
-		current->flags |= PF_MEMALLOC;
-
 	/* if we've gotten here through NAPI, check netpoll */
 	if (netpoll_receive_skb(skb))
 		goto out;
@@ -3352,7 +3379,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	orig_dev = skb->dev;
 
 	skb_reset_network_header(skb);
-	skb_reset_transport_header(skb);
+	if (!skb_transport_header_was_set(skb))
+		skb_reset_transport_header(skb);
 	skb_reset_mac_len(skb);
 
 	pt_prev = NULL;
@@ -3377,7 +3405,7 @@ another_round:
 	}
 #endif
 
-	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+	if (pfmemalloc)
 		goto skip_taps;
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -3396,8 +3424,7 @@ skip_taps:
 ncls:
 #endif
 
-	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
-				&& !skb_pfmemalloc_protocol(skb))
+	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
 	if (vlan_tx_tag_present(skb)) {
@@ -3467,7 +3494,31 @@ drop:
 unlock:
 	rcu_read_unlock();
 out:
-	tsk_restore_flags(current, pflags, PF_MEMALLOC);
+	return ret;
+}
+
+static int __netif_receive_skb(struct sk_buff *skb)
+{
+	int ret;
+
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
+		unsigned long pflags = current->flags;
+
+		/*
+		 * PFMEMALLOC skbs are special, they should
+		 * - be delivered to SOCK_MEMALLOC sockets only
+		 * - stay away from userspace
+		 * - have bounded memory usage
+		 *
+		 * Use PF_MEMALLOC as this saves us from propagating the allocation
+		 * context down to all allocation sites.
+		 */
+		current->flags |= PF_MEMALLOC;
+		ret = __netif_receive_skb_core(skb, true);
+		tsk_restore_flags(current, pflags, PF_MEMALLOC);
+	} else
+		ret = __netif_receive_skb_core(skb, false);
+
 	return ret;
 }
 
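Aside: the refactor isolates the save/flag/restore idiom so the common case pays nothing for it. The idiom itself, as a generic sketch (hypothetical helper, not a kernel function):

	static int call_with_memalloc(int (*fn)(void *arg), void *arg)
	{
		unsigned long pflags = current->flags;
		int ret;

		current->flags |= PF_MEMALLOC;	/* may dip into reserves */
		ret = fn(arg);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
		return ret;
	}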
@@ -3634,7 +3685,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	__be16 type = skb->protocol;
 	struct list_head *head = &offload_base;
 	int same_flow;
-	int mac_len;
 	enum gro_result ret;
 
 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
@@ -3651,8 +3701,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 			continue;
 
 		skb_set_network_header(skb, skb_gro_offset(skb));
-		mac_len = skb->network_header - skb->mac_header;
-		skb->mac_len = mac_len;
+		skb_reset_mac_len(skb);
 		NAPI_GRO_CB(skb)->same_flow = 0;
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
@@ -4134,530 +4183,231 @@ softnet_break:
4134 goto out; 4183 goto out;
4135} 4184}
4136 4185
4137static gifconf_func_t *gifconf_list[NPROTO]; 4186struct netdev_upper {
4138
4139/**
4140 * register_gifconf - register a SIOCGIF handler
4141 * @family: Address family
4142 * @gifconf: Function handler
4143 *
4144 * Register protocol dependent address dumping routines. The handler
4145 * that is passed must not be freed or reused until it has been replaced
4146 * by another handler.
4147 */
4148int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4149{
4150 if (family >= NPROTO)
4151 return -EINVAL;
4152 gifconf_list[family] = gifconf;
4153 return 0;
4154}
4155EXPORT_SYMBOL(register_gifconf);
4156
4157
4158/*
4159 * Map an interface index to its name (SIOCGIFNAME)
4160 */
4161
4162/*
4163 * We need this ioctl for efficient implementation of the
4164 * if_indextoname() function required by the IPv6 API. Without
4165 * it, we would have to search all the interfaces to find a
4166 * match. --pb
4167 */
4168
4169static int dev_ifname(struct net *net, struct ifreq __user *arg)
4170{
4171 struct net_device *dev; 4187 struct net_device *dev;
4172 struct ifreq ifr; 4188 bool master;
4173 unsigned seq; 4189 struct list_head list;
4174 4190 struct rcu_head rcu;
4175 /* 4191 struct list_head search_list;
4176 * Fetch the caller's info block. 4192};
4177 */
4178 4193
4179 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 4194static void __append_search_uppers(struct list_head *search_list,
4180 return -EFAULT; 4195 struct net_device *dev)
4196{
4197 struct netdev_upper *upper;
4181 4198
4182retry: 4199 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4183 seq = read_seqbegin(&devnet_rename_seq); 4200 /* check if this upper is not already in search list */
4184 rcu_read_lock(); 4201 if (list_empty(&upper->search_list))
4185 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 4202 list_add_tail(&upper->search_list, search_list);
4186 if (!dev) {
4187 rcu_read_unlock();
4188 return -ENODEV;
4189 } 4203 }
4190
4191 strcpy(ifr.ifr_name, dev->name);
4192 rcu_read_unlock();
4193 if (read_seqretry(&devnet_rename_seq, seq))
4194 goto retry;
4195
4196 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4197 return -EFAULT;
4198 return 0;
4199} 4204}
4200 4205
4201/* 4206static bool __netdev_search_upper_dev(struct net_device *dev,
4202 * Perform a SIOCGIFCONF call. This structure will change 4207 struct net_device *upper_dev)
4203 * size eventually, and there is nothing I can do about it.
4204 * Thus we will need a 'compatibility mode'.
4205 */
4206
4207static int dev_ifconf(struct net *net, char __user *arg)
4208{ 4208{
4209 struct ifconf ifc; 4209 LIST_HEAD(search_list);
4210 struct net_device *dev; 4210 struct netdev_upper *upper;
4211 char __user *pos; 4211 struct netdev_upper *tmp;
4212 int len; 4212 bool ret = false;
4213 int total;
4214 int i;
4215
4216 /*
4217 * Fetch the caller's info block.
4218 */
4219 4213
4220 if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) 4214 __append_search_uppers(&search_list, dev);
4221 return -EFAULT; 4215 list_for_each_entry(upper, &search_list, search_list) {
4222 4216 if (upper->dev == upper_dev) {
4223 pos = ifc.ifc_buf; 4217 ret = true;
4224 len = ifc.ifc_len; 4218 break;
4225
4226 /*
4227 * Loop over the interfaces, and write an info block for each.
4228 */
4229
4230 total = 0;
4231 for_each_netdev(net, dev) {
4232 for (i = 0; i < NPROTO; i++) {
4233 if (gifconf_list[i]) {
4234 int done;
4235 if (!pos)
4236 done = gifconf_list[i](dev, NULL, 0);
4237 else
4238 done = gifconf_list[i](dev, pos + total,
4239 len - total);
4240 if (done < 0)
4241 return -EFAULT;
4242 total += done;
4243 }
4244 } 4219 }
4220 __append_search_uppers(&search_list, upper->dev);
4245 } 4221 }
4246 4222 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4247 /* 4223 INIT_LIST_HEAD(&upper->search_list);
4248 * All done. Write the updated control block back to the caller. 4224 return ret;
4249 */
4250 ifc.ifc_len = total;
4251
4252 /*
4253 * Both BSD and Solaris return 0 here, so we do too.
4254 */
4255 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4256} 4225}
4257 4226
4258#ifdef CONFIG_PROC_FS 4227static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4259 4228 struct net_device *upper_dev)
4260#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4261
4262#define get_bucket(x) ((x) >> BUCKET_SPACE)
4263#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4264#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4265
4266static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4267{ 4229{
4268 struct net *net = seq_file_net(seq); 4230 struct netdev_upper *upper;
4269 struct net_device *dev;
4270 struct hlist_node *p;
4271 struct hlist_head *h;
4272 unsigned int count = 0, offset = get_offset(*pos);
4273 4231
4274 h = &net->dev_name_head[get_bucket(*pos)]; 4232 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4275 hlist_for_each_entry_rcu(dev, p, h, name_hlist) { 4233 if (upper->dev == upper_dev)
4276 if (++count == offset) 4234 return upper;
4277 return dev;
4278 } 4235 }
4279
4280 return NULL; 4236 return NULL;
4281} 4237}
4282 4238
4283static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) 4239/**
4284{ 4240 * netdev_has_upper_dev - Check if device is linked to an upper device
4285 struct net_device *dev; 4241 * @dev: device
4286 unsigned int bucket; 4242 * @upper_dev: upper device to check
4287 4243 *
4288 do { 4244 * Find out if a device is linked to specified upper device and return true
4289 dev = dev_from_same_bucket(seq, pos); 4245 * in case it is. Note that this checks only immediate upper device,
4290 if (dev) 4246 * not through a complete stack of devices. The caller must hold the RTNL lock.
4291 return dev;
4292
4293 bucket = get_bucket(*pos) + 1;
4294 *pos = set_bucket_offset(bucket, 1);
4295 } while (bucket < NETDEV_HASHENTRIES);
4296
4297 return NULL;
4298}
4299
4300/*
4301 * This is invoked by the /proc filesystem handler to display a device
4302 * in detail.
4303 */ 4247 */
4304void *dev_seq_start(struct seq_file *seq, loff_t *pos) 4248bool netdev_has_upper_dev(struct net_device *dev,
4305 __acquires(RCU) 4249 struct net_device *upper_dev)
4306{
4307 rcu_read_lock();
4308 if (!*pos)
4309 return SEQ_START_TOKEN;
4310
4311 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4312 return NULL;
4313
4314 return dev_from_bucket(seq, pos);
4315}
4316
4317void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4318{
4319 ++*pos;
4320 return dev_from_bucket(seq, pos);
4321}
4322
4323void dev_seq_stop(struct seq_file *seq, void *v)
4324 __releases(RCU)
4325{
4326 rcu_read_unlock();
4327}
4328
4329static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4330{ 4250{
4331 struct rtnl_link_stats64 temp; 4251 ASSERT_RTNL();
4332 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4333 4252
4334 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 4253 return __netdev_find_upper(dev, upper_dev);
4335 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4336 dev->name, stats->rx_bytes, stats->rx_packets,
4337 stats->rx_errors,
4338 stats->rx_dropped + stats->rx_missed_errors,
4339 stats->rx_fifo_errors,
4340 stats->rx_length_errors + stats->rx_over_errors +
4341 stats->rx_crc_errors + stats->rx_frame_errors,
4342 stats->rx_compressed, stats->multicast,
4343 stats->tx_bytes, stats->tx_packets,
4344 stats->tx_errors, stats->tx_dropped,
4345 stats->tx_fifo_errors, stats->collisions,
4346 stats->tx_carrier_errors +
4347 stats->tx_aborted_errors +
4348 stats->tx_window_errors +
4349 stats->tx_heartbeat_errors,
4350 stats->tx_compressed);
4351} 4254}
4255EXPORT_SYMBOL(netdev_has_upper_dev);
4352 4256
4353/* 4257/**
4354 * Called from the PROCfs module. This now uses the new arbitrary sized 4258 * netdev_has_any_upper_dev - Check if device is linked to some device
4355 * /proc/net interface to create /proc/net/dev 4259 * @dev: device
4260 *
4261 * Find out if a device is linked to an upper device and return true in case
4262 * it is. The caller must hold the RTNL lock.
4356 */ 4263 */
4357static int dev_seq_show(struct seq_file *seq, void *v) 4264bool netdev_has_any_upper_dev(struct net_device *dev)
4358{
4359 if (v == SEQ_START_TOKEN)
4360 seq_puts(seq, "Inter-| Receive "
4361 " | Transmit\n"
4362 " face |bytes packets errs drop fifo frame "
4363 "compressed multicast|bytes packets errs "
4364 "drop fifo colls carrier compressed\n");
4365 else
4366 dev_seq_printf_stats(seq, v);
4367 return 0;
4368}
4369
4370static struct softnet_data *softnet_get_online(loff_t *pos)
4371{
4372 struct softnet_data *sd = NULL;
4373
4374 while (*pos < nr_cpu_ids)
4375 if (cpu_online(*pos)) {
4376 sd = &per_cpu(softnet_data, *pos);
4377 break;
4378 } else
4379 ++*pos;
4380 return sd;
4381}
4382
4383static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4384{ 4265{
4385 return softnet_get_online(pos); 4266 ASSERT_RTNL();
4386}
4387
4388static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4389{
4390 ++*pos;
4391 return softnet_get_online(pos);
4392}
4393 4267
4394static void softnet_seq_stop(struct seq_file *seq, void *v) 4268 return !list_empty(&dev->upper_dev_list);
4395{
4396} 4269}
4270EXPORT_SYMBOL(netdev_has_any_upper_dev);
4397 4271
4398static int softnet_seq_show(struct seq_file *seq, void *v) 4272/**
4399{ 4273 * netdev_master_upper_dev_get - Get master upper device
4400 struct softnet_data *sd = v; 4274 * @dev: device
4401 4275 *
4402 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 4276 * Find a master upper device and return pointer to it or NULL in case
4403 sd->processed, sd->dropped, sd->time_squeeze, 0, 4277 * it's not there. The caller must hold the RTNL lock.
4404 0, 0, 0, 0, /* was fastroute */ 4278 */
4405 sd->cpu_collision, sd->received_rps); 4279struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4406 return 0;
4407}
4408
4409static const struct seq_operations dev_seq_ops = {
4410 .start = dev_seq_start,
4411 .next = dev_seq_next,
4412 .stop = dev_seq_stop,
4413 .show = dev_seq_show,
4414};
4415
4416static int dev_seq_open(struct inode *inode, struct file *file)
4417{ 4280{
4418 return seq_open_net(inode, file, &dev_seq_ops, 4281 struct netdev_upper *upper;
4419 sizeof(struct seq_net_private));
4420}
4421 4282
4422static const struct file_operations dev_seq_fops = { 4283 ASSERT_RTNL();
4423 .owner = THIS_MODULE,
4424 .open = dev_seq_open,
4425 .read = seq_read,
4426 .llseek = seq_lseek,
4427 .release = seq_release_net,
4428};
4429 4284
4430static const struct seq_operations softnet_seq_ops = { 4285 if (list_empty(&dev->upper_dev_list))
4431 .start = softnet_seq_start, 4286 return NULL;
4432 .next = softnet_seq_next,
4433 .stop = softnet_seq_stop,
4434 .show = softnet_seq_show,
4435};
4436 4287
4437static int softnet_seq_open(struct inode *inode, struct file *file) 4288 upper = list_first_entry(&dev->upper_dev_list,
4438{ 4289 struct netdev_upper, list);
4439 return seq_open(file, &softnet_seq_ops); 4290 if (likely(upper->master))
4291 return upper->dev;
4292 return NULL;
4440} 4293}
4294EXPORT_SYMBOL(netdev_master_upper_dev_get);
4441 4295
4442static const struct file_operations softnet_seq_fops = { 4296/**
4443 .owner = THIS_MODULE, 4297 * netdev_master_upper_dev_get_rcu - Get master upper device
4444 .open = softnet_seq_open, 4298 * @dev: device
4445 .read = seq_read, 4299 *
4446 .llseek = seq_lseek, 4300 * Find a master upper device and return pointer to it or NULL in case
4447 .release = seq_release, 4301 * it's not there. The caller must hold the RCU read lock.
4448}; 4302 */
4449 4303struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4450static void *ptype_get_idx(loff_t pos)
4451{ 4304{
4452 struct packet_type *pt = NULL; 4305 struct netdev_upper *upper;
4453 loff_t i = 0;
4454 int t;
4455
4456 list_for_each_entry_rcu(pt, &ptype_all, list) {
4457 if (i == pos)
4458 return pt;
4459 ++i;
4460 }
4461 4306
4462 for (t = 0; t < PTYPE_HASH_SIZE; t++) { 4307 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4463 list_for_each_entry_rcu(pt, &ptype_base[t], list) { 4308 struct netdev_upper, list);
4464 if (i == pos) 4309 if (upper && likely(upper->master))
4465 return pt; 4310 return upper->dev;
4466 ++i;
4467 }
4468 }
4469 return NULL; 4311 return NULL;
4470} 4312}
4313EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
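For orientation, a minimal sketch of how callers might use the two getters added here — the plain variant on configuration paths under RTNL, the _rcu variant on packet paths. Function names are hypothetical, not part of this patch:

static struct net_device *example_get_master(struct net_device *dev)
{
        ASSERT_RTNL();          /* configuration path: RTNL held */
        return netdev_master_upper_dev_get(dev);
}

/* Caller is inside an RCU read-side critical section (e.g. rx path). */
static bool example_redirect_to_master(struct sk_buff *skb)
{
        struct net_device *master;

        master = netdev_master_upper_dev_get_rcu(skb->dev);
        if (!master)
                return false;
        skb->dev = master;
        return true;
}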
4471 4314
4472static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) 4315static int __netdev_upper_dev_link(struct net_device *dev,
4473 __acquires(RCU) 4316 struct net_device *upper_dev, bool master)
4474{ 4317{
4475 rcu_read_lock(); 4318 struct netdev_upper *upper;
4476 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4477}
4478 4319
4479static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4320 ASSERT_RTNL();
4480{
4481 struct packet_type *pt;
4482 struct list_head *nxt;
4483 int hash;
4484 4321
4485 ++*pos; 4322 if (dev == upper_dev)
4486 if (v == SEQ_START_TOKEN) 4323 return -EBUSY;
4487 return ptype_get_idx(0);
4488 4324
4489 pt = v; 4325 /* To prevent loops, check if dev is not upper device to upper_dev. */
4490 nxt = pt->list.next; 4326 if (__netdev_search_upper_dev(upper_dev, dev))
4491 if (pt->type == htons(ETH_P_ALL)) { 4327 return -EBUSY;
4492 if (nxt != &ptype_all)
4493 goto found;
4494 hash = 0;
4495 nxt = ptype_base[0].next;
4496 } else
4497 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4498 4328
4499 while (nxt == &ptype_base[hash]) { 4329 if (__netdev_find_upper(dev, upper_dev))
4500 if (++hash >= PTYPE_HASH_SIZE) 4330 return -EEXIST;
4501 return NULL;
4502 nxt = ptype_base[hash].next;
4503 }
4504found:
4505 return list_entry(nxt, struct packet_type, list);
4506}
4507 4331
4508static void ptype_seq_stop(struct seq_file *seq, void *v) 4332 if (master && netdev_master_upper_dev_get(dev))
4509 __releases(RCU) 4333 return -EBUSY;
4510{
4511 rcu_read_unlock();
4512}
4513 4334
4514static int ptype_seq_show(struct seq_file *seq, void *v) 4335 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4515{ 4336 if (!upper)
4516 struct packet_type *pt = v; 4337 return -ENOMEM;
4517 4338
4518 if (v == SEQ_START_TOKEN) 4339 upper->dev = upper_dev;
4519 seq_puts(seq, "Type Device Function\n"); 4340 upper->master = master;
4520 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 4341 INIT_LIST_HEAD(&upper->search_list);
4521 if (pt->type == htons(ETH_P_ALL))
4522 seq_puts(seq, "ALL ");
4523 else
4524 seq_printf(seq, "%04x", ntohs(pt->type));
4525 4342
4526 seq_printf(seq, " %-8s %pF\n", 4343 /* Ensure that master upper link is always the first item in list. */
4527 pt->dev ? pt->dev->name : "", pt->func); 4344 if (master)
4528 } 4345 list_add_rcu(&upper->list, &dev->upper_dev_list);
4346 else
4347 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4348 dev_hold(upper_dev);
4529 4349
4530 return 0; 4350 return 0;
4531} 4351}
4532 4352
4533static const struct seq_operations ptype_seq_ops = { 4353/**
4534 .start = ptype_seq_start, 4354 * netdev_upper_dev_link - Add a link to the upper device
4535 .next = ptype_seq_next, 4355 * @dev: device
4536 .stop = ptype_seq_stop, 4356 * @upper_dev: new upper device
4537 .show = ptype_seq_show, 4357 *
4538}; 4358 * Adds a link to device which is upper to this one. The caller must hold
4539 4359 * the RTNL lock. On a failure a negative errno code is returned.
4540static int ptype_seq_open(struct inode *inode, struct file *file) 4360 * On success the reference counts are adjusted and the function
4541{ 4361 * returns zero.
4542 return seq_open_net(inode, file, &ptype_seq_ops, 4362 */
4543 sizeof(struct seq_net_private)); 4363int netdev_upper_dev_link(struct net_device *dev,
4544} 4364 struct net_device *upper_dev)
4545
4546static const struct file_operations ptype_seq_fops = {
4547 .owner = THIS_MODULE,
4548 .open = ptype_seq_open,
4549 .read = seq_read,
4550 .llseek = seq_lseek,
4551 .release = seq_release_net,
4552};
4553
4554
4555static int __net_init dev_proc_net_init(struct net *net)
4556{
4557 int rc = -ENOMEM;
4558
4559 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4560 goto out;
4561 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4562 goto out_dev;
4563 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4564 goto out_softnet;
4565
4566 if (wext_proc_init(net))
4567 goto out_ptype;
4568 rc = 0;
4569out:
4570 return rc;
4571out_ptype:
4572 proc_net_remove(net, "ptype");
4573out_softnet:
4574 proc_net_remove(net, "softnet_stat");
4575out_dev:
4576 proc_net_remove(net, "dev");
4577 goto out;
4578}
4579
4580static void __net_exit dev_proc_net_exit(struct net *net)
4581{
4582 wext_proc_exit(net);
4583
4584 proc_net_remove(net, "ptype");
4585 proc_net_remove(net, "softnet_stat");
4586 proc_net_remove(net, "dev");
4587}
4588
4589static struct pernet_operations __net_initdata dev_proc_ops = {
4590 .init = dev_proc_net_init,
4591 .exit = dev_proc_net_exit,
4592};
4593
4594static int __init dev_proc_init(void)
4595{ 4365{
4596 return register_pernet_subsys(&dev_proc_ops); 4366 return __netdev_upper_dev_link(dev, upper_dev, false);
4597} 4367}
4598#else 4368EXPORT_SYMBOL(netdev_upper_dev_link);
4599#define dev_proc_init() 0
4600#endif /* CONFIG_PROC_FS */
4601
4602 4369
4603/** 4370/**
4604 * netdev_set_master - set up master pointer 4371 * netdev_master_upper_dev_link - Add a master link to the upper device
4605 * @slave: slave device 4372 * @dev: device
4606 * @master: new master device 4373 * @upper_dev: new upper device
4607 * 4374 *
4608 * Changes the master device of the slave. Pass %NULL to break the 4375 * Adds a link to device which is upper to this one. In this case, only
4609 * bonding. The caller must hold the RTNL semaphore. On a failure 4376 * one master upper device can be linked, although other non-master devices
4610 * a negative errno code is returned. On success the reference counts 4377 * might be linked as well. The caller must hold the RTNL lock.
4611 * are adjusted and the function returns zero. 4378 * On a failure a negative errno code is returned. On success the reference
4379 * counts are adjusted and the function returns zero.
4612 */ 4380 */
4613int netdev_set_master(struct net_device *slave, struct net_device *master) 4381int netdev_master_upper_dev_link(struct net_device *dev,
4382 struct net_device *upper_dev)
4614{ 4383{
4615 struct net_device *old = slave->master; 4384 return __netdev_upper_dev_link(dev, upper_dev, true);
4616
4617 ASSERT_RTNL();
4618
4619 if (master) {
4620 if (old)
4621 return -EBUSY;
4622 dev_hold(master);
4623 }
4624
4625 slave->master = master;
4626
4627 if (old)
4628 dev_put(old);
4629 return 0;
4630} 4385}
4631EXPORT_SYMBOL(netdev_set_master); 4386EXPORT_SYMBOL(netdev_master_upper_dev_link);
4632 4387
4633/** 4388/**
4634 * netdev_set_bond_master - set up bonding master/slave pair 4389 * netdev_upper_dev_unlink - Removes a link to upper device
4635 * @slave: slave device 4390 * @dev: device
4636 * @master: new master device 4391 * @upper_dev: new upper device
4637 * 4392 *
4638 * Changes the master device of the slave. Pass %NULL to break the 4393 * Removes a link to device which is upper to this one. The caller must hold
4639 * bonding. The caller must hold the RTNL semaphore. On a failure 4394 * the RTNL lock.
4640 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4641 * to the routing socket and the function returns zero.
4642 */ 4395 */
4643int netdev_set_bond_master(struct net_device *slave, struct net_device *master) 4396void netdev_upper_dev_unlink(struct net_device *dev,
4397 struct net_device *upper_dev)
4644{ 4398{
4645 int err; 4399 struct netdev_upper *upper;
4646 4400
4647 ASSERT_RTNL(); 4401 ASSERT_RTNL();
4648 4402
4649 err = netdev_set_master(slave, master); 4403 upper = __netdev_find_upper(dev, upper_dev);
4650 if (err) 4404 if (!upper)
4651 return err; 4405 return;
4652 if (master) 4406 list_del_rcu(&upper->list);
4653 slave->flags |= IFF_SLAVE; 4407 dev_put(upper_dev);
4654 else 4408 kfree_rcu(upper, rcu);
4655 slave->flags &= ~IFF_SLAVE;
4656
4657 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4658 return 0;
4659} 4409}
4660EXPORT_SYMBOL(netdev_set_bond_master); 4410EXPORT_SYMBOL(netdev_upper_dev_unlink);
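The intended calling pattern for the link/unlink pair, sketched as a bonding-style enslave/release. The functions are hypothetical and device-specific setup is elided; both run under RTNL:

static int example_enslave(struct net_device *master_dev,
                           struct net_device *slave_dev)
{
        /* A second master fails with -EBUSY, as does a link loop. */
        return netdev_master_upper_dev_link(slave_dev, master_dev);
}

static void example_release(struct net_device *master_dev,
                            struct net_device *slave_dev)
{
        netdev_upper_dev_unlink(slave_dev, master_dev);
        /* Holds only if master_dev was the sole upper device. */
        WARN_ON(netdev_has_any_upper_dev(slave_dev));
}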
4661 4411
4662static void dev_change_rx_flags(struct net_device *dev, int flags) 4412static void dev_change_rx_flags(struct net_device *dev, int flags)
4663{ 4413{
@@ -5020,381 +4770,33 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5020 if (!netif_device_present(dev)) 4770 if (!netif_device_present(dev))
5021 return -ENODEV; 4771 return -ENODEV;
5022 err = ops->ndo_set_mac_address(dev, sa); 4772 err = ops->ndo_set_mac_address(dev, sa);
5023 if (!err) 4773 if (err)
5024 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4774 return err;
4775 dev->addr_assign_type = NET_ADDR_SET;
4776 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5025 add_device_randomness(dev->dev_addr, dev->addr_len); 4777 add_device_randomness(dev->dev_addr, dev->addr_len);
5026 return err; 4778 return 0;
5027} 4779}
5028EXPORT_SYMBOL(dev_set_mac_address); 4780EXPORT_SYMBOL(dev_set_mac_address);
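A sketch of a caller (hypothetical helper, Ethernet-style device assumed); on success the code above now marks the address NET_ADDR_SET and fires NETDEV_CHANGEADDR before returning:

static int example_set_mac(struct net_device *dev, const u8 *addr)
{
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;
        memcpy(sa.sa_data, addr, dev->addr_len);  /* addr_len <= sizeof(sa.sa_data) */

        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}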
5029 4781
5030/*
5031 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
5032 */
5033static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
5034{
5035 int err;
5036 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
5037
5038 if (!dev)
5039 return -ENODEV;
5040
5041 switch (cmd) {
5042 case SIOCGIFFLAGS: /* Get interface flags */
5043 ifr->ifr_flags = (short) dev_get_flags(dev);
5044 return 0;
5045
5046 case SIOCGIFMETRIC: /* Get the metric on the interface
5047 (currently unused) */
5048 ifr->ifr_metric = 0;
5049 return 0;
5050
5051 case SIOCGIFMTU: /* Get the MTU of a device */
5052 ifr->ifr_mtu = dev->mtu;
5053 return 0;
5054
5055 case SIOCGIFHWADDR:
5056 if (!dev->addr_len)
5057 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5058 else
5059 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5060 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5061 ifr->ifr_hwaddr.sa_family = dev->type;
5062 return 0;
5063
5064 case SIOCGIFSLAVE:
5065 err = -EINVAL;
5066 break;
5067
5068 case SIOCGIFMAP:
5069 ifr->ifr_map.mem_start = dev->mem_start;
5070 ifr->ifr_map.mem_end = dev->mem_end;
5071 ifr->ifr_map.base_addr = dev->base_addr;
5072 ifr->ifr_map.irq = dev->irq;
5073 ifr->ifr_map.dma = dev->dma;
5074 ifr->ifr_map.port = dev->if_port;
5075 return 0;
5076
5077 case SIOCGIFINDEX:
5078 ifr->ifr_ifindex = dev->ifindex;
5079 return 0;
5080
5081 case SIOCGIFTXQLEN:
5082 ifr->ifr_qlen = dev->tx_queue_len;
5083 return 0;
5084
5085 default:
5086 /* dev_ioctl() should ensure this case
5087 * is never reached
5088 */
5089 WARN_ON(1);
5090 err = -ENOTTY;
5091 break;
5092
5093 }
5094 return err;
5095}
5096
5097/*
5098 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5099 */
5100static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5101{
5102 int err;
5103 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5104 const struct net_device_ops *ops;
5105
5106 if (!dev)
5107 return -ENODEV;
5108
5109 ops = dev->netdev_ops;
5110
5111 switch (cmd) {
5112 case SIOCSIFFLAGS: /* Set interface flags */
5113 return dev_change_flags(dev, ifr->ifr_flags);
5114
5115 case SIOCSIFMETRIC: /* Set the metric on the interface
5116 (currently unused) */
5117 return -EOPNOTSUPP;
5118
5119 case SIOCSIFMTU: /* Set the MTU of a device */
5120 return dev_set_mtu(dev, ifr->ifr_mtu);
5121
5122 case SIOCSIFHWADDR:
5123 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
5124
5125 case SIOCSIFHWBROADCAST:
5126 if (ifr->ifr_hwaddr.sa_family != dev->type)
5127 return -EINVAL;
5128 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5129 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5130 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5131 return 0;
5132
5133 case SIOCSIFMAP:
5134 if (ops->ndo_set_config) {
5135 if (!netif_device_present(dev))
5136 return -ENODEV;
5137 return ops->ndo_set_config(dev, &ifr->ifr_map);
5138 }
5139 return -EOPNOTSUPP;
5140
5141 case SIOCADDMULTI:
5142 if (!ops->ndo_set_rx_mode ||
5143 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5144 return -EINVAL;
5145 if (!netif_device_present(dev))
5146 return -ENODEV;
5147 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5148
5149 case SIOCDELMULTI:
5150 if (!ops->ndo_set_rx_mode ||
5151 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5152 return -EINVAL;
5153 if (!netif_device_present(dev))
5154 return -ENODEV;
5155 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5156
5157 case SIOCSIFTXQLEN:
5158 if (ifr->ifr_qlen < 0)
5159 return -EINVAL;
5160 dev->tx_queue_len = ifr->ifr_qlen;
5161 return 0;
5162
5163 case SIOCSIFNAME:
5164 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5165 return dev_change_name(dev, ifr->ifr_newname);
5166
5167 case SIOCSHWTSTAMP:
5168 err = net_hwtstamp_validate(ifr);
5169 if (err)
5170 return err;
5171 /* fall through */
5172
5173 /*
5174 * Unknown or private ioctl
5175 */
5176 default:
5177 if ((cmd >= SIOCDEVPRIVATE &&
5178 cmd <= SIOCDEVPRIVATE + 15) ||
5179 cmd == SIOCBONDENSLAVE ||
5180 cmd == SIOCBONDRELEASE ||
5181 cmd == SIOCBONDSETHWADDR ||
5182 cmd == SIOCBONDSLAVEINFOQUERY ||
5183 cmd == SIOCBONDINFOQUERY ||
5184 cmd == SIOCBONDCHANGEACTIVE ||
5185 cmd == SIOCGMIIPHY ||
5186 cmd == SIOCGMIIREG ||
5187 cmd == SIOCSMIIREG ||
5188 cmd == SIOCBRADDIF ||
5189 cmd == SIOCBRDELIF ||
5190 cmd == SIOCSHWTSTAMP ||
5191 cmd == SIOCWANDEV) {
5192 err = -EOPNOTSUPP;
5193 if (ops->ndo_do_ioctl) {
5194 if (netif_device_present(dev))
5195 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5196 else
5197 err = -ENODEV;
5198 }
5199 } else
5200 err = -EINVAL;
5201
5202 }
5203 return err;
5204}
5205
5206/*
5207 * This function handles all "interface"-type I/O control requests. The actual
5208 * 'doing' part of this is dev_ifsioc above.
5209 */
5210
5211/** 4782/**
5212 * dev_ioctl - network device ioctl 4783 * dev_change_carrier - Change device carrier
5213 * @net: the applicable net namespace 4784 * @dev: device
5214 * @cmd: command to issue 4785 * @new_carrier: new value
5215 * @arg: pointer to a struct ifreq in user space
5216 * 4786 *
5217 * Issue ioctl functions to devices. This is normally called by the 4787 * Change device carrier
5218 * user space syscall interfaces but can sometimes be useful for
5219 * other purposes. The return value is the return from the syscall if
5220 * positive or a negative errno code on error.
5221 */ 4788 */
5222 4789int dev_change_carrier(struct net_device *dev, bool new_carrier)
5223int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5224{ 4790{
5225 struct ifreq ifr; 4791 const struct net_device_ops *ops = dev->netdev_ops;
5226 int ret;
5227 char *colon;
5228
5229 /* One special case: SIOCGIFCONF takes ifconf argument
5230 and requires shared lock, because it sleeps writing
5231 to user space.
5232 */
5233
5234 if (cmd == SIOCGIFCONF) {
5235 rtnl_lock();
5236 ret = dev_ifconf(net, (char __user *) arg);
5237 rtnl_unlock();
5238 return ret;
5239 }
5240 if (cmd == SIOCGIFNAME)
5241 return dev_ifname(net, (struct ifreq __user *)arg);
5242
5243 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5244 return -EFAULT;
5245
5246 ifr.ifr_name[IFNAMSIZ-1] = 0;
5247
5248 colon = strchr(ifr.ifr_name, ':');
5249 if (colon)
5250 *colon = 0;
5251
5252 /*
5253 * See which interface the caller is talking about.
5254 */
5255
5256 switch (cmd) {
5257 /*
5258 * These ioctl calls:
5259 * - can be done by all.
5260 * - atomic and do not require locking.
5261 * - return a value
5262 */
5263 case SIOCGIFFLAGS:
5264 case SIOCGIFMETRIC:
5265 case SIOCGIFMTU:
5266 case SIOCGIFHWADDR:
5267 case SIOCGIFSLAVE:
5268 case SIOCGIFMAP:
5269 case SIOCGIFINDEX:
5270 case SIOCGIFTXQLEN:
5271 dev_load(net, ifr.ifr_name);
5272 rcu_read_lock();
5273 ret = dev_ifsioc_locked(net, &ifr, cmd);
5274 rcu_read_unlock();
5275 if (!ret) {
5276 if (colon)
5277 *colon = ':';
5278 if (copy_to_user(arg, &ifr,
5279 sizeof(struct ifreq)))
5280 ret = -EFAULT;
5281 }
5282 return ret;
5283
5284 case SIOCETHTOOL:
5285 dev_load(net, ifr.ifr_name);
5286 rtnl_lock();
5287 ret = dev_ethtool(net, &ifr);
5288 rtnl_unlock();
5289 if (!ret) {
5290 if (colon)
5291 *colon = ':';
5292 if (copy_to_user(arg, &ifr,
5293 sizeof(struct ifreq)))
5294 ret = -EFAULT;
5295 }
5296 return ret;
5297
5298 /*
5299 * These ioctl calls:
5300 * - require superuser power.
5301 * - require strict serialization.
5302 * - return a value
5303 */
5304 case SIOCGMIIPHY:
5305 case SIOCGMIIREG:
5306 case SIOCSIFNAME:
5307 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5308 return -EPERM;
5309 dev_load(net, ifr.ifr_name);
5310 rtnl_lock();
5311 ret = dev_ifsioc(net, &ifr, cmd);
5312 rtnl_unlock();
5313 if (!ret) {
5314 if (colon)
5315 *colon = ':';
5316 if (copy_to_user(arg, &ifr,
5317 sizeof(struct ifreq)))
5318 ret = -EFAULT;
5319 }
5320 return ret;
5321
5322 /*
5323 * These ioctl calls:
5324 * - require superuser power.
5325 * - require strict serialization.
5326 * - do not return a value
5327 */
5328 case SIOCSIFMAP:
5329 case SIOCSIFTXQLEN:
5330 if (!capable(CAP_NET_ADMIN))
5331 return -EPERM;
5332 /* fall through */
5333 /*
5334 * These ioctl calls:
5335 * - require local superuser power.
5336 * - require strict serialization.
5337 * - do not return a value
5338 */
5339 case SIOCSIFFLAGS:
5340 case SIOCSIFMETRIC:
5341 case SIOCSIFMTU:
5342 case SIOCSIFHWADDR:
5343 case SIOCSIFSLAVE:
5344 case SIOCADDMULTI:
5345 case SIOCDELMULTI:
5346 case SIOCSIFHWBROADCAST:
5347 case SIOCSMIIREG:
5348 case SIOCBONDENSLAVE:
5349 case SIOCBONDRELEASE:
5350 case SIOCBONDSETHWADDR:
5351 case SIOCBONDCHANGEACTIVE:
5352 case SIOCBRADDIF:
5353 case SIOCBRDELIF:
5354 case SIOCSHWTSTAMP:
5355 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5356 return -EPERM;
5357 /* fall through */
5358 case SIOCBONDSLAVEINFOQUERY:
5359 case SIOCBONDINFOQUERY:
5360 dev_load(net, ifr.ifr_name);
5361 rtnl_lock();
5362 ret = dev_ifsioc(net, &ifr, cmd);
5363 rtnl_unlock();
5364 return ret;
5365
5366 case SIOCGIFMEM:
5367 /* Get the per device memory space. We can add this but
5368 * currently do not support it */
5369 case SIOCSIFMEM:
5370 /* Set the per device memory buffer space.
5371 * Not applicable in our case */
5372 case SIOCSIFLINK:
5373 return -ENOTTY;
5374 4792
5375 /* 4793 if (!ops->ndo_change_carrier)
5376 * Unknown or private ioctl. 4794 return -EOPNOTSUPP;
5377 */ 4795 if (!netif_device_present(dev))
5378 default: 4796 return -ENODEV;
5379 if (cmd == SIOCWANDEV || 4797 return ops->ndo_change_carrier(dev, new_carrier);
5380 (cmd >= SIOCDEVPRIVATE &&
5381 cmd <= SIOCDEVPRIVATE + 15)) {
5382 dev_load(net, ifr.ifr_name);
5383 rtnl_lock();
5384 ret = dev_ifsioc(net, &ifr, cmd);
5385 rtnl_unlock();
5386 if (!ret && copy_to_user(arg, &ifr,
5387 sizeof(struct ifreq)))
5388 ret = -EFAULT;
5389 return ret;
5390 }
5391 /* Take care of Wireless Extensions */
5392 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5393 return wext_handle_ioctl(net, &ifr, cmd, arg);
5394 return -ENOTTY;
5395 }
5396} 4798}
5397 4799EXPORT_SYMBOL(dev_change_carrier);
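On the driver side, an ndo_change_carrier implementation can be as small as toggling the carrier state; a sketch along the lines of what a software device might do (names hypothetical):

static int example_change_carrier(struct net_device *dev, bool new_carrier)
{
        if (new_carrier)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
        return 0;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_change_carrier     = example_change_carrier,
};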
5398 4800
5399/** 4801/**
5400 * dev_new_index - allocate an ifindex 4802 * dev_new_index - allocate an ifindex
@@ -5482,11 +4884,15 @@ static void rollback_registered_many(struct list_head *head)
5482 if (dev->netdev_ops->ndo_uninit) 4884 if (dev->netdev_ops->ndo_uninit)
5483 dev->netdev_ops->ndo_uninit(dev); 4885 dev->netdev_ops->ndo_uninit(dev);
5484 4886
5485 /* Notifier chain MUST detach us from master device. */ 4887 /* Notifier chain MUST detach all upper devices from us. */
5486 WARN_ON(dev->master); 4888 WARN_ON(netdev_has_any_upper_dev(dev));
5487 4889
5488 /* Remove entries from kobject tree */ 4890 /* Remove entries from kobject tree */
5489 netdev_unregister_kobject(dev); 4891 netdev_unregister_kobject(dev);
4892#ifdef CONFIG_XPS
4893 /* Remove XPS queueing entries */
4894 netif_reset_xps_queues_gt(dev, 0);
4895#endif
5490 } 4896 }
5491 4897
5492 synchronize_net(); 4898 synchronize_net();
@@ -5664,10 +5070,9 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5664 BUG_ON(count < 1); 5070 BUG_ON(count < 1);
5665 5071
5666 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5072 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5667 if (!rx) { 5073 if (!rx)
5668 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5669 return -ENOMEM; 5074 return -ENOMEM;
5670 } 5075
5671 dev->_rx = rx; 5076 dev->_rx = rx;
5672 5077
5673 for (i = 0; i < count; i++) 5078 for (i = 0; i < count; i++)
@@ -5698,10 +5103,9 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5698 BUG_ON(count < 1); 5103 BUG_ON(count < 1);
5699 5104
5700 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5105 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5701 if (!tx) { 5106 if (!tx)
5702 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5703 return -ENOMEM; 5107 return -ENOMEM;
5704 } 5108
5705 dev->_tx = tx; 5109 dev->_tx = tx;
5706 5110
5707 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5111 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -5760,6 +5164,14 @@ int register_netdevice(struct net_device *dev)
5760 } 5164 }
5761 } 5165 }
5762 5166
5167 if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
5168 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5169 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5170 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5171 ret = -EINVAL;
5172 goto err_uninit;
5173 }
5174
5763 ret = -EBUSY; 5175 ret = -EBUSY;
5764 if (!dev->ifindex) 5176 if (!dev->ifindex)
5765 dev->ifindex = dev_new_index(net); 5177 dev->ifindex = dev_new_index(net);
@@ -5815,6 +5227,13 @@ int register_netdevice(struct net_device *dev)
5815 list_netdevice(dev); 5227 list_netdevice(dev);
5816 add_device_randomness(dev->dev_addr, dev->addr_len); 5228 add_device_randomness(dev->dev_addr, dev->addr_len);
5817 5229
5230 /* If the device has a permanent device address, the driver should
5231 * set dev_addr, and addr_assign_type should be left as
5232 * NET_ADDR_PERM (the default value).
5233 */
5234 if (dev->addr_assign_type == NET_ADDR_PERM)
5235 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5236
5818 /* Notify protocols, that a new device appeared. */ 5237 /* Notify protocols, that a new device appeared. */
5819 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5238 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5820 ret = notifier_to_errno(ret); 5239 ret = notifier_to_errno(ret);
@@ -6121,6 +5540,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6121 5540
6122static const struct ethtool_ops default_ethtool_ops; 5541static const struct ethtool_ops default_ethtool_ops;
6123 5542
5543void netdev_set_default_ethtool_ops(struct net_device *dev,
5544 const struct ethtool_ops *ops)
5545{
5546 if (dev->ethtool_ops == &default_ethtool_ops)
5547 dev->ethtool_ops = ops;
5548}
5549EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
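This helper lets a framework layer install fallback ethtool_ops without clobbering ops a driver has already set; a sketch of the intended use (names hypothetical):

static const struct ethtool_ops example_default_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
};

static void example_framework_setup(struct net_device *dev)
{
        /* A no-op if the driver already assigned its own ethtool_ops. */
        netdev_set_default_ethtool_ops(dev, &example_default_ethtool_ops);
}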
5550
6124/** 5551/**
6125 * alloc_netdev_mqs - allocate network device 5552 * alloc_netdev_mqs - allocate network device
6126 * @sizeof_priv: size of private data to allocate space for 5553 * @sizeof_priv: size of private data to allocate space for
@@ -6165,10 +5592,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6165 alloc_size += NETDEV_ALIGN - 1; 5592 alloc_size += NETDEV_ALIGN - 1;
6166 5593
6167 p = kzalloc(alloc_size, GFP_KERNEL); 5594 p = kzalloc(alloc_size, GFP_KERNEL);
6168 if (!p) { 5595 if (!p)
6169 pr_err("alloc_netdev: Unable to allocate device\n");
6170 return NULL; 5596 return NULL;
6171 }
6172 5597
6173 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5598 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6174 dev->padded = (char *)dev - (char *)p; 5599 dev->padded = (char *)dev - (char *)p;
@@ -6191,6 +5616,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6191 INIT_LIST_HEAD(&dev->napi_list); 5616 INIT_LIST_HEAD(&dev->napi_list);
6192 INIT_LIST_HEAD(&dev->unreg_list); 5617 INIT_LIST_HEAD(&dev->unreg_list);
6193 INIT_LIST_HEAD(&dev->link_watch_list); 5618 INIT_LIST_HEAD(&dev->link_watch_list);
5619 INIT_LIST_HEAD(&dev->upper_dev_list);
6194 dev->priv_flags = IFF_XMIT_DST_RELEASE; 5620 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6195 setup(dev); 5621 setup(dev);
6196 5622
@@ -6834,19 +6260,9 @@ static int __init net_dev_init(void)
6834 6260
6835 hotcpu_notifier(dev_cpu_callback, 0); 6261 hotcpu_notifier(dev_cpu_callback, 0);
6836 dst_init(); 6262 dst_init();
6837 dev_mcast_init();
6838 rc = 0; 6263 rc = 0;
6839out: 6264out:
6840 return rc; 6265 return rc;
6841} 6266}
6842 6267
6843subsys_initcall(net_dev_init); 6268subsys_initcall(net_dev_init);
6844
6845static int __init initialize_hashrnd(void)
6846{
6847 get_random_bytes(&hashrnd, sizeof(hashrnd));
6848 return 0;
6849}
6850
6851late_initcall_sync(initialize_hashrnd);
6852
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index b079c7bbc157..bd2eb9d3e369 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -15,7 +15,6 @@
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/export.h> 16#include <linux/export.h>
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/proc_fs.h>
19 18
20/* 19/*
21 * General list handling functions 20 * General list handling functions
@@ -727,76 +726,3 @@ void dev_mc_init(struct net_device *dev)
727 __hw_addr_init(&dev->mc); 726 __hw_addr_init(&dev->mc);
728} 727}
729EXPORT_SYMBOL(dev_mc_init); 728EXPORT_SYMBOL(dev_mc_init);
730
731#ifdef CONFIG_PROC_FS
732#include <linux/seq_file.h>
733
734static int dev_mc_seq_show(struct seq_file *seq, void *v)
735{
736 struct netdev_hw_addr *ha;
737 struct net_device *dev = v;
738
739 if (v == SEQ_START_TOKEN)
740 return 0;
741
742 netif_addr_lock_bh(dev);
743 netdev_for_each_mc_addr(ha, dev) {
744 int i;
745
746 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
747 dev->name, ha->refcount, ha->global_use);
748
749 for (i = 0; i < dev->addr_len; i++)
750 seq_printf(seq, "%02x", ha->addr[i]);
751
752 seq_putc(seq, '\n');
753 }
754 netif_addr_unlock_bh(dev);
755 return 0;
756}
757
758static const struct seq_operations dev_mc_seq_ops = {
759 .start = dev_seq_start,
760 .next = dev_seq_next,
761 .stop = dev_seq_stop,
762 .show = dev_mc_seq_show,
763};
764
765static int dev_mc_seq_open(struct inode *inode, struct file *file)
766{
767 return seq_open_net(inode, file, &dev_mc_seq_ops,
768 sizeof(struct seq_net_private));
769}
770
771static const struct file_operations dev_mc_seq_fops = {
772 .owner = THIS_MODULE,
773 .open = dev_mc_seq_open,
774 .read = seq_read,
775 .llseek = seq_lseek,
776 .release = seq_release_net,
777};
778
779#endif
780
781static int __net_init dev_mc_net_init(struct net *net)
782{
783 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
784 return -ENOMEM;
785 return 0;
786}
787
788static void __net_exit dev_mc_net_exit(struct net *net)
789{
790 proc_net_remove(net, "dev_mcast");
791}
792
793static struct pernet_operations __net_initdata dev_mc_net_ops = {
794 .init = dev_mc_net_init,
795 .exit = dev_mc_net_exit,
796};
797
798void __init dev_mcast_init(void)
799{
800 register_pernet_subsys(&dev_mc_net_ops);
801}
802
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
new file mode 100644
index 000000000000..6cc0481faade
--- /dev/null
+++ b/net/core/dev_ioctl.c
@@ -0,0 +1,576 @@
1#include <linux/kmod.h>
2#include <linux/netdevice.h>
3#include <linux/etherdevice.h>
4#include <linux/rtnetlink.h>
5#include <linux/net_tstamp.h>
6#include <linux/wireless.h>
7#include <net/wext.h>
8
9/*
10 * Map an interface index to its name (SIOCGIFNAME)
11 */
12
13/*
14 * We need this ioctl for efficient implementation of the
15 * if_indextoname() function required by the IPv6 API. Without
16 * it, we would have to search all the interfaces to find a
17 * match. --pb
18 */
19
20static int dev_ifname(struct net *net, struct ifreq __user *arg)
21{
22 struct net_device *dev;
23 struct ifreq ifr;
24 unsigned int seq;
25
26 /*
27 * Fetch the caller's info block.
28 */
29
30 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
31 return -EFAULT;
32
33retry:
34 seq = read_seqcount_begin(&devnet_rename_seq);
35 rcu_read_lock();
36 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
37 if (!dev) {
38 rcu_read_unlock();
39 return -ENODEV;
40 }
41
42 strcpy(ifr.ifr_name, dev->name);
43 rcu_read_unlock();
44 if (read_seqcount_retry(&devnet_rename_seq, seq))
45 goto retry;
46
47 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
48 return -EFAULT;
49 return 0;
50}
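From user space the round trip looks like this (a sketch; if_indextoname(3) wraps the same ioctl):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_ifindex = 1;            /* typically "lo" */
        if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
                printf("ifindex 1 is %s\n", ifr.ifr_name);
        close(fd);
        return 0;
}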
51
52static gifconf_func_t *gifconf_list[NPROTO];
53
54/**
55 * register_gifconf - register a SIOCGIF handler
56 * @family: Address family
57 * @gifconf: Function handler
58 *
59 * Register protocol dependent address dumping routines. The handler
60 * that is passed must not be freed or reused until it has been replaced
61 * by another handler.
62 */
63int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
64{
65 if (family >= NPROTO)
66 return -EINVAL;
67 gifconf_list[family] = gifconf;
68 return 0;
69}
70EXPORT_SYMBOL(register_gifconf);
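A handler has the gifconf_func_t shape below; it must return the size that would be written when buf is NULL, and otherwise the bytes consumed. A sketch with hypothetical names; inet_gifconf is the real in-tree user:

static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
        /* Nothing to report for this device: consume zero bytes. A real
         * handler emits one struct ifreq per address into buf. */
        return 0;
}

static int example_register(unsigned int family)
{
        return register_gifconf(family, example_gifconf);
}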
71
72/*
73 * Perform a SIOCGIFCONF call. This structure will change
74 * size eventually, and there is nothing I can do about it.
75 * Thus we will need a 'compatibility mode'.
76 */
77
78static int dev_ifconf(struct net *net, char __user *arg)
79{
80 struct ifconf ifc;
81 struct net_device *dev;
82 char __user *pos;
83 int len;
84 int total;
85 int i;
86
87 /*
88 * Fetch the caller's info block.
89 */
90
91 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
92 return -EFAULT;
93
94 pos = ifc.ifc_buf;
95 len = ifc.ifc_len;
96
97 /*
98 * Loop over the interfaces, and write an info block for each.
99 */
100
101 total = 0;
102 for_each_netdev(net, dev) {
103 for (i = 0; i < NPROTO; i++) {
104 if (gifconf_list[i]) {
105 int done;
106 if (!pos)
107 done = gifconf_list[i](dev, NULL, 0);
108 else
109 done = gifconf_list[i](dev, pos + total,
110 len - total);
111 if (done < 0)
112 return -EFAULT;
113 total += done;
114 }
115 }
116 }
117
118 /*
119 * All done. Write the updated control block back to the caller.
120 */
121 ifc.ifc_len = total;
122
123 /*
124 * Both BSD and Solaris return 0 here, so we do too.
125 */
126 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
127}
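User space typically calls SIOCGIFCONF twice — once with a NULL buffer to learn the size (the !pos branch above), then again with storage. A sketch with minimal error handling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifconf ifc;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        unsigned int i;

        memset(&ifc, 0, sizeof(ifc));   /* ifc_buf == NULL: size only */
        if (fd < 0 || ioctl(fd, SIOCGIFCONF, &ifc) < 0)
                return 1;

        ifc.ifc_buf = malloc(ifc.ifc_len);
        if (!ifc.ifc_buf || ioctl(fd, SIOCGIFCONF, &ifc) < 0)
                return 1;

        for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
                printf("%s\n", ifc.ifc_req[i].ifr_name);

        free(ifc.ifc_buf);
        close(fd);
        return 0;
}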
128
129/*
130 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
131 */
132static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
133{
134 int err;
135 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
136
137 if (!dev)
138 return -ENODEV;
139
140 switch (cmd) {
141 case SIOCGIFFLAGS: /* Get interface flags */
142 ifr->ifr_flags = (short) dev_get_flags(dev);
143 return 0;
144
145 case SIOCGIFMETRIC: /* Get the metric on the interface
146 (currently unused) */
147 ifr->ifr_metric = 0;
148 return 0;
149
150 case SIOCGIFMTU: /* Get the MTU of a device */
151 ifr->ifr_mtu = dev->mtu;
152 return 0;
153
154 case SIOCGIFHWADDR:
155 if (!dev->addr_len)
156 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof(ifr->ifr_hwaddr.sa_data));
157 else
158 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
159 min(sizeof(ifr->ifr_hwaddr.sa_data), (size_t) dev->addr_len));
160 ifr->ifr_hwaddr.sa_family = dev->type;
161 return 0;
162
163 case SIOCGIFSLAVE:
164 err = -EINVAL;
165 break;
166
167 case SIOCGIFMAP:
168 ifr->ifr_map.mem_start = dev->mem_start;
169 ifr->ifr_map.mem_end = dev->mem_end;
170 ifr->ifr_map.base_addr = dev->base_addr;
171 ifr->ifr_map.irq = dev->irq;
172 ifr->ifr_map.dma = dev->dma;
173 ifr->ifr_map.port = dev->if_port;
174 return 0;
175
176 case SIOCGIFINDEX:
177 ifr->ifr_ifindex = dev->ifindex;
178 return 0;
179
180 case SIOCGIFTXQLEN:
181 ifr->ifr_qlen = dev->tx_queue_len;
182 return 0;
183
184 default:
185 /* dev_ioctl() should ensure this case
186 * is never reached
187 */
188 WARN_ON(1);
189 err = -ENOTTY;
190 break;
191
192 }
193 return err;
194}
195
196static int net_hwtstamp_validate(struct ifreq *ifr)
197{
198 struct hwtstamp_config cfg;
199 enum hwtstamp_tx_types tx_type;
200 enum hwtstamp_rx_filters rx_filter;
201 int tx_type_valid = 0;
202 int rx_filter_valid = 0;
203
204 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
205 return -EFAULT;
206
207 if (cfg.flags) /* reserved for future extensions */
208 return -EINVAL;
209
210 tx_type = cfg.tx_type;
211 rx_filter = cfg.rx_filter;
212
213 switch (tx_type) {
214 case HWTSTAMP_TX_OFF:
215 case HWTSTAMP_TX_ON:
216 case HWTSTAMP_TX_ONESTEP_SYNC:
217 tx_type_valid = 1;
218 break;
219 }
220
221 switch (rx_filter) {
222 case HWTSTAMP_FILTER_NONE:
223 case HWTSTAMP_FILTER_ALL:
224 case HWTSTAMP_FILTER_SOME:
225 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
226 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
227 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
228 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
229 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
230 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
231 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
232 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
233 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
234 case HWTSTAMP_FILTER_PTP_V2_EVENT:
235 case HWTSTAMP_FILTER_PTP_V2_SYNC:
236 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
237 rx_filter_valid = 1;
238 break;
239 }
240
241 if (!tx_type_valid || !rx_filter_valid)
242 return -ERANGE;
243
244 return 0;
245}
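This validation runs before the driver ever sees the request; out-of-range enum values fail with -ERANGE. From user space, enabling receive timestamps looks roughly like this (sketch, hypothetical helper):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_rx_timestamps(int fd, const char *ifname)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_OFF;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}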
246
247/*
248 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
249 */
250static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
251{
252 int err;
253 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
254 const struct net_device_ops *ops;
255
256 if (!dev)
257 return -ENODEV;
258
259 ops = dev->netdev_ops;
260
261 switch (cmd) {
262 case SIOCSIFFLAGS: /* Set interface flags */
263 return dev_change_flags(dev, ifr->ifr_flags);
264
265 case SIOCSIFMETRIC: /* Set the metric on the interface
266 (currently unused) */
267 return -EOPNOTSUPP;
268
269 case SIOCSIFMTU: /* Set the MTU of a device */
270 return dev_set_mtu(dev, ifr->ifr_mtu);
271
272 case SIOCSIFHWADDR:
273 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
274
275 case SIOCSIFHWBROADCAST:
276 if (ifr->ifr_hwaddr.sa_family != dev->type)
277 return -EINVAL;
278 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
279 min(sizeof(ifr->ifr_hwaddr.sa_data), (size_t) dev->addr_len));
280 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
281 return 0;
282
283 case SIOCSIFMAP:
284 if (ops->ndo_set_config) {
285 if (!netif_device_present(dev))
286 return -ENODEV;
287 return ops->ndo_set_config(dev, &ifr->ifr_map);
288 }
289 return -EOPNOTSUPP;
290
291 case SIOCADDMULTI:
292 if (!ops->ndo_set_rx_mode ||
293 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
294 return -EINVAL;
295 if (!netif_device_present(dev))
296 return -ENODEV;
297 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
298
299 case SIOCDELMULTI:
300 if (!ops->ndo_set_rx_mode ||
301 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
302 return -EINVAL;
303 if (!netif_device_present(dev))
304 return -ENODEV;
305 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
306
307 case SIOCSIFTXQLEN:
308 if (ifr->ifr_qlen < 0)
309 return -EINVAL;
310 dev->tx_queue_len = ifr->ifr_qlen;
311 return 0;
312
313 case SIOCSIFNAME:
314 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
315 return dev_change_name(dev, ifr->ifr_newname);
316
317 case SIOCSHWTSTAMP:
318 err = net_hwtstamp_validate(ifr);
319 if (err)
320 return err;
321 /* fall through */
322
323 /*
324 * Unknown or private ioctl
325 */
326 default:
327 if ((cmd >= SIOCDEVPRIVATE &&
328 cmd <= SIOCDEVPRIVATE + 15) ||
329 cmd == SIOCBONDENSLAVE ||
330 cmd == SIOCBONDRELEASE ||
331 cmd == SIOCBONDSETHWADDR ||
332 cmd == SIOCBONDSLAVEINFOQUERY ||
333 cmd == SIOCBONDINFOQUERY ||
334 cmd == SIOCBONDCHANGEACTIVE ||
335 cmd == SIOCGMIIPHY ||
336 cmd == SIOCGMIIREG ||
337 cmd == SIOCSMIIREG ||
338 cmd == SIOCBRADDIF ||
339 cmd == SIOCBRDELIF ||
340 cmd == SIOCSHWTSTAMP ||
341 cmd == SIOCWANDEV) {
342 err = -EOPNOTSUPP;
343 if (ops->ndo_do_ioctl) {
344 if (netif_device_present(dev))
345 err = ops->ndo_do_ioctl(dev, ifr, cmd);
346 else
347 err = -ENODEV;
348 }
349 } else
350 err = -EINVAL;
351
352 }
353 return err;
354}
355
356/**
357 * dev_load - load a network module
358 * @net: the applicable net namespace
359 * @name: name of interface
360 *
361 * If a network interface is not present and the process has suitable
362 * privileges, this function loads the module. If module loading is not
363 * available in this kernel then it becomes a nop.
364 */
365
366void dev_load(struct net *net, const char *name)
367{
368 struct net_device *dev;
369 int no_module;
370
371 rcu_read_lock();
372 dev = dev_get_by_name_rcu(net, name);
373 rcu_read_unlock();
374
375 no_module = !dev;
376 if (no_module && capable(CAP_NET_ADMIN))
377 no_module = request_module("netdev-%s", name);
378 if (no_module && capable(CAP_SYS_MODULE)) {
379 if (!request_module("%s", name))
380 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
381 name);
382 }
383}
384EXPORT_SYMBOL(dev_load);
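For the non-deprecated path, a module advertises itself with an alias matching the request_module("netdev-%s") pattern above; the device name here is purely illustrative:

/* In the driver, so dev_load("mydev0") resolves to this module: */
MODULE_ALIAS("netdev-mydev0");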
385
386/*
387 * This function handles all "interface"-type I/O control requests. The actual
388 * 'doing' part of this is dev_ifsioc above.
389 */
390
391/**
392 * dev_ioctl - network device ioctl
393 * @net: the applicable net namespace
394 * @cmd: command to issue
395 * @arg: pointer to a struct ifreq in user space
396 *
397 * Issue ioctl functions to devices. This is normally called by the
398 * user space syscall interfaces but can sometimes be useful for
399 * other purposes. The return value is the return from the syscall if
400 * positive or a negative errno code on error.
401 */
402
403int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
404{
405 struct ifreq ifr;
406 int ret;
407 char *colon;
408
409 /* One special case: SIOCGIFCONF takes ifconf argument
410 and requires shared lock, because it sleeps writing
411 to user space.
412 */
413
414 if (cmd == SIOCGIFCONF) {
415 rtnl_lock();
416 ret = dev_ifconf(net, (char __user *) arg);
417 rtnl_unlock();
418 return ret;
419 }
420 if (cmd == SIOCGIFNAME)
421 return dev_ifname(net, (struct ifreq __user *)arg);
422
423 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
424 return -EFAULT;
425
426 ifr.ifr_name[IFNAMSIZ-1] = 0;
427
428 colon = strchr(ifr.ifr_name, ':');
429 if (colon)
430 *colon = 0;
431
432 /*
433 * See which interface the caller is talking about.
434 */
435
436 switch (cmd) {
437 /*
438 * These ioctl calls:
439 * - can be done by all.
440 * - atomic and do not require locking.
441 * - return a value
442 */
443 case SIOCGIFFLAGS:
444 case SIOCGIFMETRIC:
445 case SIOCGIFMTU:
446 case SIOCGIFHWADDR:
447 case SIOCGIFSLAVE:
448 case SIOCGIFMAP:
449 case SIOCGIFINDEX:
450 case SIOCGIFTXQLEN:
451 dev_load(net, ifr.ifr_name);
452 rcu_read_lock();
453 ret = dev_ifsioc_locked(net, &ifr, cmd);
454 rcu_read_unlock();
455 if (!ret) {
456 if (colon)
457 *colon = ':';
458 if (copy_to_user(arg, &ifr,
459 sizeof(struct ifreq)))
460 ret = -EFAULT;
461 }
462 return ret;
463
464 case SIOCETHTOOL:
465 dev_load(net, ifr.ifr_name);
466 rtnl_lock();
467 ret = dev_ethtool(net, &ifr);
468 rtnl_unlock();
469 if (!ret) {
470 if (colon)
471 *colon = ':';
472 if (copy_to_user(arg, &ifr,
473 sizeof(struct ifreq)))
474 ret = -EFAULT;
475 }
476 return ret;
477
478 /*
479 * These ioctl calls:
480 * - require superuser power.
481 * - require strict serialization.
482 * - return a value
483 */
484 case SIOCGMIIPHY:
485 case SIOCGMIIREG:
486 case SIOCSIFNAME:
487 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
488 return -EPERM;
489 dev_load(net, ifr.ifr_name);
490 rtnl_lock();
491 ret = dev_ifsioc(net, &ifr, cmd);
492 rtnl_unlock();
493 if (!ret) {
494 if (colon)
495 *colon = ':';
496 if (copy_to_user(arg, &ifr,
497 sizeof(struct ifreq)))
498 ret = -EFAULT;
499 }
500 return ret;
501
502 /*
503 * These ioctl calls:
504 * - require superuser power.
505 * - require strict serialization.
506 * - do not return a value
507 */
508 case SIOCSIFMAP:
509 case SIOCSIFTXQLEN:
510 if (!capable(CAP_NET_ADMIN))
511 return -EPERM;
512 /* fall through */
513 /*
514 * These ioctl calls:
515 * - require local superuser power.
516 * - require strict serialization.
517 * - do not return a value
518 */
519 case SIOCSIFFLAGS:
520 case SIOCSIFMETRIC:
521 case SIOCSIFMTU:
522 case SIOCSIFHWADDR:
523 case SIOCSIFSLAVE:
524 case SIOCADDMULTI:
525 case SIOCDELMULTI:
526 case SIOCSIFHWBROADCAST:
527 case SIOCSMIIREG:
528 case SIOCBONDENSLAVE:
529 case SIOCBONDRELEASE:
530 case SIOCBONDSETHWADDR:
531 case SIOCBONDCHANGEACTIVE:
532 case SIOCBRADDIF:
533 case SIOCBRDELIF:
534 case SIOCSHWTSTAMP:
535 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
536 return -EPERM;
537 /* fall through */
538 case SIOCBONDSLAVEINFOQUERY:
539 case SIOCBONDINFOQUERY:
540 dev_load(net, ifr.ifr_name);
541 rtnl_lock();
542 ret = dev_ifsioc(net, &ifr, cmd);
543 rtnl_unlock();
544 return ret;
545
546 case SIOCGIFMEM:
547 /* Get the per device memory space. We can add this but
548 * currently do not support it */
549 case SIOCSIFMEM:
550 /* Set the per device memory buffer space.
551 * Not applicable in our case */
552 case SIOCSIFLINK:
553 return -ENOTTY;
554
555 /*
556 * Unknown or private ioctl.
557 */
558 default:
559 if (cmd == SIOCWANDEV ||
560 (cmd >= SIOCDEVPRIVATE &&
561 cmd <= SIOCDEVPRIVATE + 15)) {
562 dev_load(net, ifr.ifr_name);
563 rtnl_lock();
564 ret = dev_ifsioc(net, &ifr, cmd);
565 rtnl_unlock();
566 if (!ret && copy_to_user(arg, &ifr,
567 sizeof(struct ifreq)))
568 ret = -EFAULT;
569 return ret;
570 }
571 /* Take care of Wireless Extensions */
572 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
573 return wext_handle_ioctl(net, &ifr, cmd, arg);
574 return -ENOTTY;
575 }
576}
diff --git a/net/core/dst.c b/net/core/dst.c
index ee6153e2cf43..35fd12f1a69c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -179,6 +179,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
179 dst_init_metrics(dst, dst_default_metrics, true); 179 dst_init_metrics(dst, dst_default_metrics, true);
180 dst->expires = 0UL; 180 dst->expires = 0UL;
181 dst->path = dst; 181 dst->path = dst;
182 dst->from = NULL;
182#ifdef CONFIG_XFRM 183#ifdef CONFIG_XFRM
183 dst->xfrm = NULL; 184 dst->xfrm = NULL;
184#endif 185#endif
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a8705432e4b1..3e9b2c3e30f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -77,6 +77,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
77 [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", 77 [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation",
78 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", 78 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
79 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", 79 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
80 [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation",
80 81
81 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", 82 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
82 [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", 83 [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
@@ -175,7 +176,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
175 if (sset == ETH_SS_FEATURES) 176 if (sset == ETH_SS_FEATURES)
176 return ARRAY_SIZE(netdev_features_strings); 177 return ARRAY_SIZE(netdev_features_strings);
177 178
178 if (ops && ops->get_sset_count && ops->get_strings) 179 if (ops->get_sset_count && ops->get_strings)
179 return ops->get_sset_count(dev, sset); 180 return ops->get_sset_count(dev, sset);
180 else 181 else
181 return -EOPNOTSUPP; 182 return -EOPNOTSUPP;
@@ -311,7 +312,7 @@ int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
311{ 312{
312 ASSERT_RTNL(); 313 ASSERT_RTNL();
313 314
314 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) 315 if (!dev->ethtool_ops->get_settings)
315 return -EOPNOTSUPP; 316 return -EOPNOTSUPP;
316 317
317 memset(cmd, 0, sizeof(struct ethtool_cmd)); 318 memset(cmd, 0, sizeof(struct ethtool_cmd));
@@ -355,7 +356,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
355 356
356 memset(&info, 0, sizeof(info)); 357 memset(&info, 0, sizeof(info));
357 info.cmd = ETHTOOL_GDRVINFO; 358 info.cmd = ETHTOOL_GDRVINFO;
358 if (ops && ops->get_drvinfo) { 359 if (ops->get_drvinfo) {
359 ops->get_drvinfo(dev, &info); 360 ops->get_drvinfo(dev, &info);
360 } else if (dev->dev.parent && dev->dev.parent->driver) { 361 } else if (dev->dev.parent && dev->dev.parent->driver) {
361 strlcpy(info.bus_info, dev_name(dev->dev.parent), 362 strlcpy(info.bus_info, dev_name(dev->dev.parent),
@@ -370,7 +371,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
370 * this method of obtaining string set info is deprecated; 371 * this method of obtaining string set info is deprecated;
371 * Use ETHTOOL_GSSET_INFO instead. 372 * Use ETHTOOL_GSSET_INFO instead.
372 */ 373 */
373 if (ops && ops->get_sset_count) { 374 if (ops->get_sset_count) {
374 int rc; 375 int rc;
375 376
376 rc = ops->get_sset_count(dev, ETH_SS_TEST); 377 rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -383,9 +384,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
383 if (rc >= 0) 384 if (rc >= 0)
384 info.n_priv_flags = rc; 385 info.n_priv_flags = rc;
385 } 386 }
386 if (ops && ops->get_regs_len) 387 if (ops->get_regs_len)
387 info.regdump_len = ops->get_regs_len(dev); 388 info.regdump_len = ops->get_regs_len(dev);
388 if (ops && ops->get_eeprom_len) 389 if (ops->get_eeprom_len)
389 info.eedump_len = ops->get_eeprom_len(dev); 390 info.eedump_len = ops->get_eeprom_len(dev);
390 391
391 if (copy_to_user(useraddr, &info, sizeof(info))) 392 if (copy_to_user(useraddr, &info, sizeof(info)))
@@ -590,13 +591,14 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
590 struct ethtool_rxnfc rx_rings; 591 struct ethtool_rxnfc rx_rings;
591 u32 user_size, dev_size, i; 592 u32 user_size, dev_size, i;
592 u32 *indir; 593 u32 *indir;
594 const struct ethtool_ops *ops = dev->ethtool_ops;
593 int ret; 595 int ret;
594 596
595 if (!dev->ethtool_ops->get_rxfh_indir_size || 597 if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir ||
596 !dev->ethtool_ops->set_rxfh_indir || 598 !ops->get_rxnfc)
597 !dev->ethtool_ops->get_rxnfc)
598 return -EOPNOTSUPP; 599 return -EOPNOTSUPP;
599 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 600
601 dev_size = ops->get_rxfh_indir_size(dev);
600 if (dev_size == 0) 602 if (dev_size == 0)
601 return -EOPNOTSUPP; 603 return -EOPNOTSUPP;
602 604
@@ -613,7 +615,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
613 return -ENOMEM; 615 return -ENOMEM;
614 616
615 rx_rings.cmd = ETHTOOL_GRXRINGS; 617 rx_rings.cmd = ETHTOOL_GRXRINGS;
616 ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL); 618 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
617 if (ret) 619 if (ret)
618 goto out; 620 goto out;
619 621
@@ -639,7 +641,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
639 } 641 }
640 } 642 }
641 643
642 ret = dev->ethtool_ops->set_rxfh_indir(dev, indir); 644 ret = ops->set_rxfh_indir(dev, indir);
643 645
644out: 646out:
645 kfree(indir); 647 kfree(indir);
@@ -1082,9 +1084,10 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1082{ 1084{
1083 struct ethtool_value id; 1085 struct ethtool_value id;
1084 static bool busy; 1086 static bool busy;
1087 const struct ethtool_ops *ops = dev->ethtool_ops;
1085 int rc; 1088 int rc;
1086 1089
1087 if (!dev->ethtool_ops->set_phys_id) 1090 if (!ops->set_phys_id)
1088 return -EOPNOTSUPP; 1091 return -EOPNOTSUPP;
1089 1092
1090 if (busy) 1093 if (busy)
@@ -1093,7 +1096,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1093 if (copy_from_user(&id, useraddr, sizeof(id))) 1096 if (copy_from_user(&id, useraddr, sizeof(id)))
1094 return -EFAULT; 1097 return -EFAULT;
1095 1098
1096 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE); 1099 rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1097 if (rc < 0) 1100 if (rc < 0)
1098 return rc; 1101 return rc;
1099 1102
@@ -1118,7 +1121,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1118 i = n; 1121 i = n;
1119 do { 1122 do {
1120 rtnl_lock(); 1123 rtnl_lock();
1121 rc = dev->ethtool_ops->set_phys_id(dev, 1124 rc = ops->set_phys_id(dev,
1122 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON); 1125 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1123 rtnl_unlock(); 1126 rtnl_unlock();
1124 if (rc) 1127 if (rc)
@@ -1133,7 +1136,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1133 dev_put(dev); 1136 dev_put(dev);
1134 busy = false; 1137 busy = false;
1135 1138
1136 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); 1139 (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1137 return rc; 1140 return rc;
1138} 1141}
1139 1142
@@ -1275,7 +1278,7 @@ static int ethtool_get_dump_flag(struct net_device *dev,
1275 struct ethtool_dump dump; 1278 struct ethtool_dump dump;
1276 const struct ethtool_ops *ops = dev->ethtool_ops; 1279 const struct ethtool_ops *ops = dev->ethtool_ops;
1277 1280
1278 if (!dev->ethtool_ops->get_dump_flag) 1281 if (!ops->get_dump_flag)
1279 return -EOPNOTSUPP; 1282 return -EOPNOTSUPP;
1280 1283
1281 if (copy_from_user(&dump, useraddr, sizeof(dump))) 1284 if (copy_from_user(&dump, useraddr, sizeof(dump)))
@@ -1299,8 +1302,7 @@ static int ethtool_get_dump_data(struct net_device *dev,
1299 const struct ethtool_ops *ops = dev->ethtool_ops; 1302 const struct ethtool_ops *ops = dev->ethtool_ops;
1300 void *data = NULL; 1303 void *data = NULL;
1301 1304
1302 if (!dev->ethtool_ops->get_dump_data || 1305 if (!ops->get_dump_data || !ops->get_dump_flag)
1303 !dev->ethtool_ops->get_dump_flag)
1304 return -EOPNOTSUPP; 1306 return -EOPNOTSUPP;
1305 1307
1306 if (copy_from_user(&dump, useraddr, sizeof(dump))) 1308 if (copy_from_user(&dump, useraddr, sizeof(dump)))
@@ -1346,13 +1348,9 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
1346 info.cmd = ETHTOOL_GET_TS_INFO; 1348 info.cmd = ETHTOOL_GET_TS_INFO;
1347 1349
1348 if (phydev && phydev->drv && phydev->drv->ts_info) { 1350 if (phydev && phydev->drv && phydev->drv->ts_info) {
1349
1350 err = phydev->drv->ts_info(phydev, &info); 1351 err = phydev->drv->ts_info(phydev, &info);
1351 1352 } else if (ops->get_ts_info) {
1352 } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
1353
1354 err = ops->get_ts_info(dev, &info); 1353 err = ops->get_ts_info(dev, &info);
1355
1356 } else { 1354 } else {
1357 info.so_timestamping = 1355 info.so_timestamping =
1358 SOF_TIMESTAMPING_RX_SOFTWARE | 1356 SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/core/filter.c b/net/core/filter.c
index c23543cba132..2e20b55a7830 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -532,6 +532,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
532 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X, 532 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
533 }; 533 };
534 int pc; 534 int pc;
535 bool anc_found;
535 536
536 if (flen == 0 || flen > BPF_MAXINSNS) 537 if (flen == 0 || flen > BPF_MAXINSNS)
537 return -EINVAL; 538 return -EINVAL;
@@ -592,8 +593,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
592 case BPF_S_LD_W_ABS: 593 case BPF_S_LD_W_ABS:
593 case BPF_S_LD_H_ABS: 594 case BPF_S_LD_H_ABS:
594 case BPF_S_LD_B_ABS: 595 case BPF_S_LD_B_ABS:
596 anc_found = false;
595#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ 597#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
596 code = BPF_S_ANC_##CODE; \ 598 code = BPF_S_ANC_##CODE; \
599 anc_found = true; \
597 break 600 break
598 switch (ftest->k) { 601 switch (ftest->k) {
599 ANCILLARY(PROTOCOL); 602 ANCILLARY(PROTOCOL);
@@ -610,6 +613,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
610 ANCILLARY(VLAN_TAG); 613 ANCILLARY(VLAN_TAG);
611 ANCILLARY(VLAN_TAG_PRESENT); 614 ANCILLARY(VLAN_TAG_PRESENT);
612 } 615 }
616
617 /* ancillary operation unknown or unsupported */
618 if (anc_found == false && ftest->k >= SKF_AD_OFF)
619 return -EINVAL;
613 } 620 }
614 ftest->code = code; 621 ftest->code = code;
615 } 622 }
@@ -714,6 +721,9 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
714 unsigned int fsize = sizeof(struct sock_filter) * fprog->len; 721 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
715 int err; 722 int err;
716 723
724 if (sock_flag(sk, SOCK_FILTER_LOCKED))
725 return -EPERM;
726
717 /* Make sure new filter is there and in the right amounts. */ 727 /* Make sure new filter is there and in the right amounts. */
718 if (fprog->filter == NULL) 728 if (fprog->filter == NULL)
719 return -EINVAL; 729 return -EINVAL;
@@ -750,6 +760,9 @@ int sk_detach_filter(struct sock *sk)
750 int ret = -ENOENT; 760 int ret = -ENOENT;
751 struct sk_filter *filter; 761 struct sk_filter *filter;
752 762
763 if (sock_flag(sk, SOCK_FILTER_LOCKED))
764 return -EPERM;
765
753 filter = rcu_dereference_protected(sk->sk_filter, 766 filter = rcu_dereference_protected(sk->sk_filter,
754 sock_owned_by_user(sk)); 767 sock_owned_by_user(sk));
755 if (filter) { 768 if (filter) {
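SOCK_FILTER_LOCKED is driven by the SO_LOCK_FILTER socket option added in the same series; once set, both attach and detach fail with -EPERM. A user-space sketch (the fallback define is only for older headers):

#include <sys/socket.h>

#ifndef SO_LOCK_FILTER
#define SO_LOCK_FILTER  44      /* asm-generic value */
#endif

/* Lock the currently attached BPF filter so it can no longer be
 * replaced or removed, even by this process. */
static int lock_filter(int fd)
{
        int on = 1;

        return setsockopt(fd, SOL_SOCKET, SO_LOCK_FILTER, &on, sizeof(on));
}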
diff --git a/net/core/flow.c b/net/core/flow.c
index b0901ee5a002..43f7495df27a 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -286,7 +286,7 @@ nocache:
286 else 286 else
287 fle->genid--; 287 fle->genid--;
288 } else { 288 } else {
289 if (flo && !IS_ERR(flo)) 289 if (!IS_ERR_OR_NULL(flo))
290 flo->ops->delete(flo); 290 flo->ops->delete(flo);
291 } 291 }
292ret_object: 292ret_object:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 466820b6e344..9d4c7201400d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -143,3 +143,176 @@ ipv6:
143 return true; 143 return true;
144} 144}
145EXPORT_SYMBOL(skb_flow_dissect); 145EXPORT_SYMBOL(skb_flow_dissect);
146
147static u32 hashrnd __read_mostly;
148
149/*
150 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
151 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
152 * on success; zero indicates no valid hash. Also, sets l4_rxhash in skb
153 * if hash is a canonical 4-tuple hash over transport ports.
154 */
155void __skb_get_rxhash(struct sk_buff *skb)
156{
157 struct flow_keys keys;
158 u32 hash;
159
160 if (!skb_flow_dissect(skb, &keys))
161 return;
162
163 if (keys.ports)
164 skb->l4_rxhash = 1;
165
166 /* get a consistent hash (same value on both flow directions) */
167 if (((__force u32)keys.dst < (__force u32)keys.src) ||
168 (((__force u32)keys.dst == (__force u32)keys.src) &&
169 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
170 swap(keys.dst, keys.src);
171 swap(keys.port16[0], keys.port16[1]);
172 }
173
174 hash = jhash_3words((__force u32)keys.dst,
175 (__force u32)keys.src,
176 (__force u32)keys.ports, hashrnd);
177 if (!hash)
178 hash = 1;
179
180 skb->rxhash = hash;
181}
182EXPORT_SYMBOL(__skb_get_rxhash);
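The swap above canonicalizes the tuple so both directions of a connection hash identically; the same idea in isolation, as a kernel-style sketch with hypothetical names:

struct example_tuple {
        __be32 src, dst;
        __be16 sport, dport;
};

static void example_canonicalize(struct example_tuple *t)
{
        /* Order so dst >= src, ties broken on ports. After this,
         * hash(src, dst, ports) is direction-invariant. */
        if ((__force u32)t->dst < (__force u32)t->src ||
            ((__force u32)t->dst == (__force u32)t->src &&
             (__force u16)t->dport < (__force u16)t->sport)) {
                swap(t->src, t->dst);
                swap(t->sport, t->dport);
        }
}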
183
184/*
185 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
186 * to be used as a distribution range.
187 */
188u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
189 unsigned int num_tx_queues)
190{
191 u32 hash;
192 u16 qoffset = 0;
193 u16 qcount = num_tx_queues;
194
195 if (skb_rx_queue_recorded(skb)) {
196 hash = skb_get_rx_queue(skb);
197 while (unlikely(hash >= num_tx_queues))
198 hash -= num_tx_queues;
199 return hash;
200 }
201
202 if (dev->num_tc) {
203 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
204 qoffset = dev->tc_to_txq[tc].offset;
205 qcount = dev->tc_to_txq[tc].count;
206 }
207
208 if (skb->sk && skb->sk->sk_hash)
209 hash = skb->sk->sk_hash;
210 else
211 hash = (__force u16) skb->protocol;
212 hash = jhash_1word(hash, hashrnd);
213
214 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
215}
216EXPORT_SYMBOL(__skb_tx_hash);
217
218static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
219{
220 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
221 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
222 dev->name, queue_index,
223 dev->real_num_tx_queues);
224 return 0;
225 }
226 return queue_index;
227}
228
229static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
230{
231#ifdef CONFIG_XPS
232 struct xps_dev_maps *dev_maps;
233 struct xps_map *map;
234 int queue_index = -1;
235
236 rcu_read_lock();
237 dev_maps = rcu_dereference(dev->xps_maps);
238 if (dev_maps) {
239 map = rcu_dereference(
240 dev_maps->cpu_map[raw_smp_processor_id()]);
241 if (map) {
242 if (map->len == 1)
243 queue_index = map->queues[0];
244 else {
245 u32 hash;
246 if (skb->sk && skb->sk->sk_hash)
247 hash = skb->sk->sk_hash;
248 else
249 hash = (__force u16) skb->protocol ^
250 skb->rxhash;
251 hash = jhash_1word(hash, hashrnd);
252 queue_index = map->queues[
253 ((u64)hash * map->len) >> 32];
254 }
255 if (unlikely(queue_index >= dev->real_num_tx_queues))
256 queue_index = -1;
257 }
258 }
259 rcu_read_unlock();
260
261 return queue_index;
262#else
263 return -1;
264#endif
265}
266
267u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
268{
269 struct sock *sk = skb->sk;
270 int queue_index = sk_tx_queue_get(sk);
271
272 if (queue_index < 0 || skb->ooo_okay ||
273 queue_index >= dev->real_num_tx_queues) {
274 int new_index = get_xps_queue(dev, skb);
275 if (new_index < 0)
276 new_index = skb_tx_hash(dev, skb);
277
278 if (queue_index != new_index && sk) {
279 struct dst_entry *dst =
280 rcu_dereference_check(sk->sk_dst_cache, 1);
281
282 if (dst && skb_dst(skb) == dst)
283 sk_tx_queue_set(sk, queue_index);
284
285 }
286
287 queue_index = new_index;
288 }
289
290 return queue_index;
291}
292EXPORT_SYMBOL(__netdev_pick_tx);
293
294struct netdev_queue *netdev_pick_tx(struct net_device *dev,
295 struct sk_buff *skb)
296{
297 int queue_index = 0;
298
299 if (dev->real_num_tx_queues != 1) {
300 const struct net_device_ops *ops = dev->netdev_ops;
301 if (ops->ndo_select_queue)
302 queue_index = ops->ndo_select_queue(dev, skb);
303 else
304 queue_index = __netdev_pick_tx(dev, skb);
305 queue_index = dev_cap_txqueue(dev, queue_index);
306 }
307
308 skb_set_queue_mapping(skb, queue_index);
309 return netdev_get_tx_queue(dev, queue_index);
310}
311
312static int __init initialize_hashrnd(void)
313{
314 get_random_bytes(&hashrnd, sizeof(hashrnd));
315 return 0;
316}
317
318late_initcall_sync(initialize_hashrnd);
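
Two idioms in the new hashing code deserve a note: the swap in __skb_get_rxhash() canonicalizes (src, dst) so both directions of a flow hash identically, and __skb_tx_hash() maps a 32-bit hash onto qcount queues with a multiply-and-shift rather than a modulo. A standalone sketch of both, with an invented mixer standing in for jhash_3words():

#include <stdint.h>
#include <stdio.h>

/* stand-in for jhash_3words(); any decent 32-bit mixer works for the demo */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ c * 0xc2b2ae35u;
	h ^= h >> 16;
	return h;
}

/* direction-independent flow hash: order (src, dst) canonically first */
static uint32_t flow_hash(uint32_t src, uint32_t dst,
			  uint16_t sport, uint16_t dport)
{
	if (dst < src || (dst == src && dport < sport)) {
		uint32_t t32 = src; src = dst; dst = t32;
		uint16_t t16 = sport; sport = dport; dport = t16;
	}
	return mix3(dst, src, ((uint32_t)dport << 16) | sport);
}

/* map a 32-bit hash onto [0, qcount): ((u64)hash * qcount) >> 32 */
static uint16_t pick_queue(uint32_t hash, uint16_t qcount)
{
	return (uint16_t)(((uint64_t)hash * qcount) >> 32);
}

int main(void)
{
	uint32_t a = flow_hash(0x0a000001, 0x0a000002, 1234, 80);
	uint32_t b = flow_hash(0x0a000002, 0x0a000001, 80, 1234);

	/* a == b: both directions of the flow land on the same queue */
	printf("forward %08x reverse %08x queue %u\n", a, b, pick_queue(a, 8));
	return 0;
}
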
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index c815f285e5ab..3863b8f639c5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -290,15 +290,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
290 goto out_entries; 290 goto out_entries;
291 } 291 }
292 292
293 if (tbl->entry_size) 293 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
294 n = kzalloc(tbl->entry_size, GFP_ATOMIC);
295 else {
296 int sz = sizeof(*n) + tbl->key_len;
297
298 sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
299 sz += dev->neigh_priv_len;
300 n = kzalloc(sz, GFP_ATOMIC);
301 }
302 if (!n) 294 if (!n)
303 goto out_entries; 295 goto out_entries;
304 296
@@ -778,6 +770,9 @@ static void neigh_periodic_work(struct work_struct *work)
778 nht = rcu_dereference_protected(tbl->nht, 770 nht = rcu_dereference_protected(tbl->nht,
779 lockdep_is_held(&tbl->lock)); 771 lockdep_is_held(&tbl->lock));
780 772
773 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
774 goto out;
775
781 /* 776 /*
782 * periodically recompute ReachableTime from random function 777 * periodically recompute ReachableTime from random function
783 */ 778 */
@@ -832,6 +827,7 @@ next_elt:
832 nht = rcu_dereference_protected(tbl->nht, 827 nht = rcu_dereference_protected(tbl->nht,
833 lockdep_is_held(&tbl->lock)); 828 lockdep_is_held(&tbl->lock));
834 } 829 }
830out:
835 /* Cycle through all hash buckets every base_reachable_time/2 ticks. 831 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
836 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 832 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
837 * base_reachable_time. 833 * base_reachable_time.
@@ -1542,6 +1538,12 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1542 if (!tbl->nht || !tbl->phash_buckets) 1538 if (!tbl->nht || !tbl->phash_buckets)
1543 panic("cannot allocate neighbour cache hashes"); 1539 panic("cannot allocate neighbour cache hashes");
1544 1540
1541 if (!tbl->entry_size)
1542 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1543 tbl->key_len, NEIGH_PRIV_ALIGN);
1544 else
1545 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1546
1545 rwlock_init(&tbl->lock); 1547 rwlock_init(&tbl->lock);
1546 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); 1548 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1547 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); 1549 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
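
The neigh_alloc() simplification works because neigh_table_init_no_netlink() now computes entry_size once up front as ALIGN(offsetof(struct neighbour, primary_key) + key_len, NEIGH_PRIV_ALIGN), so the per-device private area always starts at an aligned offset. A toy illustration of that computation; the structure here is invented for the demo, the real struct neighbour is far larger:

#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define NEIGH_PRIV_ALIGN	sizeof(long long)	/* matches the kernel's choice */

struct toy_neighbour {
	void		*next;
	long		flags;
	unsigned char	primary_key[];	/* key bytes follow the fixed part */
};

int main(void)
{
	size_t key_len = 4;	/* e.g. an IPv4 address for ARP */
	size_t entry_size = ALIGN(offsetof(struct toy_neighbour, primary_key)
				  + key_len, NEIGH_PRIV_ALIGN);

	/* dev->neigh_priv_len bytes then start exactly at entry_size */
	printf("entry_size = %zu\n", entry_size);
	return 0;
}
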
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
new file mode 100644
index 000000000000..0f6bb6f8d391
--- /dev/null
+++ b/net/core/net-procfs.c
@@ -0,0 +1,412 @@
1#include <linux/netdevice.h>
2#include <linux/proc_fs.h>
3#include <linux/seq_file.h>
4#include <net/wext.h>
5
6#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
7
8#define get_bucket(x) ((x) >> BUCKET_SPACE)
9#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
10#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
11
12extern struct list_head ptype_all __read_mostly;
13extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
14
15static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
16{
17 struct net *net = seq_file_net(seq);
18 struct net_device *dev;
19 struct hlist_node *p;
20 struct hlist_head *h;
21 unsigned int count = 0, offset = get_offset(*pos);
22
23 h = &net->dev_name_head[get_bucket(*pos)];
24 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
25 if (++count == offset)
26 return dev;
27 }
28
29 return NULL;
30}
31
32static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
33{
34 struct net_device *dev;
35 unsigned int bucket;
36
37 do {
38 dev = dev_from_same_bucket(seq, pos);
39 if (dev)
40 return dev;
41
42 bucket = get_bucket(*pos) + 1;
43 *pos = set_bucket_offset(bucket, 1);
44 } while (bucket < NETDEV_HASHENTRIES);
45
46 return NULL;
47}
48
49/*
50 * This is invoked by the /proc filesystem handler to display a device
51 * in detail.
52 */
53static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
54 __acquires(RCU)
55{
56 rcu_read_lock();
57 if (!*pos)
58 return SEQ_START_TOKEN;
59
60 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
61 return NULL;
62
63 return dev_from_bucket(seq, pos);
64}
65
66static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
67{
68 ++*pos;
69 return dev_from_bucket(seq, pos);
70}
71
72static void dev_seq_stop(struct seq_file *seq, void *v)
73 __releases(RCU)
74{
75 rcu_read_unlock();
76}
77
78static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
79{
80 struct rtnl_link_stats64 temp;
81 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
82
83 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
84 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
85 dev->name, stats->rx_bytes, stats->rx_packets,
86 stats->rx_errors,
87 stats->rx_dropped + stats->rx_missed_errors,
88 stats->rx_fifo_errors,
89 stats->rx_length_errors + stats->rx_over_errors +
90 stats->rx_crc_errors + stats->rx_frame_errors,
91 stats->rx_compressed, stats->multicast,
92 stats->tx_bytes, stats->tx_packets,
93 stats->tx_errors, stats->tx_dropped,
94 stats->tx_fifo_errors, stats->collisions,
95 stats->tx_carrier_errors +
96 stats->tx_aborted_errors +
97 stats->tx_window_errors +
98 stats->tx_heartbeat_errors,
99 stats->tx_compressed);
100}
101
102/*
103 * Called from the PROCfs module. This now uses the new arbitrary sized
104 * /proc/net interface to create /proc/net/dev
105 */
106static int dev_seq_show(struct seq_file *seq, void *v)
107{
108 if (v == SEQ_START_TOKEN)
109 seq_puts(seq, "Inter-| Receive "
110 " | Transmit\n"
111 " face |bytes packets errs drop fifo frame "
112 "compressed multicast|bytes packets errs "
113 "drop fifo colls carrier compressed\n");
114 else
115 dev_seq_printf_stats(seq, v);
116 return 0;
117}
118
119static struct softnet_data *softnet_get_online(loff_t *pos)
120{
121 struct softnet_data *sd = NULL;
122
123 while (*pos < nr_cpu_ids)
124 if (cpu_online(*pos)) {
125 sd = &per_cpu(softnet_data, *pos);
126 break;
127 } else
128 ++*pos;
129 return sd;
130}
131
132static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
133{
134 return softnet_get_online(pos);
135}
136
137static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
138{
139 ++*pos;
140 return softnet_get_online(pos);
141}
142
143static void softnet_seq_stop(struct seq_file *seq, void *v)
144{
145}
146
147static int softnet_seq_show(struct seq_file *seq, void *v)
148{
149 struct softnet_data *sd = v;
150
151 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
152 sd->processed, sd->dropped, sd->time_squeeze, 0,
153 0, 0, 0, 0, /* was fastroute */
154 sd->cpu_collision, sd->received_rps);
155 return 0;
156}
157
158static const struct seq_operations dev_seq_ops = {
159 .start = dev_seq_start,
160 .next = dev_seq_next,
161 .stop = dev_seq_stop,
162 .show = dev_seq_show,
163};
164
165static int dev_seq_open(struct inode *inode, struct file *file)
166{
167 return seq_open_net(inode, file, &dev_seq_ops,
168 sizeof(struct seq_net_private));
169}
170
171static const struct file_operations dev_seq_fops = {
172 .owner = THIS_MODULE,
173 .open = dev_seq_open,
174 .read = seq_read,
175 .llseek = seq_lseek,
176 .release = seq_release_net,
177};
178
179static const struct seq_operations softnet_seq_ops = {
180 .start = softnet_seq_start,
181 .next = softnet_seq_next,
182 .stop = softnet_seq_stop,
183 .show = softnet_seq_show,
184};
185
186static int softnet_seq_open(struct inode *inode, struct file *file)
187{
188 return seq_open(file, &softnet_seq_ops);
189}
190
191static const struct file_operations softnet_seq_fops = {
192 .owner = THIS_MODULE,
193 .open = softnet_seq_open,
194 .read = seq_read,
195 .llseek = seq_lseek,
196 .release = seq_release,
197};
198
199static void *ptype_get_idx(loff_t pos)
200{
201 struct packet_type *pt = NULL;
202 loff_t i = 0;
203 int t;
204
205 list_for_each_entry_rcu(pt, &ptype_all, list) {
206 if (i == pos)
207 return pt;
208 ++i;
209 }
210
211 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
212 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
213 if (i == pos)
214 return pt;
215 ++i;
216 }
217 }
218 return NULL;
219}
220
221static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
222 __acquires(RCU)
223{
224 rcu_read_lock();
225 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
226}
227
228static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
229{
230 struct packet_type *pt;
231 struct list_head *nxt;
232 int hash;
233
234 ++*pos;
235 if (v == SEQ_START_TOKEN)
236 return ptype_get_idx(0);
237
238 pt = v;
239 nxt = pt->list.next;
240 if (pt->type == htons(ETH_P_ALL)) {
241 if (nxt != &ptype_all)
242 goto found;
243 hash = 0;
244 nxt = ptype_base[0].next;
245 } else
246 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
247
248 while (nxt == &ptype_base[hash]) {
249 if (++hash >= PTYPE_HASH_SIZE)
250 return NULL;
251 nxt = ptype_base[hash].next;
252 }
253found:
254 return list_entry(nxt, struct packet_type, list);
255}
256
257static void ptype_seq_stop(struct seq_file *seq, void *v)
258 __releases(RCU)
259{
260 rcu_read_unlock();
261}
262
263static int ptype_seq_show(struct seq_file *seq, void *v)
264{
265 struct packet_type *pt = v;
266
267 if (v == SEQ_START_TOKEN)
268 seq_puts(seq, "Type Device Function\n");
269 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
270 if (pt->type == htons(ETH_P_ALL))
271 seq_puts(seq, "ALL ");
272 else
273 seq_printf(seq, "%04x", ntohs(pt->type));
274
275 seq_printf(seq, " %-8s %pF\n",
276 pt->dev ? pt->dev->name : "", pt->func);
277 }
278
279 return 0;
280}
281
282static const struct seq_operations ptype_seq_ops = {
283 .start = ptype_seq_start,
284 .next = ptype_seq_next,
285 .stop = ptype_seq_stop,
286 .show = ptype_seq_show,
287};
288
289static int ptype_seq_open(struct inode *inode, struct file *file)
290{
291 return seq_open_net(inode, file, &ptype_seq_ops,
292 sizeof(struct seq_net_private));
293}
294
295static const struct file_operations ptype_seq_fops = {
296 .owner = THIS_MODULE,
297 .open = ptype_seq_open,
298 .read = seq_read,
299 .llseek = seq_lseek,
300 .release = seq_release_net,
301};
302
303
304static int __net_init dev_proc_net_init(struct net *net)
305{
306 int rc = -ENOMEM;
307
308 if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
309 goto out;
310 if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
311 &softnet_seq_fops))
312 goto out_dev;
313 if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
314 goto out_softnet;
315
316 if (wext_proc_init(net))
317 goto out_ptype;
318 rc = 0;
319out:
320 return rc;
321out_ptype:
322 remove_proc_entry("ptype", net->proc_net);
323out_softnet:
324 remove_proc_entry("softnet_stat", net->proc_net);
325out_dev:
326 remove_proc_entry("dev", net->proc_net);
327 goto out;
328}
329
330static void __net_exit dev_proc_net_exit(struct net *net)
331{
332 wext_proc_exit(net);
333
334 remove_proc_entry("ptype", net->proc_net);
335 remove_proc_entry("softnet_stat", net->proc_net);
336 remove_proc_entry("dev", net->proc_net);
337}
338
339static struct pernet_operations __net_initdata dev_proc_ops = {
340 .init = dev_proc_net_init,
341 .exit = dev_proc_net_exit,
342};
343
344static int dev_mc_seq_show(struct seq_file *seq, void *v)
345{
346 struct netdev_hw_addr *ha;
347 struct net_device *dev = v;
348
349 if (v == SEQ_START_TOKEN)
350 return 0;
351
352 netif_addr_lock_bh(dev);
353 netdev_for_each_mc_addr(ha, dev) {
354 int i;
355
356 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
357 dev->name, ha->refcount, ha->global_use);
358
359 for (i = 0; i < dev->addr_len; i++)
360 seq_printf(seq, "%02x", ha->addr[i]);
361
362 seq_putc(seq, '\n');
363 }
364 netif_addr_unlock_bh(dev);
365 return 0;
366}
367
368static const struct seq_operations dev_mc_seq_ops = {
369 .start = dev_seq_start,
370 .next = dev_seq_next,
371 .stop = dev_seq_stop,
372 .show = dev_mc_seq_show,
373};
374
375static int dev_mc_seq_open(struct inode *inode, struct file *file)
376{
377 return seq_open_net(inode, file, &dev_mc_seq_ops,
378 sizeof(struct seq_net_private));
379}
380
381static const struct file_operations dev_mc_seq_fops = {
382 .owner = THIS_MODULE,
383 .open = dev_mc_seq_open,
384 .read = seq_read,
385 .llseek = seq_lseek,
386 .release = seq_release_net,
387};
388
389static int __net_init dev_mc_net_init(struct net *net)
390{
391 if (!proc_create("dev_mcast", 0, net->proc_net, &dev_mc_seq_fops))
392 return -ENOMEM;
393 return 0;
394}
395
396static void __net_exit dev_mc_net_exit(struct net *net)
397{
398 remove_proc_entry("dev_mcast", net->proc_net);
399}
400
401static struct pernet_operations __net_initdata dev_mc_net_ops = {
402 .init = dev_mc_net_init,
403 .exit = dev_mc_net_exit,
404};
405
406int __init dev_proc_init(void)
407{
408 int ret = register_pernet_subsys(&dev_proc_ops);
409 if (!ret)
410 return register_pernet_subsys(&dev_mc_net_ops);
411 return ret;
412}
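
The new net-procfs.c packs a hash-bucket index and an in-bucket offset into the single loff_t that the seq_file core hands around, which is what dev_from_bucket() decodes. A standalone sketch of that encoding; the constant here is invented for the demo, the kernel derives BUCKET_SPACE from NETDEV_HASHBITS:

#include <stdio.h>

#define HASHBITS	8
#define BUCKET_SPACE	(32 - HASHBITS - 1)

#define get_bucket(x)		((x) >> BUCKET_SPACE)
#define get_offset(x)		((x) & ((1ul << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o)	((b) << BUCKET_SPACE | (o))

int main(void)
{
	unsigned long pos = set_bucket_offset(3ul, 17ul);

	/* round-trips: bucket 3, offset 17 */
	printf("bucket=%lu offset=%lu\n", get_bucket(pos), get_offset(pos));
	return 0;
}
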
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 334efd5d67a9..7427ab5e27d8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
21#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/pm_runtime.h>
24 25
25#include "net-sysfs.h" 26#include "net-sysfs.h"
26 27
@@ -126,6 +127,19 @@ static ssize_t show_broadcast(struct device *dev,
126 return -EINVAL; 127 return -EINVAL;
127} 128}
128 129
130static int change_carrier(struct net_device *net, unsigned long new_carrier)
131{
132 if (!netif_running(net))
133 return -EINVAL;
134 return dev_change_carrier(net, (bool) new_carrier);
135}
136
137static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
138 const char *buf, size_t len)
139{
140 return netdev_store(dev, attr, buf, len, change_carrier);
141}
142
129static ssize_t show_carrier(struct device *dev, 143static ssize_t show_carrier(struct device *dev,
130 struct device_attribute *attr, char *buf) 144 struct device_attribute *attr, char *buf)
131{ 145{
@@ -331,7 +345,7 @@ static struct device_attribute net_class_attributes[] = {
331 __ATTR(link_mode, S_IRUGO, show_link_mode, NULL), 345 __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
332 __ATTR(address, S_IRUGO, show_address, NULL), 346 __ATTR(address, S_IRUGO, show_address, NULL),
333 __ATTR(broadcast, S_IRUGO, show_broadcast, NULL), 347 __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
334 __ATTR(carrier, S_IRUGO, show_carrier, NULL), 348 __ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
335 __ATTR(speed, S_IRUGO, show_speed, NULL), 349 __ATTR(speed, S_IRUGO, show_speed, NULL),
336 __ATTR(duplex, S_IRUGO, show_duplex, NULL), 350 __ATTR(duplex, S_IRUGO, show_duplex, NULL),
337 __ATTR(dormant, S_IRUGO, show_dormant, NULL), 351 __ATTR(dormant, S_IRUGO, show_dormant, NULL),
@@ -989,68 +1003,14 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
989 return len; 1003 return len;
990} 1004}
991 1005
992static DEFINE_MUTEX(xps_map_mutex);
993#define xmap_dereference(P) \
994 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
995
996static void xps_queue_release(struct netdev_queue *queue)
997{
998 struct net_device *dev = queue->dev;
999 struct xps_dev_maps *dev_maps;
1000 struct xps_map *map;
1001 unsigned long index;
1002 int i, pos, nonempty = 0;
1003
1004 index = get_netdev_queue_index(queue);
1005
1006 mutex_lock(&xps_map_mutex);
1007 dev_maps = xmap_dereference(dev->xps_maps);
1008
1009 if (dev_maps) {
1010 for_each_possible_cpu(i) {
1011 map = xmap_dereference(dev_maps->cpu_map[i]);
1012 if (!map)
1013 continue;
1014
1015 for (pos = 0; pos < map->len; pos++)
1016 if (map->queues[pos] == index)
1017 break;
1018
1019 if (pos < map->len) {
1020 if (map->len > 1)
1021 map->queues[pos] =
1022 map->queues[--map->len];
1023 else {
1024 RCU_INIT_POINTER(dev_maps->cpu_map[i],
1025 NULL);
1026 kfree_rcu(map, rcu);
1027 map = NULL;
1028 }
1029 }
1030 if (map)
1031 nonempty = 1;
1032 }
1033
1034 if (!nonempty) {
1035 RCU_INIT_POINTER(dev->xps_maps, NULL);
1036 kfree_rcu(dev_maps, rcu);
1037 }
1038 }
1039 mutex_unlock(&xps_map_mutex);
1040}
1041
1042static ssize_t store_xps_map(struct netdev_queue *queue, 1006static ssize_t store_xps_map(struct netdev_queue *queue,
1043 struct netdev_queue_attribute *attribute, 1007 struct netdev_queue_attribute *attribute,
1044 const char *buf, size_t len) 1008 const char *buf, size_t len)
1045{ 1009{
1046 struct net_device *dev = queue->dev; 1010 struct net_device *dev = queue->dev;
1047 cpumask_var_t mask;
1048 int err, i, cpu, pos, map_len, alloc_len, need_set;
1049 unsigned long index; 1011 unsigned long index;
1050 struct xps_map *map, *new_map; 1012 cpumask_var_t mask;
1051 struct xps_dev_maps *dev_maps, *new_dev_maps; 1013 int err;
1052 int nonempty = 0;
1053 int numa_node_id = -2;
1054 1014
1055 if (!capable(CAP_NET_ADMIN)) 1015 if (!capable(CAP_NET_ADMIN))
1056 return -EPERM; 1016 return -EPERM;
@@ -1066,105 +1026,11 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1066 return err; 1026 return err;
1067 } 1027 }
1068 1028
1069 new_dev_maps = kzalloc(max_t(unsigned int, 1029 err = netif_set_xps_queue(dev, mask, index);
1070 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
1071 if (!new_dev_maps) {
1072 free_cpumask_var(mask);
1073 return -ENOMEM;
1074 }
1075
1076 mutex_lock(&xps_map_mutex);
1077
1078 dev_maps = xmap_dereference(dev->xps_maps);
1079
1080 for_each_possible_cpu(cpu) {
1081 map = dev_maps ?
1082 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1083 new_map = map;
1084 if (map) {
1085 for (pos = 0; pos < map->len; pos++)
1086 if (map->queues[pos] == index)
1087 break;
1088 map_len = map->len;
1089 alloc_len = map->alloc_len;
1090 } else
1091 pos = map_len = alloc_len = 0;
1092
1093 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
1094#ifdef CONFIG_NUMA
1095 if (need_set) {
1096 if (numa_node_id == -2)
1097 numa_node_id = cpu_to_node(cpu);
1098 else if (numa_node_id != cpu_to_node(cpu))
1099 numa_node_id = -1;
1100 }
1101#endif
1102 if (need_set && pos >= map_len) {
1103 /* Need to add queue to this CPU's map */
1104 if (map_len >= alloc_len) {
1105 alloc_len = alloc_len ?
1106 2 * alloc_len : XPS_MIN_MAP_ALLOC;
1107 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
1108 GFP_KERNEL,
1109 cpu_to_node(cpu));
1110 if (!new_map)
1111 goto error;
1112 new_map->alloc_len = alloc_len;
1113 for (i = 0; i < map_len; i++)
1114 new_map->queues[i] = map->queues[i];
1115 new_map->len = map_len;
1116 }
1117 new_map->queues[new_map->len++] = index;
1118 } else if (!need_set && pos < map_len) {
1119 /* Need to remove queue from this CPU's map */
1120 if (map_len > 1)
1121 new_map->queues[pos] =
1122 new_map->queues[--new_map->len];
1123 else
1124 new_map = NULL;
1125 }
1126 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
1127 }
1128
1129 /* Cleanup old maps */
1130 for_each_possible_cpu(cpu) {
1131 map = dev_maps ?
1132 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1133 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1134 kfree_rcu(map, rcu);
1135 if (new_dev_maps->cpu_map[cpu])
1136 nonempty = 1;
1137 }
1138
1139 if (nonempty) {
1140 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1141 } else {
1142 kfree(new_dev_maps);
1143 RCU_INIT_POINTER(dev->xps_maps, NULL);
1144 }
1145
1146 if (dev_maps)
1147 kfree_rcu(dev_maps, rcu);
1148
1149 netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
1150 NUMA_NO_NODE);
1151
1152 mutex_unlock(&xps_map_mutex);
1153 1030
1154 free_cpumask_var(mask); 1031 free_cpumask_var(mask);
1155 return len;
1156 1032
1157error: 1033 return err ? : len;
1158 mutex_unlock(&xps_map_mutex);
1159
1160 if (new_dev_maps)
1161 for_each_possible_cpu(i)
1162 kfree(rcu_dereference_protected(
1163 new_dev_maps->cpu_map[i],
1164 1));
1165 kfree(new_dev_maps);
1166 free_cpumask_var(mask);
1167 return -ENOMEM;
1168} 1034}
1169 1035
1170static struct netdev_queue_attribute xps_cpus_attribute = 1036static struct netdev_queue_attribute xps_cpus_attribute =
@@ -1183,10 +1049,6 @@ static void netdev_queue_release(struct kobject *kobj)
1183{ 1049{
1184 struct netdev_queue *queue = to_netdev_queue(kobj); 1050 struct netdev_queue *queue = to_netdev_queue(kobj);
1185 1051
1186#ifdef CONFIG_XPS
1187 xps_queue_release(queue);
1188#endif
1189
1190 memset(kobj, 0, sizeof(*kobj)); 1052 memset(kobj, 0, sizeof(*kobj));
1191 dev_put(queue->dev); 1053 dev_put(queue->dev);
1192} 1054}
@@ -1334,7 +1196,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
1334}; 1196};
1335EXPORT_SYMBOL_GPL(net_ns_type_operations); 1197EXPORT_SYMBOL_GPL(net_ns_type_operations);
1336 1198
1337#ifdef CONFIG_HOTPLUG
1338static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1199static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1339{ 1200{
1340 struct net_device *dev = to_net_dev(d); 1201 struct net_device *dev = to_net_dev(d);
@@ -1353,7 +1214,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1353exit: 1214exit:
1354 return retval; 1215 return retval;
1355} 1216}
1356#endif
1357 1217
1358/* 1218/*
1359 * netdev_release -- destroy and free a dead device. 1219 * netdev_release -- destroy and free a dead device.
@@ -1382,9 +1242,7 @@ static struct class net_class = {
1382#ifdef CONFIG_SYSFS 1242#ifdef CONFIG_SYSFS
1383 .dev_attrs = net_class_attributes, 1243 .dev_attrs = net_class_attributes,
1384#endif /* CONFIG_SYSFS */ 1244#endif /* CONFIG_SYSFS */
1385#ifdef CONFIG_HOTPLUG
1386 .dev_uevent = netdev_uevent, 1245 .dev_uevent = netdev_uevent,
1387#endif
1388 .ns_type = &net_ns_type_operations, 1246 .ns_type = &net_ns_type_operations,
1389 .namespace = net_namespace, 1247 .namespace = net_namespace,
1390}; 1248};
@@ -1400,6 +1258,8 @@ void netdev_unregister_kobject(struct net_device * net)
1400 1258
1401 remove_queue_kobjects(net); 1259 remove_queue_kobjects(net);
1402 1260
1261 pm_runtime_set_memalloc_noio(dev, false);
1262
1403 device_del(dev); 1263 device_del(dev);
1404} 1264}
1405 1265
@@ -1444,6 +1304,8 @@ int netdev_register_kobject(struct net_device *net)
1444 return error; 1304 return error;
1445 } 1305 }
1446 1306
1307 pm_runtime_set_memalloc_noio(dev, true);
1308
1447 return error; 1309 return error;
1448} 1310}
1449 1311
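
The large store_xps_map() body disappears because the map-rebuilding logic moved into the core helper netif_set_xps_queue(), which drivers can also call directly (it only exists when CONFIG_XPS is enabled). A hedged sketch of driver-side use, with invented function names:

#include <linux/netdevice.h>
#include <linux/cpumask.h>

/* seed a simple XPS default: one CPU per TX queue, wrapping over the
 * online CPUs; a sketch, not a drop-in policy for any real driver */
static void example_seed_xps(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_t mask;

		cpumask_clear(&mask);
		cpumask_set_cpu(i % num_online_cpus(), &mask);
		netif_set_xps_queue(dev, &mask, i);
	}
}
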
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6456439cbbd9..8acce01b6dab 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -381,6 +381,21 @@ struct net *get_net_ns_by_pid(pid_t pid)
381} 381}
382EXPORT_SYMBOL_GPL(get_net_ns_by_pid); 382EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
383 383
384static __net_init int net_ns_net_init(struct net *net)
385{
386 return proc_alloc_inum(&net->proc_inum);
387}
388
389static __net_exit void net_ns_net_exit(struct net *net)
390{
391 proc_free_inum(net->proc_inum);
392}
393
394static struct pernet_operations __net_initdata net_ns_ops = {
395 .init = net_ns_net_init,
396 .exit = net_ns_net_exit,
397};
398
384static int __init net_ns_init(void) 399static int __init net_ns_init(void)
385{ 400{
386 struct net_generic *ng; 401 struct net_generic *ng;
@@ -412,6 +427,8 @@ static int __init net_ns_init(void)
412 427
413 mutex_unlock(&net_mutex); 428 mutex_unlock(&net_mutex);
414 429
430 register_pernet_subsys(&net_ns_ops);
431
415 return 0; 432 return 0;
416} 433}
417 434
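
net_ns_net_init/net_ns_net_exit follow the standard pernet_operations pattern: the init hook runs for every network namespace, including those that already exist at registration time, and the exit hook runs on teardown. A minimal template of the same pattern, with invented names:

#include <linux/init.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* allocate and initialize per-namespace state here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* registration replays .init for all existing namespaces */
static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}
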
@@ -630,16 +647,29 @@ static void netns_put(void *ns)
630 647
631static int netns_install(struct nsproxy *nsproxy, void *ns) 648static int netns_install(struct nsproxy *nsproxy, void *ns)
632{ 649{
650 struct net *net = ns;
651
652 if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
653 !nsown_capable(CAP_SYS_ADMIN))
654 return -EPERM;
655
633 put_net(nsproxy->net_ns); 656 put_net(nsproxy->net_ns);
634 nsproxy->net_ns = get_net(ns); 657 nsproxy->net_ns = get_net(net);
635 return 0; 658 return 0;
636} 659}
637 660
661static unsigned int netns_inum(void *ns)
662{
663 struct net *net = ns;
664 return net->proc_inum;
665}
666
638const struct proc_ns_operations netns_operations = { 667const struct proc_ns_operations netns_operations = {
639 .name = "net", 668 .name = "net",
640 .type = CLONE_NEWNET, 669 .type = CLONE_NEWNET,
641 .get = netns_get, 670 .get = netns_get,
642 .put = netns_put, 671 .put = netns_put,
643 .install = netns_install, 672 .install = netns_install,
673 .inum = netns_inum,
644}; 674};
645#endif 675#endif
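
The new .inum hook exposes net->proc_inum as the inode number of /proc/<pid>/ns/net, which is what lets userspace compare namespaces cheaply. A sketch using only the standard procfs path:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* two processes share a network namespace iff these inodes match */
	if (stat("/proc/self/ns/net", &st) == 0)
		printf("net namespace inode: %lu\n", (unsigned long)st.st_ino);
	return 0;
}
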
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3151acf5ec13..fa32899006a2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -29,6 +29,9 @@
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30#include <net/tcp.h> 30#include <net/tcp.h>
31#include <net/udp.h> 31#include <net/udp.h>
32#include <net/addrconf.h>
33#include <net/ndisc.h>
34#include <net/ip6_checksum.h>
32#include <asm/unaligned.h> 35#include <asm/unaligned.h>
33#include <trace/events/napi.h> 36#include <trace/events/napi.h>
34 37
@@ -44,6 +47,8 @@ static struct sk_buff_head skb_pool;
44 47
45static atomic_t trapped; 48static atomic_t trapped;
46 49
50static struct srcu_struct netpoll_srcu;
51
47#define USEC_PER_POLL 50 52#define USEC_PER_POLL 50
48#define NETPOLL_RX_ENABLED 1 53#define NETPOLL_RX_ENABLED 1
49#define NETPOLL_RX_DROP 2 54#define NETPOLL_RX_DROP 2
@@ -55,7 +60,8 @@ static atomic_t trapped;
55 MAX_UDP_CHUNK) 60 MAX_UDP_CHUNK)
56 61
57static void zap_completion_queue(void); 62static void zap_completion_queue(void);
58static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); 63static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
64static void netpoll_async_cleanup(struct work_struct *work);
59 65
60static unsigned int carrier_timeout = 4; 66static unsigned int carrier_timeout = 4;
61module_param(carrier_timeout, uint, 0644); 67module_param(carrier_timeout, uint, 0644);
@@ -181,13 +187,13 @@ static void poll_napi(struct net_device *dev)
181 } 187 }
182} 188}
183 189
184static void service_arp_queue(struct netpoll_info *npi) 190static void service_neigh_queue(struct netpoll_info *npi)
185{ 191{
186 if (npi) { 192 if (npi) {
187 struct sk_buff *skb; 193 struct sk_buff *skb;
188 194
189 while ((skb = skb_dequeue(&npi->arp_tx))) 195 while ((skb = skb_dequeue(&npi->neigh_tx)))
190 netpoll_arp_reply(skb, npi); 196 netpoll_neigh_reply(skb, npi);
191 } 197 }
192} 198}
193 199
@@ -196,35 +202,76 @@ static void netpoll_poll_dev(struct net_device *dev)
196 const struct net_device_ops *ops; 202 const struct net_device_ops *ops;
197 struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); 203 struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
198 204
199 if (!dev || !netif_running(dev)) 205 /* Don't do any rx activity if the dev_lock mutex is held;
206 * the dev_open/close paths use this to block netpoll activity
207 * while changing device state.
208 */
209 if (!mutex_trylock(&ni->dev_lock))
200 return; 210 return;
201 211
212 if (!netif_running(dev)) {
213 mutex_unlock(&ni->dev_lock);
214 return;
215 }
216
202 ops = dev->netdev_ops; 217 ops = dev->netdev_ops;
203 if (!ops->ndo_poll_controller) 218 if (!ops->ndo_poll_controller) {
219 mutex_unlock(&ni->dev_lock);
204 return; 220 return;
221 }
205 222
206 /* Process pending work on NIC */ 223 /* Process pending work on NIC */
207 ops->ndo_poll_controller(dev); 224 ops->ndo_poll_controller(dev);
208 225
209 poll_napi(dev); 226 poll_napi(dev);
210 227
228 mutex_unlock(&ni->dev_lock);
229
211 if (dev->flags & IFF_SLAVE) { 230 if (dev->flags & IFF_SLAVE) {
212 if (ni) { 231 if (ni) {
213 struct net_device *bond_dev = dev->master; 232 struct net_device *bond_dev;
214 struct sk_buff *skb; 233 struct sk_buff *skb;
215 struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); 234 struct netpoll_info *bond_ni;
216 while ((skb = skb_dequeue(&ni->arp_tx))) { 235
236 bond_dev = netdev_master_upper_dev_get_rcu(dev);
237 bond_ni = rcu_dereference_bh(bond_dev->npinfo);
238 while ((skb = skb_dequeue(&ni->neigh_tx))) {
217 skb->dev = bond_dev; 239 skb->dev = bond_dev;
218 skb_queue_tail(&bond_ni->arp_tx, skb); 240 skb_queue_tail(&bond_ni->neigh_tx, skb);
219 } 241 }
220 } 242 }
221 } 243 }
222 244
223 service_arp_queue(ni); 245 service_neigh_queue(ni);
224 246
225 zap_completion_queue(); 247 zap_completion_queue();
226} 248}
227 249
250int netpoll_rx_disable(struct net_device *dev)
251{
252 struct netpoll_info *ni;
253 int idx;
254 might_sleep();
255 idx = srcu_read_lock(&netpoll_srcu);
256 ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
257 if (ni)
258 mutex_lock(&ni->dev_lock);
259 srcu_read_unlock(&netpoll_srcu, idx);
260 return 0;
261}
262EXPORT_SYMBOL(netpoll_rx_disable);
263
264void netpoll_rx_enable(struct net_device *dev)
265{
266 struct netpoll_info *ni;
267 rcu_read_lock();
268 ni = rcu_dereference(dev->npinfo);
269 if (ni)
270 mutex_unlock(&ni->dev_lock);
271 rcu_read_unlock();
272}
273EXPORT_SYMBOL(netpoll_rx_enable);
274
228static void refill_skbs(void) 275static void refill_skbs(void)
229{ 276{
230 struct sk_buff *skb; 277 struct sk_buff *skb;
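
netpoll_rx_disable/enable bracket device state changes: an SRCU read section protects the npinfo lookup while the dev_lock mutex actually blocks the poller, and __netpoll_cleanup later uses synchronize_srcu() to wait out any readers before the info goes away. Reduced to its skeleton, the locking shape looks like this (a sketch, not kernel code; the srcu_struct still needs init_srcu_struct(), mirroring netpoll_init):

#include <linux/srcu.h>
#include <linux/mutex.h>

static struct srcu_struct example_srcu;		/* init_srcu_struct() at boot */
static DEFINE_MUTEX(example_dev_lock);
static void __rcu *example_info;		/* stands in for dev->npinfo */

static void example_block_poller(void)
{
	int idx = srcu_read_lock(&example_srcu);
	void *info = srcu_dereference(example_info, &example_srcu);

	if (info)
		mutex_lock(&example_dev_lock);	/* poll path trylocks this */
	srcu_read_unlock(&example_srcu, idx);
}

static void example_teardown(void)
{
	/* publish NULL, then wait until no reader can still see the
	 * old pointer before freeing what it referenced */
	rcu_assign_pointer(example_info, NULL);
	synchronize_srcu(&example_srcu);
}
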
@@ -381,9 +428,14 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
381 struct iphdr *iph; 428 struct iphdr *iph;
382 struct ethhdr *eth; 429 struct ethhdr *eth;
383 static atomic_t ip_ident; 430 static atomic_t ip_ident;
431 struct ipv6hdr *ip6h;
384 432
385 udp_len = len + sizeof(*udph); 433 udp_len = len + sizeof(*udph);
386 ip_len = udp_len + sizeof(*iph); 434 if (np->ipv6)
435 ip_len = udp_len + sizeof(*ip6h);
436 else
437 ip_len = udp_len + sizeof(*iph);
438
387 total_len = ip_len + LL_RESERVED_SPACE(np->dev); 439 total_len = ip_len + LL_RESERVED_SPACE(np->dev);
388 440
389 skb = find_skb(np, total_len + np->dev->needed_tailroom, 441 skb = find_skb(np, total_len + np->dev->needed_tailroom,
@@ -400,34 +452,66 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
400 udph->source = htons(np->local_port); 452 udph->source = htons(np->local_port);
401 udph->dest = htons(np->remote_port); 453 udph->dest = htons(np->remote_port);
402 udph->len = htons(udp_len); 454 udph->len = htons(udp_len);
403 udph->check = 0; 455
404 udph->check = csum_tcpudp_magic(np->local_ip, 456 if (np->ipv6) {
405 np->remote_ip, 457 udph->check = 0;
406 udp_len, IPPROTO_UDP, 458 udph->check = csum_ipv6_magic(&np->local_ip.in6,
407 csum_partial(udph, udp_len, 0)); 459 &np->remote_ip.in6,
408 if (udph->check == 0) 460 udp_len, IPPROTO_UDP,
409 udph->check = CSUM_MANGLED_0; 461 csum_partial(udph, udp_len, 0));
410 462 if (udph->check == 0)
411 skb_push(skb, sizeof(*iph)); 463 udph->check = CSUM_MANGLED_0;
412 skb_reset_network_header(skb); 464
413 iph = ip_hdr(skb); 465 skb_push(skb, sizeof(*ip6h));
414 466 skb_reset_network_header(skb);
415 /* iph->version = 4; iph->ihl = 5; */ 467 ip6h = ipv6_hdr(skb);
416 put_unaligned(0x45, (unsigned char *)iph); 468
417 iph->tos = 0; 469 /* ip6h->version = 6; ip6h->priority = 0; */
418 put_unaligned(htons(ip_len), &(iph->tot_len)); 470 put_unaligned(0x60, (unsigned char *)ip6h);
419 iph->id = htons(atomic_inc_return(&ip_ident)); 471 ip6h->flow_lbl[0] = 0;
420 iph->frag_off = 0; 472 ip6h->flow_lbl[1] = 0;
421 iph->ttl = 64; 473 ip6h->flow_lbl[2] = 0;
422 iph->protocol = IPPROTO_UDP; 474
423 iph->check = 0; 475 ip6h->payload_len = htons(sizeof(struct udphdr) + len);
424 put_unaligned(np->local_ip, &(iph->saddr)); 476 ip6h->nexthdr = IPPROTO_UDP;
425 put_unaligned(np->remote_ip, &(iph->daddr)); 477 ip6h->hop_limit = 32;
426 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 478 ip6h->saddr = np->local_ip.in6;
427 479 ip6h->daddr = np->remote_ip.in6;
428 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); 480
429 skb_reset_mac_header(skb); 481 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
430 skb->protocol = eth->h_proto = htons(ETH_P_IP); 482 skb_reset_mac_header(skb);
483 skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
484 } else {
485 udph->check = 0;
486 udph->check = csum_tcpudp_magic(np->local_ip.ip,
487 np->remote_ip.ip,
488 udp_len, IPPROTO_UDP,
489 csum_partial(udph, udp_len, 0));
490 if (udph->check == 0)
491 udph->check = CSUM_MANGLED_0;
492
493 skb_push(skb, sizeof(*iph));
494 skb_reset_network_header(skb);
495 iph = ip_hdr(skb);
496
497 /* iph->version = 4; iph->ihl = 5; */
498 put_unaligned(0x45, (unsigned char *)iph);
499 iph->tos = 0;
500 put_unaligned(htons(ip_len), &(iph->tot_len));
501 iph->id = htons(atomic_inc_return(&ip_ident));
502 iph->frag_off = 0;
503 iph->ttl = 64;
504 iph->protocol = IPPROTO_UDP;
505 iph->check = 0;
506 put_unaligned(np->local_ip.ip, &(iph->saddr));
507 put_unaligned(np->remote_ip.ip, &(iph->daddr));
508 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
509
510 eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
511 skb_reset_mac_header(skb);
512 skb->protocol = eth->h_proto = htons(ETH_P_IP);
513 }
514
431 memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN); 515 memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
432 memcpy(eth->h_dest, np->remote_mac, ETH_ALEN); 516 memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);
433 517
@@ -437,18 +521,16 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
437} 521}
438EXPORT_SYMBOL(netpoll_send_udp); 522EXPORT_SYMBOL(netpoll_send_udp);
439 523
440static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) 524static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
441{ 525{
442 struct arphdr *arp; 526 int size, type = ARPOP_REPLY;
443 unsigned char *arp_ptr;
444 int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
445 __be32 sip, tip; 527 __be32 sip, tip;
446 unsigned char *sha; 528 unsigned char *sha;
447 struct sk_buff *send_skb; 529 struct sk_buff *send_skb;
448 struct netpoll *np, *tmp; 530 struct netpoll *np, *tmp;
449 unsigned long flags; 531 unsigned long flags;
450 int hlen, tlen; 532 int hlen, tlen;
451 int hits = 0; 533 int hits = 0, proto;
452 534
453 if (list_empty(&npinfo->rx_np)) 535 if (list_empty(&npinfo->rx_np))
454 return; 536 return;
@@ -466,94 +548,214 @@ static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
466 if (!hits) 548 if (!hits)
467 return; 549 return;
468 550
469 /* No arp on this interface */ 551 proto = ntohs(eth_hdr(skb)->h_proto);
470 if (skb->dev->flags & IFF_NOARP) 552 if (proto == ETH_P_IP) {
471 return; 553 struct arphdr *arp;
472 554 unsigned char *arp_ptr;
473 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 555 /* No arp on this interface */
474 return; 556 if (skb->dev->flags & IFF_NOARP)
557 return;
475 558
476 skb_reset_network_header(skb); 559 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
477 skb_reset_transport_header(skb); 560 return;
478 arp = arp_hdr(skb);
479 561
480 if ((arp->ar_hrd != htons(ARPHRD_ETHER) && 562 skb_reset_network_header(skb);
481 arp->ar_hrd != htons(ARPHRD_IEEE802)) || 563 skb_reset_transport_header(skb);
482 arp->ar_pro != htons(ETH_P_IP) || 564 arp = arp_hdr(skb);
483 arp->ar_op != htons(ARPOP_REQUEST))
484 return;
485 565
486 arp_ptr = (unsigned char *)(arp+1); 566 if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
487 /* save the location of the src hw addr */ 567 arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
488 sha = arp_ptr; 568 arp->ar_pro != htons(ETH_P_IP) ||
489 arp_ptr += skb->dev->addr_len; 569 arp->ar_op != htons(ARPOP_REQUEST))
490 memcpy(&sip, arp_ptr, 4); 570 return;
491 arp_ptr += 4;
492 /* If we actually cared about dst hw addr,
493 it would get copied here */
494 arp_ptr += skb->dev->addr_len;
495 memcpy(&tip, arp_ptr, 4);
496
497 /* Should we ignore arp? */
498 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
499 return;
500 571
501 size = arp_hdr_len(skb->dev); 572 arp_ptr = (unsigned char *)(arp+1);
573 /* save the location of the src hw addr */
574 sha = arp_ptr;
575 arp_ptr += skb->dev->addr_len;
576 memcpy(&sip, arp_ptr, 4);
577 arp_ptr += 4;
578 /* If we actually cared about dst hw addr,
579 it would get copied here */
580 arp_ptr += skb->dev->addr_len;
581 memcpy(&tip, arp_ptr, 4);
502 582
503 spin_lock_irqsave(&npinfo->rx_lock, flags); 583 /* Should we ignore arp? */
504 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { 584 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
505 if (tip != np->local_ip) 585 return;
506 continue;
507 586
508 hlen = LL_RESERVED_SPACE(np->dev); 587 size = arp_hdr_len(skb->dev);
509 tlen = np->dev->needed_tailroom;
510 send_skb = find_skb(np, size + hlen + tlen, hlen);
511 if (!send_skb)
512 continue;
513 588
514 skb_reset_network_header(send_skb); 589 spin_lock_irqsave(&npinfo->rx_lock, flags);
515 arp = (struct arphdr *) skb_put(send_skb, size); 590 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
516 send_skb->dev = skb->dev; 591 if (tip != np->local_ip.ip)
517 send_skb->protocol = htons(ETH_P_ARP); 592 continue;
593
594 hlen = LL_RESERVED_SPACE(np->dev);
595 tlen = np->dev->needed_tailroom;
596 send_skb = find_skb(np, size + hlen + tlen, hlen);
597 if (!send_skb)
598 continue;
599
600 skb_reset_network_header(send_skb);
601 arp = (struct arphdr *) skb_put(send_skb, size);
602 send_skb->dev = skb->dev;
603 send_skb->protocol = htons(ETH_P_ARP);
604
605 /* Fill the device header for the ARP frame */
606 if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
607 sha, np->dev->dev_addr,
608 send_skb->len) < 0) {
609 kfree_skb(send_skb);
610 continue;
611 }
518 612
519 /* Fill the device header for the ARP frame */ 613 /*
520 if (dev_hard_header(send_skb, skb->dev, ptype, 614 * Fill out the arp protocol part.
521 sha, np->dev->dev_addr, 615 *
522 send_skb->len) < 0) { 616 * we only support ethernet device type,
523 kfree_skb(send_skb); 617 * which (according to RFC 1390) should
524 continue; 618 * always equal 1 (Ethernet).
619 */
620
621 arp->ar_hrd = htons(np->dev->type);
622 arp->ar_pro = htons(ETH_P_IP);
623 arp->ar_hln = np->dev->addr_len;
624 arp->ar_pln = 4;
625 arp->ar_op = htons(type);
626
627 arp_ptr = (unsigned char *)(arp + 1);
628 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
629 arp_ptr += np->dev->addr_len;
630 memcpy(arp_ptr, &tip, 4);
631 arp_ptr += 4;
632 memcpy(arp_ptr, sha, np->dev->addr_len);
633 arp_ptr += np->dev->addr_len;
634 memcpy(arp_ptr, &sip, 4);
635
636 netpoll_send_skb(np, send_skb);
637
638 /* If there are several rx_hooks for the same address,
639 we're fine by sending a single reply */
640 break;
525 } 641 }
642 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
643 } else if( proto == ETH_P_IPV6) {
644#if IS_ENABLED(CONFIG_IPV6)
645 struct nd_msg *msg;
646 u8 *lladdr = NULL;
647 struct ipv6hdr *hdr;
648 struct icmp6hdr *icmp6h;
649 const struct in6_addr *saddr;
650 const struct in6_addr *daddr;
651 struct inet6_dev *in6_dev = NULL;
652 struct in6_addr *target;
653
654 in6_dev = in6_dev_get(skb->dev);
655 if (!in6_dev || !in6_dev->cnf.accept_ra)
656 return;
526 657
527 /* 658 if (!pskb_may_pull(skb, skb->len))
528 * Fill out the arp protocol part. 659 return;
529 *
530 * we only support ethernet device type,
531 * which (according to RFC 1390) should
532 * always equal 1 (Ethernet).
533 */
534 660
535 arp->ar_hrd = htons(np->dev->type); 661 msg = (struct nd_msg *)skb_transport_header(skb);
536 arp->ar_pro = htons(ETH_P_IP);
537 arp->ar_hln = np->dev->addr_len;
538 arp->ar_pln = 4;
539 arp->ar_op = htons(type);
540 662
541 arp_ptr = (unsigned char *)(arp + 1); 663 __skb_push(skb, skb->data - skb_transport_header(skb));
542 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
543 arp_ptr += np->dev->addr_len;
544 memcpy(arp_ptr, &tip, 4);
545 arp_ptr += 4;
546 memcpy(arp_ptr, sha, np->dev->addr_len);
547 arp_ptr += np->dev->addr_len;
548 memcpy(arp_ptr, &sip, 4);
549 664
550 netpoll_send_skb(np, send_skb); 665 if (ipv6_hdr(skb)->hop_limit != 255)
666 return;
667 if (msg->icmph.icmp6_code != 0)
668 return;
669 if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
670 return;
671
672 saddr = &ipv6_hdr(skb)->saddr;
673 daddr = &ipv6_hdr(skb)->daddr;
551 674
552 /* If there are several rx_hooks for the same address, 675 size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
553 we're fine by sending a single reply */ 676
554 break; 677 spin_lock_irqsave(&npinfo->rx_lock, flags);
678 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
679 if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
680 continue;
681
682 hlen = LL_RESERVED_SPACE(np->dev);
683 tlen = np->dev->needed_tailroom;
684 send_skb = find_skb(np, size + hlen + tlen, hlen);
685 if (!send_skb)
686 continue;
687
688 send_skb->protocol = htons(ETH_P_IPV6);
689 send_skb->dev = skb->dev;
690
691 skb_reset_network_header(send_skb);
692 skb_put(send_skb, sizeof(struct ipv6hdr));
693 hdr = ipv6_hdr(send_skb);
694
695 *(__be32*)hdr = htonl(0x60000000);
696
697 hdr->payload_len = htons(size);
698 hdr->nexthdr = IPPROTO_ICMPV6;
699 hdr->hop_limit = 255;
700 hdr->saddr = *saddr;
701 hdr->daddr = *daddr;
702
703 send_skb->transport_header = send_skb->tail;
704 skb_put(send_skb, size);
705
706 icmp6h = (struct icmp6hdr *)skb_transport_header(skb);
707 icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
708 icmp6h->icmp6_router = 0;
709 icmp6h->icmp6_solicited = 1;
710 target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
711 *target = msg->target;
712 icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
713 IPPROTO_ICMPV6,
714 csum_partial(icmp6h,
715 size, 0));
716
717 if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
718 lladdr, np->dev->dev_addr,
719 send_skb->len) < 0) {
720 kfree_skb(send_skb);
721 continue;
722 }
723
724 netpoll_send_skb(np, send_skb);
725
726 /* If there are several rx_hooks for the same address,
727 we're fine by sending a single reply */
728 break;
729 }
730 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
731#endif
555 } 732 }
556 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 733}
734
735static bool pkt_is_ns(struct sk_buff *skb)
736{
737 struct nd_msg *msg;
738 struct ipv6hdr *hdr;
739
740 if (skb->protocol != htons(ETH_P_ARP))
741 return false;
742 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
743 return false;
744
745 msg = (struct nd_msg *)skb_transport_header(skb);
746 __skb_push(skb, skb->data - skb_transport_header(skb));
747 hdr = ipv6_hdr(skb);
748
749 if (hdr->nexthdr != IPPROTO_ICMPV6)
750 return false;
751 if (hdr->hop_limit != 255)
752 return false;
753 if (msg->icmph.icmp6_code != 0)
754 return false;
755 if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
756 return false;
757
758 return true;
557} 759}
558 760
559int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) 761int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
@@ -571,9 +773,11 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
571 goto out; 773 goto out;
572 774
573 /* check if netpoll clients need ARP */ 775 /* check if netpoll clients need ARP */
574 if (skb->protocol == htons(ETH_P_ARP) && 776 if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
575 atomic_read(&trapped)) { 777 skb_queue_tail(&npinfo->neigh_tx, skb);
576 skb_queue_tail(&npinfo->arp_tx, skb); 778 return 1;
779 } else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
780 skb_queue_tail(&npinfo->neigh_tx, skb);
577 return 1; 781 return 1;
578 } 782 }
579 783
@@ -584,60 +788,100 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
584 } 788 }
585 789
586 proto = ntohs(eth_hdr(skb)->h_proto); 790 proto = ntohs(eth_hdr(skb)->h_proto);
587 if (proto != ETH_P_IP) 791 if (proto != ETH_P_IP && proto != ETH_P_IPV6)
588 goto out; 792 goto out;
589 if (skb->pkt_type == PACKET_OTHERHOST) 793 if (skb->pkt_type == PACKET_OTHERHOST)
590 goto out; 794 goto out;
591 if (skb_shared(skb)) 795 if (skb_shared(skb))
592 goto out; 796 goto out;
593 797
594 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 798 if (proto == ETH_P_IP) {
595 goto out; 799 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
596 iph = (struct iphdr *)skb->data; 800 goto out;
597 if (iph->ihl < 5 || iph->version != 4) 801 iph = (struct iphdr *)skb->data;
598 goto out; 802 if (iph->ihl < 5 || iph->version != 4)
599 if (!pskb_may_pull(skb, iph->ihl*4)) 803 goto out;
600 goto out; 804 if (!pskb_may_pull(skb, iph->ihl*4))
601 iph = (struct iphdr *)skb->data; 805 goto out;
602 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) 806 iph = (struct iphdr *)skb->data;
603 goto out; 807 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
604 808 goto out;
605 len = ntohs(iph->tot_len);
606 if (skb->len < len || len < iph->ihl*4)
607 goto out;
608 809
609 /* 810 len = ntohs(iph->tot_len);
610 * Our transport medium may have padded the buffer out. 811 if (skb->len < len || len < iph->ihl*4)
611 * Now we trim to the true length of the frame. 812 goto out;
612 */
613 if (pskb_trim_rcsum(skb, len))
614 goto out;
615 813
616 iph = (struct iphdr *)skb->data; 814 /*
617 if (iph->protocol != IPPROTO_UDP) 815 * Our transport medium may have padded the buffer out.
618 goto out; 816 * Now we trim to the true length of the frame.
817 */
818 if (pskb_trim_rcsum(skb, len))
819 goto out;
619 820
620 len -= iph->ihl*4; 821 iph = (struct iphdr *)skb->data;
621 uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); 822 if (iph->protocol != IPPROTO_UDP)
622 ulen = ntohs(uh->len); 823 goto out;
623 824
624 if (ulen != len) 825 len -= iph->ihl*4;
625 goto out; 826 uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
626 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) 827 ulen = ntohs(uh->len);
627 goto out;
628 828
629 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { 829 if (ulen != len)
630 if (np->local_ip && np->local_ip != iph->daddr) 830 goto out;
631 continue; 831 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
632 if (np->remote_ip && np->remote_ip != iph->saddr) 832 goto out;
633 continue; 833 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
634 if (np->local_port && np->local_port != ntohs(uh->dest)) 834 if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
635 continue; 835 continue;
836 if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
837 continue;
838 if (np->local_port && np->local_port != ntohs(uh->dest))
839 continue;
840
841 np->rx_hook(np, ntohs(uh->source),
842 (char *)(uh+1),
843 ulen - sizeof(struct udphdr));
844 hits++;
845 }
846 } else {
847#if IS_ENABLED(CONFIG_IPV6)
848 const struct ipv6hdr *ip6h;
636 849
637 np->rx_hook(np, ntohs(uh->source), 850 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
638 (char *)(uh+1), 851 goto out;
639 ulen - sizeof(struct udphdr)); 852 ip6h = (struct ipv6hdr *)skb->data;
640 hits++; 853 if (ip6h->version != 6)
854 goto out;
855 len = ntohs(ip6h->payload_len);
856 if (!len)
857 goto out;
858 if (len + sizeof(struct ipv6hdr) > skb->len)
859 goto out;
860 if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
861 goto out;
862 ip6h = ipv6_hdr(skb);
863 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
864 goto out;
865 uh = udp_hdr(skb);
866 ulen = ntohs(uh->len);
867 if (ulen != skb->len)
868 goto out;
869 if (udp6_csum_init(skb, uh, IPPROTO_UDP))
870 goto out;
871 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
872 if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
873 continue;
874 if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
875 continue;
876 if (np->local_port && np->local_port != ntohs(uh->dest))
877 continue;
878
879 np->rx_hook(np, ntohs(uh->source),
880 (char *)(uh+1),
881 ulen - sizeof(struct udphdr));
882 hits++;
883 }
884#endif
641 } 885 }
642 886
643 if (!hits) 887 if (!hits)
@@ -658,17 +902,44 @@ out:
658void netpoll_print_options(struct netpoll *np) 902void netpoll_print_options(struct netpoll *np)
659{ 903{
660 np_info(np, "local port %d\n", np->local_port); 904 np_info(np, "local port %d\n", np->local_port);
661 np_info(np, "local IP %pI4\n", &np->local_ip); 905 if (np->ipv6)
906 np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
907 else
908 np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
662 np_info(np, "interface '%s'\n", np->dev_name); 909 np_info(np, "interface '%s'\n", np->dev_name);
663 np_info(np, "remote port %d\n", np->remote_port); 910 np_info(np, "remote port %d\n", np->remote_port);
664 np_info(np, "remote IP %pI4\n", &np->remote_ip); 911 if (np->ipv6)
912 np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
913 else
914 np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
665 np_info(np, "remote ethernet address %pM\n", np->remote_mac); 915 np_info(np, "remote ethernet address %pM\n", np->remote_mac);
666} 916}
667EXPORT_SYMBOL(netpoll_print_options); 917EXPORT_SYMBOL(netpoll_print_options);
668 918
919static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
920{
921 const char *end;
922
923 if (!strchr(str, ':') &&
924 in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
925 if (!*end)
926 return 0;
927 }
928 if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
929#if IS_ENABLED(CONFIG_IPV6)
930 if (!*end)
931 return 1;
932#else
933 return -1;
934#endif
935 }
936 return -1;
937}
938
669int netpoll_parse_options(struct netpoll *np, char *opt) 939int netpoll_parse_options(struct netpoll *np, char *opt)
670{ 940{
671 char *cur=opt, *delim; 941 char *cur=opt, *delim;
942 int ipv6;
672 943
673 if (*cur != '@') { 944 if (*cur != '@') {
674 if ((delim = strchr(cur, '@')) == NULL) 945 if ((delim = strchr(cur, '@')) == NULL)
@@ -684,7 +955,11 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
684 if ((delim = strchr(cur, '/')) == NULL) 955 if ((delim = strchr(cur, '/')) == NULL)
685 goto parse_failed; 956 goto parse_failed;
686 *delim = 0; 957 *delim = 0;
687 np->local_ip = in_aton(cur); 958 ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
959 if (ipv6 < 0)
960 goto parse_failed;
961 else
962 np->ipv6 = (bool)ipv6;
688 cur = delim; 963 cur = delim;
689 } 964 }
690 cur++; 965 cur++;
@@ -716,7 +991,13 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
716 if ((delim = strchr(cur, '/')) == NULL) 991 if ((delim = strchr(cur, '/')) == NULL)
717 goto parse_failed; 992 goto parse_failed;
718 *delim = 0; 993 *delim = 0;
719 np->remote_ip = in_aton(cur); 994 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
995 if (ipv6 < 0)
996 goto parse_failed;
997 else if (np->ipv6 != (bool)ipv6)
998 goto parse_failed;
999 else
1000 np->ipv6 = (bool)ipv6;
720 cur = delim + 1; 1001 cur = delim + 1;
721 1002
722 if (*cur != 0) { 1003 if (*cur != 0) {
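
netpoll_parse_ip_addr() returns 0 for a well-formed IPv4 address, 1 for IPv6, and -1 otherwise; that is how the parser decides np->ipv6 and rejects a mixed local/remote pair. A userspace analogue using inet_pton, with the union layout invented for the demo:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

union demo_addr {
	struct in_addr	ip;
	struct in6_addr	in6;
};

/* 0 = IPv4, 1 = IPv6, -1 = unparsable, mirroring the kernel helper */
static int parse_ip_addr(const char *str, union demo_addr *addr)
{
	if (!strchr(str, ':') && inet_pton(AF_INET, str, &addr->ip) == 1)
		return 0;
	if (inet_pton(AF_INET6, str, &addr->in6) == 1)
		return 1;
	return -1;
}

int main(void)
{
	union demo_addr a;

	printf("%d %d %d\n",
	       parse_ip_addr("10.0.0.1", &a),	/* 0 */
	       parse_ip_addr("fe80::1", &a),	/* 1 */
	       parse_ip_addr("bogus", &a));	/* -1 */
	return 0;
}
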
@@ -744,6 +1025,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
744 1025
745 np->dev = ndev; 1026 np->dev = ndev;
746 strlcpy(np->dev_name, ndev->name, IFNAMSIZ); 1027 strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
1028 INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
747 1029
748 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 1030 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
749 !ndev->netdev_ops->ndo_poll_controller) { 1031 !ndev->netdev_ops->ndo_poll_controller) {
@@ -764,7 +1046,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
764 INIT_LIST_HEAD(&npinfo->rx_np); 1046 INIT_LIST_HEAD(&npinfo->rx_np);
765 1047
766 spin_lock_init(&npinfo->rx_lock); 1048 spin_lock_init(&npinfo->rx_lock);
767 skb_queue_head_init(&npinfo->arp_tx); 1049 mutex_init(&npinfo->dev_lock);
1050 skb_queue_head_init(&npinfo->neigh_tx);
768 skb_queue_head_init(&npinfo->txq); 1051 skb_queue_head_init(&npinfo->txq);
769 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); 1052 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
770 1053
@@ -777,7 +1060,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
777 goto free_npinfo; 1060 goto free_npinfo;
778 } 1061 }
779 } else { 1062 } else {
780 npinfo = ndev->npinfo; 1063 npinfo = rtnl_dereference(ndev->npinfo);
781 atomic_inc(&npinfo->refcnt); 1064 atomic_inc(&npinfo->refcnt);
782 } 1065 }
783 1066
@@ -808,14 +1091,19 @@ int netpoll_setup(struct netpoll *np)
808 struct in_device *in_dev; 1091 struct in_device *in_dev;
809 int err; 1092 int err;
810 1093
811 if (np->dev_name) 1094 rtnl_lock();
812 ndev = dev_get_by_name(&init_net, np->dev_name); 1095 if (np->dev_name) {
1096 struct net *net = current->nsproxy->net_ns;
1097 ndev = __dev_get_by_name(net, np->dev_name);
1098 }
813 if (!ndev) { 1099 if (!ndev) {
814 np_err(np, "%s doesn't exist, aborting\n", np->dev_name); 1100 np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
815 return -ENODEV; 1101 err = -ENODEV;
1102 goto unlock;
816 } 1103 }
1104 dev_hold(ndev);
817 1105
818 if (ndev->master) { 1106 if (netdev_master_upper_dev_get(ndev)) {
819 np_err(np, "%s is a slave device, aborting\n", np->dev_name); 1107 np_err(np, "%s is a slave device, aborting\n", np->dev_name);
820 err = -EBUSY; 1108 err = -EBUSY;
821 goto put; 1109 goto put;
@@ -826,15 +1114,14 @@ int netpoll_setup(struct netpoll *np)
826 1114
827 np_info(np, "device %s not up yet, forcing it\n", np->dev_name); 1115 np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
828 1116
829 rtnl_lock();
830 err = dev_open(ndev); 1117 err = dev_open(ndev);
831 rtnl_unlock();
832 1118
833 if (err) { 1119 if (err) {
834 np_err(np, "failed to open %s\n", ndev->name); 1120 np_err(np, "failed to open %s\n", ndev->name);
835 goto put; 1121 goto put;
836 } 1122 }
837 1123
1124 rtnl_unlock();
838 atleast = jiffies + HZ/10; 1125 atleast = jiffies + HZ/10;
839 atmost = jiffies + carrier_timeout * HZ; 1126 atmost = jiffies + carrier_timeout * HZ;
840 while (!netif_carrier_ok(ndev)) { 1127 while (!netif_carrier_ok(ndev)) {
@@ -854,39 +1141,70 @@ int netpoll_setup(struct netpoll *np)
854 np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n"); 1141 np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
855 msleep(4000); 1142 msleep(4000);
856 } 1143 }
1144 rtnl_lock();
857 } 1145 }
858 1146
859 if (!np->local_ip) { 1147 if (!np->local_ip.ip) {
860 rcu_read_lock(); 1148 if (!np->ipv6) {
861 in_dev = __in_dev_get_rcu(ndev); 1149 in_dev = __in_dev_get_rtnl(ndev);
1150
1151 if (!in_dev || !in_dev->ifa_list) {
1152 np_err(np, "no IP address for %s, aborting\n",
1153 np->dev_name);
1154 err = -EDESTADDRREQ;
1155 goto put;
1156 }
1157
1158 np->local_ip.ip = in_dev->ifa_list->ifa_local;
1159 np_info(np, "local IP %pI4\n", &np->local_ip.ip);
1160 } else {
1161#if IS_ENABLED(CONFIG_IPV6)
1162 struct inet6_dev *idev;
862 1163
863 if (!in_dev || !in_dev->ifa_list) {
864 rcu_read_unlock();
865 np_err(np, "no IP address for %s, aborting\n",
866 np->dev_name);
867 err = -EDESTADDRREQ; 1164 err = -EDESTADDRREQ;
1165 idev = __in6_dev_get(ndev);
1166 if (idev) {
1167 struct inet6_ifaddr *ifp;
1168
1169 read_lock_bh(&idev->lock);
1170 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1171 if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
1172 continue;
1173 np->local_ip.in6 = ifp->addr;
1174 err = 0;
1175 break;
1176 }
1177 read_unlock_bh(&idev->lock);
1178 }
1179 if (err) {
1180 np_err(np, "no IPv6 address for %s, aborting\n",
1181 np->dev_name);
1182 goto put;
1183 } else
1184 np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
1185#else
1186 np_err(np, "IPv6 is not supported %s, aborting\n",
1187 np->dev_name);
1188 err = -EINVAL;
868 goto put; 1189 goto put;
1190#endif
869 } 1191 }
870
871 np->local_ip = in_dev->ifa_list->ifa_local;
872 rcu_read_unlock();
873 np_info(np, "local IP %pI4\n", &np->local_ip);
874 } 1192 }
875 1193
876 /* fill up the skb queue */ 1194 /* fill up the skb queue */
877 refill_skbs(); 1195 refill_skbs();
878 1196
879 rtnl_lock();
880 err = __netpoll_setup(np, ndev, GFP_KERNEL); 1197 err = __netpoll_setup(np, ndev, GFP_KERNEL);
881 rtnl_unlock();
882
883 if (err) 1198 if (err)
884 goto put; 1199 goto put;
885 1200
1201 rtnl_unlock();
886 return 0; 1202 return 0;
887 1203
888put: 1204put:
889 dev_put(ndev); 1205 dev_put(ndev);
1206unlock:
1207 rtnl_unlock();
890 return err; 1208 return err;
891} 1209}
892EXPORT_SYMBOL(netpoll_setup); 1210EXPORT_SYMBOL(netpoll_setup);
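
A note on the lookup change above: netpoll_setup() now runs under one rtnl_lock() span, so it can use __dev_get_by_name(), which takes no reference, and pin the device explicitly with dev_hold(). A minimal sketch of that idiom (the helper name is illustrative, not from the patch):

static struct net_device *pin_dev_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        ASSERT_RTNL();                          /* caller holds rtnl_lock() */
        dev = __dev_get_by_name(net, name);     /* takes no refcount */
        if (dev)
                dev_hold(dev);                  /* balanced by a later dev_put() */
        return dev;
}
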
@@ -894,6 +1212,7 @@ EXPORT_SYMBOL(netpoll_setup);
894static int __init netpoll_init(void) 1212static int __init netpoll_init(void)
895{ 1213{
896 skb_queue_head_init(&skb_pool); 1214 skb_queue_head_init(&skb_pool);
1215 init_srcu_struct(&netpoll_srcu);
897 return 0; 1216 return 0;
898} 1217}
899core_initcall(netpoll_init); 1218core_initcall(netpoll_init);
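
netpoll_init() now also runs init_srcu_struct(&netpoll_srcu); the matching synchronize_srcu() appears in __netpoll_cleanup() below. SRCU is chosen over plain RCU when the read side may sleep. A minimal sketch of the pattern, under illustrative names:

#include <linux/srcu.h>

static struct srcu_struct demo_srcu;    /* init_srcu_struct() at boot */

static void demo_reader(void)
{
        int idx = srcu_read_lock(&demo_srcu);
        /* ... may block; the protected state stays valid meanwhile ... */
        srcu_read_unlock(&demo_srcu, idx);
}

static void demo_teardown(void)
{
        /* Waits for every reader that entered before this call. */
        synchronize_srcu(&demo_srcu);
        /* ... now safe to free what the readers were looking at ... */
}
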
@@ -903,7 +1222,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
903 struct netpoll_info *npinfo = 1222 struct netpoll_info *npinfo =
904 container_of(rcu_head, struct netpoll_info, rcu); 1223 container_of(rcu_head, struct netpoll_info, rcu);
905 1224
906 skb_queue_purge(&npinfo->arp_tx); 1225 skb_queue_purge(&npinfo->neigh_tx);
907 skb_queue_purge(&npinfo->txq); 1226 skb_queue_purge(&npinfo->txq);
908 1227
909 /* we can't call cancel_delayed_work_sync here, as we are in softirq */ 1228 /* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -921,7 +1240,11 @@ void __netpoll_cleanup(struct netpoll *np)
921 struct netpoll_info *npinfo; 1240 struct netpoll_info *npinfo;
922 unsigned long flags; 1241 unsigned long flags;
923 1242
924 npinfo = np->dev->npinfo; 1243 /* rtnl_dereference would be preferable here but
1244 * rcu_cleanup_netpoll path can put us in here safely without
1245 * holding the rtnl, so plain rcu_dereference it is
1246 */
1247 npinfo = rtnl_dereference(np->dev->npinfo);
925 if (!npinfo) 1248 if (!npinfo)
926 return; 1249 return;
927 1250
@@ -933,6 +1256,8 @@ void __netpoll_cleanup(struct netpoll *np)
933 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 1256 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
934 } 1257 }
935 1258
1259 synchronize_srcu(&netpoll_srcu);
1260
936 if (atomic_dec_and_test(&npinfo->refcnt)) { 1261 if (atomic_dec_and_test(&npinfo->refcnt)) {
937 const struct net_device_ops *ops; 1262 const struct net_device_ops *ops;
938 1263
@@ -940,25 +1265,27 @@ void __netpoll_cleanup(struct netpoll *np)
940 if (ops->ndo_netpoll_cleanup) 1265 if (ops->ndo_netpoll_cleanup)
941 ops->ndo_netpoll_cleanup(np->dev); 1266 ops->ndo_netpoll_cleanup(np->dev);
942 1267
943 RCU_INIT_POINTER(np->dev->npinfo, NULL); 1268 rcu_assign_pointer(np->dev->npinfo, NULL);
944 call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); 1269 call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
945 } 1270 }
946} 1271}
947EXPORT_SYMBOL_GPL(__netpoll_cleanup); 1272EXPORT_SYMBOL_GPL(__netpoll_cleanup);
948 1273
949static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) 1274static void netpoll_async_cleanup(struct work_struct *work)
950{ 1275{
951 struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); 1276 struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
952 1277
1278 rtnl_lock();
953 __netpoll_cleanup(np); 1279 __netpoll_cleanup(np);
1280 rtnl_unlock();
954 kfree(np); 1281 kfree(np);
955} 1282}
956 1283
957void __netpoll_free_rcu(struct netpoll *np) 1284void __netpoll_free_async(struct netpoll *np)
958{ 1285{
959 call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); 1286 schedule_work(&np->cleanup_work);
960} 1287}
961EXPORT_SYMBOL_GPL(__netpoll_free_rcu); 1288EXPORT_SYMBOL_GPL(__netpoll_free_async);
962 1289
963void netpoll_cleanup(struct netpoll *np) 1290void netpoll_cleanup(struct netpoll *np)
964{ 1291{
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5e67defe2cb0..0777d0aa18c3 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -69,10 +69,8 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
69 69
70 /* allocate & copy */ 70 /* allocate & copy */
71 new = kzalloc(new_sz, GFP_KERNEL); 71 new = kzalloc(new_sz, GFP_KERNEL);
72 if (!new) { 72 if (!new)
73 pr_warn("Unable to alloc new priomap!\n");
74 return -ENOMEM; 73 return -ENOMEM;
75 }
76 74
77 if (old) 75 if (old)
78 memcpy(new->priomap, old->priomap, 76 memcpy(new->priomap, old->priomap,
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b29dacf900f9..6048fc1da1c2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -164,6 +164,7 @@
164#ifdef CONFIG_XFRM 164#ifdef CONFIG_XFRM
165#include <net/xfrm.h> 165#include <net/xfrm.h>
166#endif 166#endif
167#include <net/netns/generic.h>
167#include <asm/byteorder.h> 168#include <asm/byteorder.h>
168#include <linux/rcupdate.h> 169#include <linux/rcupdate.h>
169#include <linux/bitops.h> 170#include <linux/bitops.h>
@@ -212,7 +213,6 @@
212#define PKTGEN_MAGIC 0xbe9be955 213#define PKTGEN_MAGIC 0xbe9be955
213#define PG_PROC_DIR "pktgen" 214#define PG_PROC_DIR "pktgen"
214#define PGCTRL "pgctrl" 215#define PGCTRL "pgctrl"
215static struct proc_dir_entry *pg_proc_dir;
216 216
217#define MAX_CFLOWS 65536 217#define MAX_CFLOWS 65536
218 218
@@ -397,7 +397,15 @@ struct pktgen_hdr {
397 __be32 tv_usec; 397 __be32 tv_usec;
398}; 398};
399 399
400static bool pktgen_exiting __read_mostly; 400
401static int pg_net_id __read_mostly;
402
403struct pktgen_net {
404 struct net *net;
405 struct proc_dir_entry *proc_dir;
406 struct list_head pktgen_threads;
407 bool pktgen_exiting;
408};
401 409
402struct pktgen_thread { 410struct pktgen_thread {
403 spinlock_t if_lock; /* for list of devices */ 411 spinlock_t if_lock; /* for list of devices */
@@ -414,6 +422,7 @@ struct pktgen_thread {
414 422
415 wait_queue_head_t queue; 423 wait_queue_head_t queue;
416 struct completion start_done; 424 struct completion start_done;
425 struct pktgen_net *net;
417}; 426};
418 427
419#define REMOVE 1 428#define REMOVE 1
@@ -428,9 +437,9 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
428static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 437static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
429 const char *ifname, bool exact); 438 const char *ifname, bool exact);
430static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 439static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
431static void pktgen_run_all_threads(void); 440static void pktgen_run_all_threads(struct pktgen_net *pn);
432static void pktgen_reset_all_threads(void); 441static void pktgen_reset_all_threads(struct pktgen_net *pn);
433static void pktgen_stop_all_threads_ifs(void); 442static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn);
434 443
435static void pktgen_stop(struct pktgen_thread *t); 444static void pktgen_stop(struct pktgen_thread *t);
436static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 445static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
@@ -442,7 +451,6 @@ static int pg_clone_skb_d __read_mostly;
442static int debug __read_mostly; 451static int debug __read_mostly;
443 452
444static DEFINE_MUTEX(pktgen_thread_lock); 453static DEFINE_MUTEX(pktgen_thread_lock);
445static LIST_HEAD(pktgen_threads);
446 454
447static struct notifier_block pktgen_notifier_block = { 455static struct notifier_block pktgen_notifier_block = {
448 .notifier_call = pktgen_device_event, 456 .notifier_call = pktgen_device_event,
@@ -464,6 +472,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
464{ 472{
465 int err = 0; 473 int err = 0;
466 char data[128]; 474 char data[128];
475 struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
467 476
468 if (!capable(CAP_NET_ADMIN)) { 477 if (!capable(CAP_NET_ADMIN)) {
469 err = -EPERM; 478 err = -EPERM;
@@ -480,13 +489,13 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
480 data[count - 1] = 0; /* Make string */ 489 data[count - 1] = 0; /* Make string */
481 490
482 if (!strcmp(data, "stop")) 491 if (!strcmp(data, "stop"))
483 pktgen_stop_all_threads_ifs(); 492 pktgen_stop_all_threads_ifs(pn);
484 493
485 else if (!strcmp(data, "start")) 494 else if (!strcmp(data, "start"))
486 pktgen_run_all_threads(); 495 pktgen_run_all_threads(pn);
487 496
488 else if (!strcmp(data, "reset")) 497 else if (!strcmp(data, "reset"))
489 pktgen_reset_all_threads(); 498 pktgen_reset_all_threads(pn);
490 499
491 else 500 else
492 pr_warning("Unknown command: %s\n", data); 501 pr_warning("Unknown command: %s\n", data);
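
pgctrl_write() now resolves its per-namespace state with net_generic() instead of touching module-wide globals. A minimal sketch of that lookup, reusing the pg_net_id and struct pktgen_net names from the patch:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Storage is reserved per namespace via pernet_operations .id/.size
 * (see the pg_net_ops hunk further down); net_generic() fetches it. */
static struct pktgen_net *pg_state(struct net *net)
{
        return net_generic(net, pg_net_id);
}

/* e.g. from a process-context handler:
 *      struct pktgen_net *pn = pg_state(current->nsproxy->net_ns);
 */
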
@@ -1781,10 +1790,13 @@ static ssize_t pktgen_thread_write(struct file *file,
1781 return -EFAULT; 1790 return -EFAULT;
1782 i += len; 1791 i += len;
1783 mutex_lock(&pktgen_thread_lock); 1792 mutex_lock(&pktgen_thread_lock);
1784 pktgen_add_device(t, f); 1793 ret = pktgen_add_device(t, f);
1785 mutex_unlock(&pktgen_thread_lock); 1794 mutex_unlock(&pktgen_thread_lock);
1786 ret = count; 1795 if (!ret) {
1787 sprintf(pg_result, "OK: add_device=%s", f); 1796 ret = count;
1797 sprintf(pg_result, "OK: add_device=%s", f);
1798 } else
1799 sprintf(pg_result, "ERROR: can not add device %s", f);
1788 goto out; 1800 goto out;
1789 } 1801 }
1790 1802
@@ -1824,13 +1836,14 @@ static const struct file_operations pktgen_thread_fops = {
1824}; 1836};
1825 1837
1826/* Think find or remove for NN */ 1838/* Think find or remove for NN */
1827static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove) 1839static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
1840 const char *ifname, int remove)
1828{ 1841{
1829 struct pktgen_thread *t; 1842 struct pktgen_thread *t;
1830 struct pktgen_dev *pkt_dev = NULL; 1843 struct pktgen_dev *pkt_dev = NULL;
1831 bool exact = (remove == FIND); 1844 bool exact = (remove == FIND);
1832 1845
1833 list_for_each_entry(t, &pktgen_threads, th_list) { 1846 list_for_each_entry(t, &pn->pktgen_threads, th_list) {
1834 pkt_dev = pktgen_find_dev(t, ifname, exact); 1847 pkt_dev = pktgen_find_dev(t, ifname, exact);
1835 if (pkt_dev) { 1848 if (pkt_dev) {
1836 if (remove) { 1849 if (remove) {
@@ -1848,7 +1861,7 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
1848/* 1861/*
1849 * mark a device for removal 1862 * mark a device for removal
1850 */ 1863 */
1851static void pktgen_mark_device(const char *ifname) 1864static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
1852{ 1865{
1853 struct pktgen_dev *pkt_dev = NULL; 1866 struct pktgen_dev *pkt_dev = NULL;
1854 const int max_tries = 10, msec_per_try = 125; 1867 const int max_tries = 10, msec_per_try = 125;
@@ -1859,7 +1872,7 @@ static void pktgen_mark_device(const char *ifname)
1859 1872
1860 while (1) { 1873 while (1) {
1861 1874
1862 pkt_dev = __pktgen_NN_threads(ifname, REMOVE); 1875 pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
1863 if (pkt_dev == NULL) 1876 if (pkt_dev == NULL)
1864 break; /* success */ 1877 break; /* success */
1865 1878
@@ -1880,21 +1893,21 @@ static void pktgen_mark_device(const char *ifname)
1880 mutex_unlock(&pktgen_thread_lock); 1893 mutex_unlock(&pktgen_thread_lock);
1881} 1894}
1882 1895
1883static void pktgen_change_name(struct net_device *dev) 1896static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
1884{ 1897{
1885 struct pktgen_thread *t; 1898 struct pktgen_thread *t;
1886 1899
1887 list_for_each_entry(t, &pktgen_threads, th_list) { 1900 list_for_each_entry(t, &pn->pktgen_threads, th_list) {
1888 struct pktgen_dev *pkt_dev; 1901 struct pktgen_dev *pkt_dev;
1889 1902
1890 list_for_each_entry(pkt_dev, &t->if_list, list) { 1903 list_for_each_entry(pkt_dev, &t->if_list, list) {
1891 if (pkt_dev->odev != dev) 1904 if (pkt_dev->odev != dev)
1892 continue; 1905 continue;
1893 1906
1894 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 1907 remove_proc_entry(pkt_dev->entry->name, pn->proc_dir);
1895 1908
1896 pkt_dev->entry = proc_create_data(dev->name, 0600, 1909 pkt_dev->entry = proc_create_data(dev->name, 0600,
1897 pg_proc_dir, 1910 pn->proc_dir,
1898 &pktgen_if_fops, 1911 &pktgen_if_fops,
1899 pkt_dev); 1912 pkt_dev);
1900 if (!pkt_dev->entry) 1913 if (!pkt_dev->entry)
@@ -1909,8 +1922,9 @@ static int pktgen_device_event(struct notifier_block *unused,
1909 unsigned long event, void *ptr) 1922 unsigned long event, void *ptr)
1910{ 1923{
1911 struct net_device *dev = ptr; 1924 struct net_device *dev = ptr;
1925 struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);
1912 1926
1913 if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting) 1927 if (pn->pktgen_exiting)
1914 return NOTIFY_DONE; 1928 return NOTIFY_DONE;
1915 1929
1916 /* It is OK that we do not hold the group lock right now, 1930 /* It is OK that we do not hold the group lock right now,
@@ -1919,18 +1933,19 @@ static int pktgen_device_event(struct notifier_block *unused,
1919 1933
1920 switch (event) { 1934 switch (event) {
1921 case NETDEV_CHANGENAME: 1935 case NETDEV_CHANGENAME:
1922 pktgen_change_name(dev); 1936 pktgen_change_name(pn, dev);
1923 break; 1937 break;
1924 1938
1925 case NETDEV_UNREGISTER: 1939 case NETDEV_UNREGISTER:
1926 pktgen_mark_device(dev->name); 1940 pktgen_mark_device(pn, dev->name);
1927 break; 1941 break;
1928 } 1942 }
1929 1943
1930 return NOTIFY_DONE; 1944 return NOTIFY_DONE;
1931} 1945}
1932 1946
1933static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, 1947static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
1948 struct pktgen_dev *pkt_dev,
1934 const char *ifname) 1949 const char *ifname)
1935{ 1950{
1936 char b[IFNAMSIZ+5]; 1951 char b[IFNAMSIZ+5];
@@ -1944,13 +1959,14 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
1944 } 1959 }
1945 b[i] = 0; 1960 b[i] = 0;
1946 1961
1947 return dev_get_by_name(&init_net, b); 1962 return dev_get_by_name(pn->net, b);
1948} 1963}
1949 1964
1950 1965
1951/* Associate pktgen_dev with a device. */ 1966/* Associate pktgen_dev with a device. */
1952 1967
1953static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) 1968static int pktgen_setup_dev(const struct pktgen_net *pn,
1969 struct pktgen_dev *pkt_dev, const char *ifname)
1954{ 1970{
1955 struct net_device *odev; 1971 struct net_device *odev;
1956 int err; 1972 int err;
@@ -1961,7 +1977,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
1961 pkt_dev->odev = NULL; 1977 pkt_dev->odev = NULL;
1962 } 1978 }
1963 1979
1964 odev = pktgen_dev_get_by_name(pkt_dev, ifname); 1980 odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
1965 if (!odev) { 1981 if (!odev) {
1966 pr_err("no such netdevice: \"%s\"\n", ifname); 1982 pr_err("no such netdevice: \"%s\"\n", ifname);
1967 return -ENODEV; 1983 return -ENODEV;
@@ -2203,9 +2219,10 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
2203static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) 2219static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2204{ 2220{
2205 struct xfrm_state *x = pkt_dev->flows[flow].x; 2221 struct xfrm_state *x = pkt_dev->flows[flow].x;
2222 struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
2206 if (!x) { 2223 if (!x) {
2207 /*slow path: we dont already have xfrm_state*/ 2224 /*slow path: we dont already have xfrm_state*/
2208 x = xfrm_stateonly_find(&init_net, DUMMY_MARK, 2225 x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
2209 (xfrm_address_t *)&pkt_dev->cur_daddr, 2226 (xfrm_address_t *)&pkt_dev->cur_daddr,
2210 (xfrm_address_t *)&pkt_dev->cur_saddr, 2227 (xfrm_address_t *)&pkt_dev->cur_saddr,
2211 AF_INET, 2228 AF_INET,
@@ -2912,7 +2929,7 @@ static void pktgen_run(struct pktgen_thread *t)
2912 t->control &= ~(T_STOP); 2929 t->control &= ~(T_STOP);
2913} 2930}
2914 2931
2915static void pktgen_stop_all_threads_ifs(void) 2932static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
2916{ 2933{
2917 struct pktgen_thread *t; 2934 struct pktgen_thread *t;
2918 2935
@@ -2920,7 +2937,7 @@ static void pktgen_stop_all_threads_ifs(void)
2920 2937
2921 mutex_lock(&pktgen_thread_lock); 2938 mutex_lock(&pktgen_thread_lock);
2922 2939
2923 list_for_each_entry(t, &pktgen_threads, th_list) 2940 list_for_each_entry(t, &pn->pktgen_threads, th_list)
2924 t->control |= T_STOP; 2941 t->control |= T_STOP;
2925 2942
2926 mutex_unlock(&pktgen_thread_lock); 2943 mutex_unlock(&pktgen_thread_lock);
@@ -2956,28 +2973,28 @@ signal:
2956 return 0; 2973 return 0;
2957} 2974}
2958 2975
2959static int pktgen_wait_all_threads_run(void) 2976static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
2960{ 2977{
2961 struct pktgen_thread *t; 2978 struct pktgen_thread *t;
2962 int sig = 1; 2979 int sig = 1;
2963 2980
2964 mutex_lock(&pktgen_thread_lock); 2981 mutex_lock(&pktgen_thread_lock);
2965 2982
2966 list_for_each_entry(t, &pktgen_threads, th_list) { 2983 list_for_each_entry(t, &pn->pktgen_threads, th_list) {
2967 sig = pktgen_wait_thread_run(t); 2984 sig = pktgen_wait_thread_run(t);
2968 if (sig == 0) 2985 if (sig == 0)
2969 break; 2986 break;
2970 } 2987 }
2971 2988
2972 if (sig == 0) 2989 if (sig == 0)
2973 list_for_each_entry(t, &pktgen_threads, th_list) 2990 list_for_each_entry(t, &pn->pktgen_threads, th_list)
2974 t->control |= (T_STOP); 2991 t->control |= (T_STOP);
2975 2992
2976 mutex_unlock(&pktgen_thread_lock); 2993 mutex_unlock(&pktgen_thread_lock);
2977 return sig; 2994 return sig;
2978} 2995}
2979 2996
2980static void pktgen_run_all_threads(void) 2997static void pktgen_run_all_threads(struct pktgen_net *pn)
2981{ 2998{
2982 struct pktgen_thread *t; 2999 struct pktgen_thread *t;
2983 3000
@@ -2985,7 +3002,7 @@ static void pktgen_run_all_threads(void)
2985 3002
2986 mutex_lock(&pktgen_thread_lock); 3003 mutex_lock(&pktgen_thread_lock);
2987 3004
2988 list_for_each_entry(t, &pktgen_threads, th_list) 3005 list_for_each_entry(t, &pn->pktgen_threads, th_list)
2989 t->control |= (T_RUN); 3006 t->control |= (T_RUN);
2990 3007
2991 mutex_unlock(&pktgen_thread_lock); 3008 mutex_unlock(&pktgen_thread_lock);
@@ -2993,10 +3010,10 @@ static void pktgen_run_all_threads(void)
2993 /* Propagate thread->control */ 3010 /* Propagate thread->control */
2994 schedule_timeout_interruptible(msecs_to_jiffies(125)); 3011 schedule_timeout_interruptible(msecs_to_jiffies(125));
2995 3012
2996 pktgen_wait_all_threads_run(); 3013 pktgen_wait_all_threads_run(pn);
2997} 3014}
2998 3015
2999static void pktgen_reset_all_threads(void) 3016static void pktgen_reset_all_threads(struct pktgen_net *pn)
3000{ 3017{
3001 struct pktgen_thread *t; 3018 struct pktgen_thread *t;
3002 3019
@@ -3004,7 +3021,7 @@ static void pktgen_reset_all_threads(void)
3004 3021
3005 mutex_lock(&pktgen_thread_lock); 3022 mutex_lock(&pktgen_thread_lock);
3006 3023
3007 list_for_each_entry(t, &pktgen_threads, th_list) 3024 list_for_each_entry(t, &pn->pktgen_threads, th_list)
3008 t->control |= (T_REMDEVALL); 3025 t->control |= (T_REMDEVALL);
3009 3026
3010 mutex_unlock(&pktgen_thread_lock); 3027 mutex_unlock(&pktgen_thread_lock);
@@ -3012,7 +3029,7 @@ static void pktgen_reset_all_threads(void)
3012 /* Propagate thread->control */ 3029 /* Propagate thread->control */
3013 schedule_timeout_interruptible(msecs_to_jiffies(125)); 3030 schedule_timeout_interruptible(msecs_to_jiffies(125));
3014 3031
3015 pktgen_wait_all_threads_run(); 3032 pktgen_wait_all_threads_run(pn);
3016} 3033}
3017 3034
3018static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3035static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
@@ -3154,9 +3171,7 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
3154static void pktgen_rem_thread(struct pktgen_thread *t) 3171static void pktgen_rem_thread(struct pktgen_thread *t)
3155{ 3172{
3156 /* Remove from the thread list */ 3173 /* Remove from the thread list */
3157 3174 remove_proc_entry(t->tsk->comm, t->net->proc_dir);
3158 remove_proc_entry(t->tsk->comm, pg_proc_dir);
3159
3160} 3175}
3161 3176
3162static void pktgen_resched(struct pktgen_dev *pkt_dev) 3177static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3302,7 +3317,7 @@ static int pktgen_thread_worker(void *arg)
3302 pkt_dev = next_to_run(t); 3317 pkt_dev = next_to_run(t);
3303 3318
3304 if (unlikely(!pkt_dev && t->control == 0)) { 3319 if (unlikely(!pkt_dev && t->control == 0)) {
3305 if (pktgen_exiting) 3320 if (t->net->pktgen_exiting)
3306 break; 3321 break;
3307 wait_event_interruptible_timeout(t->queue, 3322 wait_event_interruptible_timeout(t->queue,
3308 t->control != 0, 3323 t->control != 0,
@@ -3424,7 +3439,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3424 3439
3425 /* We don't allow a device to be on several threads */ 3440 /* We don't allow a device to be on several threads */
3426 3441
3427 pkt_dev = __pktgen_NN_threads(ifname, FIND); 3442 pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
3428 if (pkt_dev) { 3443 if (pkt_dev) {
3429 pr_err("ERROR: interface already used\n"); 3444 pr_err("ERROR: interface already used\n");
3430 return -EBUSY; 3445 return -EBUSY;
@@ -3459,13 +3474,13 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3459 pkt_dev->svlan_id = 0xffff; 3474 pkt_dev->svlan_id = 0xffff;
3460 pkt_dev->node = -1; 3475 pkt_dev->node = -1;
3461 3476
3462 err = pktgen_setup_dev(pkt_dev, ifname); 3477 err = pktgen_setup_dev(t->net, pkt_dev, ifname);
3463 if (err) 3478 if (err)
3464 goto out1; 3479 goto out1;
3465 if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING) 3480 if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
3466 pkt_dev->clone_skb = pg_clone_skb_d; 3481 pkt_dev->clone_skb = pg_clone_skb_d;
3467 3482
3468 pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir, 3483 pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
3469 &pktgen_if_fops, pkt_dev); 3484 &pktgen_if_fops, pkt_dev);
3470 if (!pkt_dev->entry) { 3485 if (!pkt_dev->entry) {
3471 pr_err("cannot create %s/%s procfs entry\n", 3486 pr_err("cannot create %s/%s procfs entry\n",
@@ -3490,7 +3505,7 @@ out1:
3490 return err; 3505 return err;
3491} 3506}
3492 3507
3493static int __init pktgen_create_thread(int cpu) 3508static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3494{ 3509{
3495 struct pktgen_thread *t; 3510 struct pktgen_thread *t;
3496 struct proc_dir_entry *pe; 3511 struct proc_dir_entry *pe;
@@ -3508,7 +3523,7 @@ static int __init pktgen_create_thread(int cpu)
3508 3523
3509 INIT_LIST_HEAD(&t->if_list); 3524 INIT_LIST_HEAD(&t->if_list);
3510 3525
3511 list_add_tail(&t->th_list, &pktgen_threads); 3526 list_add_tail(&t->th_list, &pn->pktgen_threads);
3512 init_completion(&t->start_done); 3527 init_completion(&t->start_done);
3513 3528
3514 p = kthread_create_on_node(pktgen_thread_worker, 3529 p = kthread_create_on_node(pktgen_thread_worker,
@@ -3524,7 +3539,7 @@ static int __init pktgen_create_thread(int cpu)
3524 kthread_bind(p, cpu); 3539 kthread_bind(p, cpu);
3525 t->tsk = p; 3540 t->tsk = p;
3526 3541
3527 pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir, 3542 pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
3528 &pktgen_thread_fops, t); 3543 &pktgen_thread_fops, t);
3529 if (!pe) { 3544 if (!pe) {
3530 pr_err("cannot create %s/%s procfs entry\n", 3545 pr_err("cannot create %s/%s procfs entry\n",
@@ -3535,6 +3550,7 @@ static int __init pktgen_create_thread(int cpu)
3535 return -EINVAL; 3550 return -EINVAL;
3536 } 3551 }
3537 3552
3553 t->net = pn;
3538 wake_up_process(p); 3554 wake_up_process(p);
3539 wait_for_completion(&t->start_done); 3555 wait_for_completion(&t->start_done);
3540 3556
@@ -3560,6 +3576,7 @@ static void _rem_dev_from_if_list(struct pktgen_thread *t,
3560static int pktgen_remove_device(struct pktgen_thread *t, 3576static int pktgen_remove_device(struct pktgen_thread *t,
3561 struct pktgen_dev *pkt_dev) 3577 struct pktgen_dev *pkt_dev)
3562{ 3578{
3579 struct pktgen_net *pn = t->net;
3563 3580
3564 pr_debug("remove_device pkt_dev=%p\n", pkt_dev); 3581 pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
3565 3582
@@ -3580,7 +3597,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
3580 _rem_dev_from_if_list(t, pkt_dev); 3597 _rem_dev_from_if_list(t, pkt_dev);
3581 3598
3582 if (pkt_dev->entry) 3599 if (pkt_dev->entry)
3583 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 3600 remove_proc_entry(pkt_dev->entry->name, pn->proc_dir);
3584 3601
3585#ifdef CONFIG_XFRM 3602#ifdef CONFIG_XFRM
3586 free_SAs(pkt_dev); 3603 free_SAs(pkt_dev);
@@ -3592,63 +3609,63 @@ static int pktgen_remove_device(struct pktgen_thread *t,
3592 return 0; 3609 return 0;
3593} 3610}
3594 3611
3595static int __init pg_init(void) 3612static int __net_init pg_net_init(struct net *net)
3596{ 3613{
3597 int cpu; 3614 struct pktgen_net *pn = net_generic(net, pg_net_id);
3598 struct proc_dir_entry *pe; 3615 struct proc_dir_entry *pe;
3599 int ret = 0; 3616 int cpu, ret = 0;
3600 3617
3601 pr_info("%s", version); 3618 pn->net = net;
3602 3619 INIT_LIST_HEAD(&pn->pktgen_threads);
3603 pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net); 3620 pn->pktgen_exiting = false;
3604 if (!pg_proc_dir) 3621 pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
3622 if (!pn->proc_dir) {
3623 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
3605 return -ENODEV; 3624 return -ENODEV;
3606 3625 }
3607 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); 3626 pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops);
3608 if (pe == NULL) { 3627 if (pe == NULL) {
3609 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); 3628 pr_err("cannot create %s procfs entry\n", PGCTRL);
3610 ret = -EINVAL; 3629 ret = -EINVAL;
3611 goto remove_dir; 3630 goto remove;
3612 } 3631 }
3613 3632
3614 register_netdevice_notifier(&pktgen_notifier_block);
3615
3616 for_each_online_cpu(cpu) { 3633 for_each_online_cpu(cpu) {
3617 int err; 3634 int err;
3618 3635
3619 err = pktgen_create_thread(cpu); 3636 err = pktgen_create_thread(cpu, pn);
3620 if (err) 3637 if (err)
3621 pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n", 3638 pr_warn("Cannot create thread for cpu %d (%d)\n",
3622 cpu, err); 3639 cpu, err);
3623 } 3640 }
3624 3641
3625 if (list_empty(&pktgen_threads)) { 3642 if (list_empty(&pn->pktgen_threads)) {
3626 pr_err("ERROR: Initialization failed for all threads\n"); 3643 pr_err("Initialization failed for all threads\n");
3627 ret = -ENODEV; 3644 ret = -ENODEV;
3628 goto unregister; 3645 goto remove_entry;
3629 } 3646 }
3630 3647
3631 return 0; 3648 return 0;
3632 3649
3633 unregister: 3650remove_entry:
3634 unregister_netdevice_notifier(&pktgen_notifier_block); 3651 remove_proc_entry(PGCTRL, pn->proc_dir);
3635 remove_proc_entry(PGCTRL, pg_proc_dir); 3652remove:
3636 remove_dir: 3653 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
3637 proc_net_remove(&init_net, PG_PROC_DIR);
3638 return ret; 3654 return ret;
3639} 3655}
3640 3656
3641static void __exit pg_cleanup(void) 3657static void __net_exit pg_net_exit(struct net *net)
3642{ 3658{
3659 struct pktgen_net *pn = net_generic(net, pg_net_id);
3643 struct pktgen_thread *t; 3660 struct pktgen_thread *t;
3644 struct list_head *q, *n; 3661 struct list_head *q, *n;
3645 LIST_HEAD(list); 3662 LIST_HEAD(list);
3646 3663
3647 /* Stop all interfaces & threads */ 3664 /* Stop all interfaces & threads */
3648 pktgen_exiting = true; 3665 pn->pktgen_exiting = true;
3649 3666
3650 mutex_lock(&pktgen_thread_lock); 3667 mutex_lock(&pktgen_thread_lock);
3651 list_splice_init(&pktgen_threads, &list); 3668 list_splice_init(&pn->pktgen_threads, &list);
3652 mutex_unlock(&pktgen_thread_lock); 3669 mutex_unlock(&pktgen_thread_lock);
3653 3670
3654 list_for_each_safe(q, n, &list) { 3671 list_for_each_safe(q, n, &list) {
@@ -3658,12 +3675,36 @@ static void __exit pg_cleanup(void)
3658 kfree(t); 3675 kfree(t);
3659 } 3676 }
3660 3677
3661 /* Un-register us from receiving netdevice events */ 3678 remove_proc_entry(PGCTRL, pn->proc_dir);
3662 unregister_netdevice_notifier(&pktgen_notifier_block); 3679 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
3680}
3681
3682static struct pernet_operations pg_net_ops = {
3683 .init = pg_net_init,
3684 .exit = pg_net_exit,
3685 .id = &pg_net_id,
3686 .size = sizeof(struct pktgen_net),
3687};
3688
3689static int __init pg_init(void)
3690{
3691 int ret = 0;
3663 3692
3664 /* Clean up proc file system */ 3693 pr_info("%s", version);
3665 remove_proc_entry(PGCTRL, pg_proc_dir); 3694 ret = register_pernet_subsys(&pg_net_ops);
3666 proc_net_remove(&init_net, PG_PROC_DIR); 3695 if (ret)
3696 return ret;
3697 ret = register_netdevice_notifier(&pktgen_notifier_block);
3698 if (ret)
3699 unregister_pernet_subsys(&pg_net_ops);
3700
3701 return ret;
3702}
3703
3704static void __exit pg_cleanup(void)
3705{
3706 unregister_netdevice_notifier(&pktgen_notifier_block);
3707 unregister_pernet_subsys(&pg_net_ops);
3667} 3708}
3668 3709
3669module_init(pg_init); 3710module_init(pg_init);
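
The conversion above is the standard pernet pattern: module init registers a pernet_operations with .id and .size, the core allocates one zeroed struct pktgen_net per namespace, and .init/.exit take over what used to be module-wide setup and teardown. A condensed sketch under illustrative names:

static int demo_net_id __read_mostly;

struct demo_net {
        struct list_head threads;       /* was a global list */
        bool exiting;                   /* was a global flag */
};

static int __net_init demo_net_init(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        INIT_LIST_HEAD(&dn->threads);   /* core already zeroed the struct */
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        dn->exiting = true;
        /* ... stop per-namespace threads, remove proc entries ... */
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net),
};

/* Module init/exit pair with unwind, as in pg_init()/pg_cleanup():
 *      register_pernet_subsys(&demo_net_ops);
 *      unregister_pernet_subsys(&demo_net_ops);
 */
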
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c31d9e8668c3..4425148d2b51 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
186 struct fastopen_queue *fastopenq = 186 struct fastopen_queue *fastopenq =
187 inet_csk(lsk)->icsk_accept_queue.fastopenq; 187 inet_csk(lsk)->icsk_accept_queue.fastopenq;
188 188
189 BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
190
191 tcp_sk(sk)->fastopen_rsk = NULL; 189 tcp_sk(sk)->fastopen_rsk = NULL;
192 spin_lock_bh(&fastopenq->lock); 190 spin_lock_bh(&fastopenq->lock);
193 fastopenq->qlen--; 191 fastopenq->qlen--;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1868625af25e..d8aa20f6a46e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -780,6 +780,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
780 + nla_total_size(4) /* IFLA_MTU */ 780 + nla_total_size(4) /* IFLA_MTU */
781 + nla_total_size(4) /* IFLA_LINK */ 781 + nla_total_size(4) /* IFLA_LINK */
782 + nla_total_size(4) /* IFLA_MASTER */ 782 + nla_total_size(4) /* IFLA_MASTER */
783 + nla_total_size(1) /* IFLA_CARRIER */
783 + nla_total_size(4) /* IFLA_PROMISCUITY */ 784 + nla_total_size(4) /* IFLA_PROMISCUITY */
784 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ 785 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
785 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 786 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
@@ -879,6 +880,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
879 const struct rtnl_link_stats64 *stats; 880 const struct rtnl_link_stats64 *stats;
880 struct nlattr *attr, *af_spec; 881 struct nlattr *attr, *af_spec;
881 struct rtnl_af_ops *af_ops; 882 struct rtnl_af_ops *af_ops;
883 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
882 884
883 ASSERT_RTNL(); 885 ASSERT_RTNL();
884 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 886 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -907,8 +909,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
907#endif 909#endif
908 (dev->ifindex != dev->iflink && 910 (dev->ifindex != dev->iflink &&
909 nla_put_u32(skb, IFLA_LINK, dev->iflink)) || 911 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
910 (dev->master && 912 (upper_dev &&
911 nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) || 913 nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
914 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
912 (dev->qdisc && 915 (dev->qdisc &&
913 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || 916 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
914 (dev->ifalias && 917 (dev->ifalias &&
@@ -1108,6 +1111,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1108 [IFLA_MTU] = { .type = NLA_U32 }, 1111 [IFLA_MTU] = { .type = NLA_U32 },
1109 [IFLA_LINK] = { .type = NLA_U32 }, 1112 [IFLA_LINK] = { .type = NLA_U32 },
1110 [IFLA_MASTER] = { .type = NLA_U32 }, 1113 [IFLA_MASTER] = { .type = NLA_U32 },
1114 [IFLA_CARRIER] = { .type = NLA_U8 },
1111 [IFLA_TXQLEN] = { .type = NLA_U32 }, 1115 [IFLA_TXQLEN] = { .type = NLA_U32 },
1112 [IFLA_WEIGHT] = { .type = NLA_U32 }, 1116 [IFLA_WEIGHT] = { .type = NLA_U32 },
1113 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 1117 [IFLA_OPERSTATE] = { .type = NLA_U8 },
@@ -1270,16 +1274,16 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
1270 1274
1271static int do_set_master(struct net_device *dev, int ifindex) 1275static int do_set_master(struct net_device *dev, int ifindex)
1272{ 1276{
1273 struct net_device *master_dev; 1277 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1274 const struct net_device_ops *ops; 1278 const struct net_device_ops *ops;
1275 int err; 1279 int err;
1276 1280
1277 if (dev->master) { 1281 if (upper_dev) {
1278 if (dev->master->ifindex == ifindex) 1282 if (upper_dev->ifindex == ifindex)
1279 return 0; 1283 return 0;
1280 ops = dev->master->netdev_ops; 1284 ops = upper_dev->netdev_ops;
1281 if (ops->ndo_del_slave) { 1285 if (ops->ndo_del_slave) {
1282 err = ops->ndo_del_slave(dev->master, dev); 1286 err = ops->ndo_del_slave(upper_dev, dev);
1283 if (err) 1287 if (err)
1284 return err; 1288 return err;
1285 } else { 1289 } else {
@@ -1288,12 +1292,12 @@ static int do_set_master(struct net_device *dev, int ifindex)
1288 } 1292 }
1289 1293
1290 if (ifindex) { 1294 if (ifindex) {
1291 master_dev = __dev_get_by_index(dev_net(dev), ifindex); 1295 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
1292 if (!master_dev) 1296 if (!upper_dev)
1293 return -EINVAL; 1297 return -EINVAL;
1294 ops = master_dev->netdev_ops; 1298 ops = upper_dev->netdev_ops;
1295 if (ops->ndo_add_slave) { 1299 if (ops->ndo_add_slave) {
1296 err = ops->ndo_add_slave(master_dev, dev); 1300 err = ops->ndo_add_slave(upper_dev, dev);
1297 if (err) 1301 if (err)
1298 return err; 1302 return err;
1299 } else { 1303 } else {
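
Throughout this file, dev->master gives way to the upper-device API: every former dev->master access becomes a netdev_master_upper_dev_get() call made under RTNL. A minimal sketch of the accessor idiom (the error code is illustrative):

/* Query the master (e.g. bridge or bond) of a port device.
 * netdev_master_upper_dev_get() requires RTNL and may return NULL. */
static int report_master(struct net_device *dev)
{
        struct net_device *upper;

        ASSERT_RTNL();
        upper = netdev_master_upper_dev_get(dev);
        if (!upper)
                return -ENODATA;        /* no master configured */
        pr_info("%s is enslaved to %s\n", dev->name, upper->name);
        return 0;
}
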
@@ -1307,7 +1311,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1307 struct nlattr **tb, char *ifname, int modified) 1311 struct nlattr **tb, char *ifname, int modified)
1308{ 1312{
1309 const struct net_device_ops *ops = dev->netdev_ops; 1313 const struct net_device_ops *ops = dev->netdev_ops;
1310 int send_addr_notify = 0;
1311 int err; 1314 int err;
1312 1315
1313 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) { 1316 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
@@ -1360,16 +1363,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1360 struct sockaddr *sa; 1363 struct sockaddr *sa;
1361 int len; 1364 int len;
1362 1365
1363 if (!ops->ndo_set_mac_address) {
1364 err = -EOPNOTSUPP;
1365 goto errout;
1366 }
1367
1368 if (!netif_device_present(dev)) {
1369 err = -ENODEV;
1370 goto errout;
1371 }
1372
1373 len = sizeof(sa_family_t) + dev->addr_len; 1366 len = sizeof(sa_family_t) + dev->addr_len;
1374 sa = kmalloc(len, GFP_KERNEL); 1367 sa = kmalloc(len, GFP_KERNEL);
1375 if (!sa) { 1368 if (!sa) {
@@ -1379,13 +1372,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1379 sa->sa_family = dev->type; 1372 sa->sa_family = dev->type;
1380 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 1373 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
1381 dev->addr_len); 1374 dev->addr_len);
1382 err = ops->ndo_set_mac_address(dev, sa); 1375 err = dev_set_mac_address(dev, sa);
1383 kfree(sa); 1376 kfree(sa);
1384 if (err) 1377 if (err)
1385 goto errout; 1378 goto errout;
1386 send_addr_notify = 1;
1387 modified = 1; 1379 modified = 1;
1388 add_device_randomness(dev->dev_addr, dev->addr_len);
1389 } 1380 }
1390 1381
1391 if (tb[IFLA_MTU]) { 1382 if (tb[IFLA_MTU]) {
@@ -1422,7 +1413,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1422 1413
1423 if (tb[IFLA_BROADCAST]) { 1414 if (tb[IFLA_BROADCAST]) {
1424 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 1415 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
1425 send_addr_notify = 1; 1416 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1426 } 1417 }
1427 1418
1428 if (ifm->ifi_flags || ifm->ifi_change) { 1419 if (ifm->ifi_flags || ifm->ifi_change) {
@@ -1438,6 +1429,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1438 modified = 1; 1429 modified = 1;
1439 } 1430 }
1440 1431
1432 if (tb[IFLA_CARRIER]) {
1433 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
1434 if (err)
1435 goto errout;
1436 modified = 1;
1437 }
1438
1441 if (tb[IFLA_TXQLEN]) 1439 if (tb[IFLA_TXQLEN])
1442 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 1440 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
1443 1441
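
The hunk above, together with the sizing, fill, and policy hunks earlier in this file, covers the four places a new link attribute touches. A compressed sketch of the same four steps for IFLA_CARRIER, using only calls shown in the patch:

/* 1. sizing:  + nla_total_size(1)                 in if_nlmsg_size() */
/* 2. policy:  [IFLA_CARRIER] = { .type = NLA_U8 } in ifla_policy[]   */

/* 3. fill path (RTM_GETLINK): */
static int fill_carrier(struct sk_buff *skb, struct net_device *dev)
{
        return nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev));
}

/* 4. setlink path (RTM_SETLINK): */
static int set_carrier(struct net_device *dev, struct nlattr *tb[])
{
        if (!tb[IFLA_CARRIER])
                return 0;
        return dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
}
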
@@ -1536,9 +1534,6 @@ errout:
1536 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", 1534 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
1537 dev->name); 1535 dev->name);
1538 1536
1539 if (send_addr_notify)
1540 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1541
1542 return err; 1537 return err;
1543} 1538}
1544 1539
@@ -1672,9 +1667,11 @@ struct net_device *rtnl_create_link(struct net *net,
1672 1667
1673 if (tb[IFLA_MTU]) 1668 if (tb[IFLA_MTU])
1674 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1669 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1675 if (tb[IFLA_ADDRESS]) 1670 if (tb[IFLA_ADDRESS]) {
1676 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), 1671 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
1677 nla_len(tb[IFLA_ADDRESS])); 1672 nla_len(tb[IFLA_ADDRESS]));
1673 dev->addr_assign_type = NET_ADDR_SET;
1674 }
1678 if (tb[IFLA_BROADCAST]) 1675 if (tb[IFLA_BROADCAST])
1679 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 1676 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
1680 nla_len(tb[IFLA_BROADCAST])); 1677 nla_len(tb[IFLA_BROADCAST]));
@@ -1992,6 +1989,7 @@ errout:
1992 if (err < 0) 1989 if (err < 0)
1993 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 1990 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
1994} 1991}
1992EXPORT_SYMBOL(rtmsg_ifinfo);
1995 1993
1996static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 1994static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
1997 struct net_device *dev, 1995 struct net_device *dev,
@@ -2054,16 +2052,12 @@ errout:
2054static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2052static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2055{ 2053{
2056 struct net *net = sock_net(skb->sk); 2054 struct net *net = sock_net(skb->sk);
2057 struct net_device *master = NULL;
2058 struct ndmsg *ndm; 2055 struct ndmsg *ndm;
2059 struct nlattr *tb[NDA_MAX+1]; 2056 struct nlattr *tb[NDA_MAX+1];
2060 struct net_device *dev; 2057 struct net_device *dev;
2061 u8 *addr; 2058 u8 *addr;
2062 int err; 2059 int err;
2063 2060
2064 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM;
2066
2067 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); 2061 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2068 if (err < 0) 2062 if (err < 0)
2069 return err; 2063 return err;
@@ -2096,10 +2090,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2096 /* Support fdb on master device the net/bridge default case */ 2090 /* Support fdb on master device the net/bridge default case */
2097 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 2091 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2098 (dev->priv_flags & IFF_BRIDGE_PORT)) { 2092 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2099 master = dev->master; 2093 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2100 err = master->netdev_ops->ndo_fdb_add(ndm, tb, 2094 const struct net_device_ops *ops = br_dev->netdev_ops;
2101 dev, addr, 2095
2102 nlh->nlmsg_flags); 2096 err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
2103 if (err) 2097 if (err)
2104 goto out; 2098 goto out;
2105 else 2099 else
@@ -2125,7 +2119,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2125{ 2119{
2126 struct net *net = sock_net(skb->sk); 2120 struct net *net = sock_net(skb->sk);
2127 struct ndmsg *ndm; 2121 struct ndmsg *ndm;
2128 struct nlattr *llattr; 2122 struct nlattr *tb[NDA_MAX+1];
2129 struct net_device *dev; 2123 struct net_device *dev;
2130 int err = -EINVAL; 2124 int err = -EINVAL;
2131 __u8 *addr; 2125 __u8 *addr;
@@ -2133,8 +2127,9 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2133 if (!capable(CAP_NET_ADMIN)) 2127 if (!capable(CAP_NET_ADMIN))
2134 return -EPERM; 2128 return -EPERM;
2135 2129
2136 if (nlmsg_len(nlh) < sizeof(*ndm)) 2130 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2137 return -EINVAL; 2131 if (err < 0)
2132 return err;
2138 2133
2139 ndm = nlmsg_data(nlh); 2134 ndm = nlmsg_data(nlh);
2140 if (ndm->ndm_ifindex == 0) { 2135 if (ndm->ndm_ifindex == 0) {
@@ -2148,22 +2143,27 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2148 return -ENODEV; 2143 return -ENODEV;
2149 } 2144 }
2150 2145
2151 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); 2146 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
2152 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { 2147 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
2153 pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n"); 2148 return -EINVAL;
2149 }
2150
2151 addr = nla_data(tb[NDA_LLADDR]);
2152 if (!is_valid_ether_addr(addr)) {
2153 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n");
2154 return -EINVAL; 2154 return -EINVAL;
2155 } 2155 }
2156 2156
2157 addr = nla_data(llattr);
2158 err = -EOPNOTSUPP; 2157 err = -EOPNOTSUPP;
2159 2158
2160 /* Support fdb on master device the net/bridge default case */ 2159 /* Support fdb on master device the net/bridge default case */
2161 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 2160 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2162 (dev->priv_flags & IFF_BRIDGE_PORT)) { 2161 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2163 struct net_device *master = dev->master; 2162 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2163 const struct net_device_ops *ops = br_dev->netdev_ops;
2164 2164
2165 if (master->netdev_ops->ndo_fdb_del) 2165 if (ops->ndo_fdb_del)
2166 err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr); 2166 err = ops->ndo_fdb_del(ndm, tb, dev, addr);
2167 2167
2168 if (err) 2168 if (err)
2169 goto out; 2169 goto out;
@@ -2173,7 +2173,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2173 2173
2174 /* Embedded bridge, macvlan, and any other device support */ 2174 /* Embedded bridge, macvlan, and any other device support */
2175 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { 2175 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
2176 err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr); 2176 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
2177 2177
2178 if (!err) { 2178 if (!err) {
2179 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); 2179 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
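
rtnl_fdb_del() previously fished one attribute out with nlmsg_find_attr(); it now runs the full nlmsg_parse() and validates the address with is_valid_ether_addr(), which also lets the parsed table be handed down to ndo_fdb_del(). A minimal sketch of that parse-then-validate shape, with a hypothetical handler name:

#include <linux/etherdevice.h>
#include <net/netlink.h>

static int demo_fdb_del(struct nlmsghdr *nlh)
{
        struct nlattr *tb[NDA_MAX + 1];
        u8 *addr;
        int err;

        err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
        if (err < 0)
                return err;

        if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN)
                return -EINVAL;

        addr = nla_data(tb[NDA_LLADDR]);
        if (!is_valid_ether_addr(addr))         /* rejects zero/multicast */
                return -EINVAL;

        /* ... pass ndm, tb and addr down to the device's ndo_fdb_del() ... */
        return 0;
}
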
@@ -2247,9 +2247,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2247 rcu_read_lock(); 2247 rcu_read_lock();
2248 for_each_netdev_rcu(net, dev) { 2248 for_each_netdev_rcu(net, dev) {
2249 if (dev->priv_flags & IFF_BRIDGE_PORT) { 2249 if (dev->priv_flags & IFF_BRIDGE_PORT) {
2250 struct net_device *master = dev->master; 2250 struct net_device *br_dev;
2251 const struct net_device_ops *ops = master->netdev_ops; 2251 const struct net_device_ops *ops;
2252 2252
2253 br_dev = netdev_master_upper_dev_get(dev);
2254 ops = br_dev->netdev_ops;
2253 if (ops->ndo_fdb_dump) 2255 if (ops->ndo_fdb_dump)
2254 idx = ops->ndo_fdb_dump(skb, cb, dev, idx); 2256 idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
2255 } 2257 }
@@ -2270,6 +2272,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2270 struct ifinfomsg *ifm; 2272 struct ifinfomsg *ifm;
2271 struct nlattr *br_afspec; 2273 struct nlattr *br_afspec;
2272 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 2274 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
2275 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2273 2276
2274 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); 2277 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
2275 if (nlh == NULL) 2278 if (nlh == NULL)
@@ -2287,8 +2290,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2287 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 2290 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
2288 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 2291 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
2289 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 2292 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
2290 (dev->master && 2293 (br_dev &&
2291 nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) || 2294 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
2292 (dev->addr_len && 2295 (dev->addr_len &&
2293 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 2296 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
2294 (dev->ifindex != dev->iflink && 2297 (dev->ifindex != dev->iflink &&
@@ -2320,23 +2323,31 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2320 int idx = 0; 2323 int idx = 0;
2321 u32 portid = NETLINK_CB(cb->skb).portid; 2324 u32 portid = NETLINK_CB(cb->skb).portid;
2322 u32 seq = cb->nlh->nlmsg_seq; 2325 u32 seq = cb->nlh->nlmsg_seq;
2326 struct nlattr *extfilt;
2327 u32 filter_mask = 0;
2328
2329 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
2330 IFLA_EXT_MASK);
2331 if (extfilt)
2332 filter_mask = nla_get_u32(extfilt);
2323 2333
2324 rcu_read_lock(); 2334 rcu_read_lock();
2325 for_each_netdev_rcu(net, dev) { 2335 for_each_netdev_rcu(net, dev) {
2326 const struct net_device_ops *ops = dev->netdev_ops; 2336 const struct net_device_ops *ops = dev->netdev_ops;
2327 struct net_device *master = dev->master; 2337 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2328 2338
2329 if (master && master->netdev_ops->ndo_bridge_getlink) { 2339 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
2330 if (idx >= cb->args[0] && 2340 if (idx >= cb->args[0] &&
2331 master->netdev_ops->ndo_bridge_getlink( 2341 br_dev->netdev_ops->ndo_bridge_getlink(
2332 skb, portid, seq, dev) < 0) 2342 skb, portid, seq, dev, filter_mask) < 0)
2333 break; 2343 break;
2334 idx++; 2344 idx++;
2335 } 2345 }
2336 2346
2337 if (ops->ndo_bridge_getlink) { 2347 if (ops->ndo_bridge_getlink) {
2338 if (idx >= cb->args[0] && 2348 if (idx >= cb->args[0] &&
2339 ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0) 2349 ops->ndo_bridge_getlink(skb, portid, seq, dev,
2350 filter_mask) < 0)
2340 break; 2351 break;
2341 idx++; 2352 idx++;
2342 } 2353 }
@@ -2365,7 +2376,7 @@ static inline size_t bridge_nlmsg_size(void)
2365static int rtnl_bridge_notify(struct net_device *dev, u16 flags) 2376static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
2366{ 2377{
2367 struct net *net = dev_net(dev); 2378 struct net *net = dev_net(dev);
2368 struct net_device *master = dev->master; 2379 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2369 struct sk_buff *skb; 2380 struct sk_buff *skb;
2370 int err = -EOPNOTSUPP; 2381 int err = -EOPNOTSUPP;
2371 2382
@@ -2376,15 +2387,15 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
2376 } 2387 }
2377 2388
2378 if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) && 2389 if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
2379 master && master->netdev_ops->ndo_bridge_getlink) { 2390 br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
2380 err = master->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev); 2391 err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
2381 if (err < 0) 2392 if (err < 0)
2382 goto errout; 2393 goto errout;
2383 } 2394 }
2384 2395
2385 if ((flags & BRIDGE_FLAGS_SELF) && 2396 if ((flags & BRIDGE_FLAGS_SELF) &&
2386 dev->netdev_ops->ndo_bridge_getlink) { 2397 dev->netdev_ops->ndo_bridge_getlink) {
2387 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev); 2398 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
2388 if (err < 0) 2399 if (err < 0)
2389 goto errout; 2400 goto errout;
2390 } 2401 }
@@ -2436,13 +2447,14 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2436 oflags = flags; 2447 oflags = flags;
2437 2448
2438 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 2449 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
2439 if (!dev->master || 2450 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2440 !dev->master->netdev_ops->ndo_bridge_setlink) { 2451
2452 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
2441 err = -EOPNOTSUPP; 2453 err = -EOPNOTSUPP;
2442 goto out; 2454 goto out;
2443 } 2455 }
2444 2456
2445 err = dev->master->netdev_ops->ndo_bridge_setlink(dev, nlh); 2457 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
2446 if (err) 2458 if (err)
2447 goto out; 2459 goto out;
2448 2460
@@ -2468,6 +2480,77 @@ out:
2468 return err; 2480 return err;
2469} 2481}
2470 2482
2483static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2484 void *arg)
2485{
2486 struct net *net = sock_net(skb->sk);
2487 struct ifinfomsg *ifm;
2488 struct net_device *dev;
2489 struct nlattr *br_spec, *attr = NULL;
2490 int rem, err = -EOPNOTSUPP;
2491 u16 oflags, flags = 0;
2492 bool have_flags = false;
2493
2494 if (nlmsg_len(nlh) < sizeof(*ifm))
2495 return -EINVAL;
2496
2497 ifm = nlmsg_data(nlh);
2498 if (ifm->ifi_family != AF_BRIDGE)
2499 return -EPFNOSUPPORT;
2500
2501 dev = __dev_get_by_index(net, ifm->ifi_index);
2502 if (!dev) {
2503 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
2504 return -ENODEV;
2505 }
2506
2507 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
2508 if (br_spec) {
2509 nla_for_each_nested(attr, br_spec, rem) {
2510 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2511 have_flags = true;
2512 flags = nla_get_u16(attr);
2513 break;
2514 }
2515 }
2516 }
2517
2518 oflags = flags;
2519
2520 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
2521 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2522
2523 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
2524 err = -EOPNOTSUPP;
2525 goto out;
2526 }
2527
2528 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
2529 if (err)
2530 goto out;
2531
2532 flags &= ~BRIDGE_FLAGS_MASTER;
2533 }
2534
2535 if ((flags & BRIDGE_FLAGS_SELF)) {
2536 if (!dev->netdev_ops->ndo_bridge_dellink)
2537 err = -EOPNOTSUPP;
2538 else
2539 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
2540
2541 if (!err)
2542 flags &= ~BRIDGE_FLAGS_SELF;
2543 }
2544
2545 if (have_flags)
2546 memcpy(nla_data(attr), &flags, sizeof(flags));
2547 /* Generate event to notify upper layer of bridge change */
2548 if (!err)
2549 err = rtnl_bridge_notify(dev, oflags);
2550out:
2551 return err;
2552}
2553
2471/* Protected by RTNL sempahore. */ 2554/* Protected by RTNL sempahore. */
2472static struct rtattr **rta_buf; 2555static struct rtattr **rta_buf;
2473static int rtattr_max; 2556static int rtattr_max;
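
The new rtnl_bridge_dellink() mirrors rtnl_bridge_setlink(), including the nested walk over IFLA_AF_SPEC to pick out IFLA_BRIDGE_FLAGS. A minimal sketch of that nested-attribute scan (helper name is illustrative):

/* Extract a u16 flags attribute nested under IFLA_AF_SPEC. */
static u16 bridge_flags(struct nlmsghdr *nlh, bool *found)
{
        struct nlattr *br_spec, *attr;
        int rem;

        *found = false;
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                  IFLA_AF_SPEC);
        if (!br_spec)
                return 0;

        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
                        *found = true;
                        return nla_get_u16(attr);
                }
        }
        return 0;
}
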
@@ -2651,6 +2734,7 @@ void __init rtnetlink_init(void)
2651 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); 2734 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
2652 2735
2653 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL); 2736 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
2737 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
2654 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); 2738 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
2655} 2739}
2656 2740
diff --git a/net/core/scm.c b/net/core/scm.c
index 57fb1ee6649f..905dcc6ad1e3 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -35,6 +35,7 @@
35#include <net/sock.h> 35#include <net/sock.h>
36#include <net/compat.h> 36#include <net/compat.h>
37#include <net/scm.h> 37#include <net/scm.h>
38#include <net/cls_cgroup.h>
38 39
39 40
40/* 41/*
@@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
302 } 303 }
303 /* Bump the usage count and install the file. */ 304 /* Bump the usage count and install the file. */
304 sock = sock_from_file(fp[i], &err); 305 sock = sock_from_file(fp[i], &err);
305 if (sock) 306 if (sock) {
306 sock_update_netprioidx(sock->sk, current); 307 sock_update_netprioidx(sock->sk, current);
308 sock_update_classid(sock->sk, current);
309 }
307 fd_install(new_fd, get_file(fp[i])); 310 fd_install(new_fd, get_file(fp[i]));
308 } 311 }
309 312
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..33245ef54c3b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -104,47 +104,37 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
104 .get = sock_pipe_buf_get, 104 .get = sock_pipe_buf_get,
105}; 105};
106 106
107/*
108 * Keep out-of-line to prevent kernel bloat.
109 * __builtin_return_address is not used because it is not always
110 * reliable.
111 */
112
113/** 107/**
114 * skb_over_panic - private function 108 * skb_panic - private function for out-of-line support
115 * @skb: buffer 109 * @skb: buffer
116 * @sz: size 110 * @sz: size
117 * @here: address 111 * @addr: address
118 * 112 * @msg: skb_over_panic or skb_under_panic
119 * Out of line support code for skb_put(). Not user callable. 113 *
114 * Out-of-line support for skb_put() and skb_push().
115 * Called via the wrapper skb_over_panic() or skb_under_panic().
116 * Keep out of line to prevent kernel bloat.
117 * __builtin_return_address is not used because it is not always reliable.
120 */ 118 */
121static void skb_over_panic(struct sk_buff *skb, int sz, void *here) 119static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
120 const char msg[])
122{ 121{
123 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", 122 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
124 __func__, here, skb->len, sz, skb->head, skb->data, 123 msg, addr, skb->len, sz, skb->head, skb->data,
125 (unsigned long)skb->tail, (unsigned long)skb->end, 124 (unsigned long)skb->tail, (unsigned long)skb->end,
126 skb->dev ? skb->dev->name : "<NULL>"); 125 skb->dev ? skb->dev->name : "<NULL>");
127 BUG(); 126 BUG();
128} 127}
129 128
130/** 129static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
131 * skb_under_panic - private function
132 * @skb: buffer
133 * @sz: size
134 * @here: address
135 *
136 * Out of line support code for skb_push(). Not user callable.
137 */
138
139static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
140{ 130{
141 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", 131 skb_panic(skb, sz, addr, __func__);
142 __func__, here, skb->len, sz, skb->head, skb->data,
143 (unsigned long)skb->tail, (unsigned long)skb->end,
144 skb->dev ? skb->dev->name : "<NULL>");
145 BUG();
146} 132}
147 133
134static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
135{
136 skb_panic(skb, sz, addr, __func__);
137}
148 138
149/* 139/*
150 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 140 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
@@ -155,8 +145,9 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
155 */ 145 */
156#define kmalloc_reserve(size, gfp, node, pfmemalloc) \ 146#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
157 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) 147 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
158void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip, 148
159 bool *pfmemalloc) 149static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
150 unsigned long ip, bool *pfmemalloc)
160{ 151{
161 void *obj; 152 void *obj;
162 bool ret_pfmemalloc = false; 153 bool ret_pfmemalloc = false;
@@ -259,6 +250,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
259 skb->end = skb->tail + size; 250 skb->end = skb->tail + size;
260#ifdef NET_SKBUFF_DATA_USES_OFFSET 251#ifdef NET_SKBUFF_DATA_USES_OFFSET
261 skb->mac_header = ~0U; 252 skb->mac_header = ~0U;
253 skb->transport_header = ~0U;
262#endif 254#endif
263 255
264 /* make sure we initialize shinfo sequentially */ 256 /* make sure we initialize shinfo sequentially */
@@ -327,6 +319,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
327 skb->end = skb->tail + size; 319 skb->end = skb->tail + size;
328#ifdef NET_SKBUFF_DATA_USES_OFFSET 320#ifdef NET_SKBUFF_DATA_USES_OFFSET
329 skb->mac_header = ~0U; 321 skb->mac_header = ~0U;
322 skb->transport_header = ~0U;
330#endif 323#endif
331 324
332 /* make sure we initialize shinfo sequentially */ 325 /* make sure we initialize shinfo sequentially */
@@ -348,10 +341,6 @@ struct netdev_alloc_cache {
348}; 341};
349static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); 342static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
350 343
351#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
352#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
353#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
354
355static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 344static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
356{ 345{
357 struct netdev_alloc_cache *nc; 346 struct netdev_alloc_cache *nc;
@@ -683,7 +672,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
683 new->network_header = old->network_header; 672 new->network_header = old->network_header;
684 new->mac_header = old->mac_header; 673 new->mac_header = old->mac_header;
685 new->inner_transport_header = old->inner_transport_header; 674 new->inner_transport_header = old->inner_transport_header;
686 new->inner_network_header = old->inner_transport_header; 675 new->inner_network_header = old->inner_network_header;
687 skb_dst_copy(new, old); 676 skb_dst_copy(new, old);
688 new->rxhash = old->rxhash; 677 new->rxhash = old->rxhash;
689 new->ooo_okay = old->ooo_okay; 678 new->ooo_okay = old->ooo_okay;
@@ -1649,7 +1638,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1649 1638
1650static struct page *linear_to_page(struct page *page, unsigned int *len, 1639static struct page *linear_to_page(struct page *page, unsigned int *len,
1651 unsigned int *offset, 1640 unsigned int *offset,
1652 struct sk_buff *skb, struct sock *sk) 1641 struct sock *sk)
1653{ 1642{
1654 struct page_frag *pfrag = sk_page_frag(sk); 1643 struct page_frag *pfrag = sk_page_frag(sk);
1655 1644
@@ -1682,14 +1671,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1682static bool spd_fill_page(struct splice_pipe_desc *spd, 1671static bool spd_fill_page(struct splice_pipe_desc *spd,
1683 struct pipe_inode_info *pipe, struct page *page, 1672 struct pipe_inode_info *pipe, struct page *page,
1684 unsigned int *len, unsigned int offset, 1673 unsigned int *len, unsigned int offset,
1685 struct sk_buff *skb, bool linear, 1674 bool linear,
1686 struct sock *sk) 1675 struct sock *sk)
1687{ 1676{
1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1677 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1689 return true; 1678 return true;
1690 1679
1691 if (linear) { 1680 if (linear) {
1692 page = linear_to_page(page, len, &offset, skb, sk); 1681 page = linear_to_page(page, len, &offset, sk);
1693 if (!page) 1682 if (!page)
1694 return true; 1683 return true;
1695 } 1684 }
@@ -1706,23 +1695,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
1706 return false; 1695 return false;
1707} 1696}
1708 1697
1709static inline void __segment_seek(struct page **page, unsigned int *poff,
1710 unsigned int *plen, unsigned int off)
1711{
1712 unsigned long n;
1713
1714 *poff += off;
1715 n = *poff / PAGE_SIZE;
1716 if (n)
1717 *page = nth_page(*page, n);
1718
1719 *poff = *poff % PAGE_SIZE;
1720 *plen -= off;
1721}
1722
1723static bool __splice_segment(struct page *page, unsigned int poff, 1698static bool __splice_segment(struct page *page, unsigned int poff,
1724 unsigned int plen, unsigned int *off, 1699 unsigned int plen, unsigned int *off,
1725 unsigned int *len, struct sk_buff *skb, 1700 unsigned int *len,
1726 struct splice_pipe_desc *spd, bool linear, 1701 struct splice_pipe_desc *spd, bool linear,
1727 struct sock *sk, 1702 struct sock *sk,
1728 struct pipe_inode_info *pipe) 1703 struct pipe_inode_info *pipe)
@@ -1737,23 +1712,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
1737 } 1712 }
1738 1713
1739 /* ignore any bits we already processed */ 1714 /* ignore any bits we already processed */
1740 if (*off) { 1715 poff += *off;
1741 __segment_seek(&page, &poff, &plen, *off); 1716 plen -= *off;
1742 *off = 0; 1717 *off = 0;
1743 }
1744 1718
1745 do { 1719 do {
1746 unsigned int flen = min(*len, plen); 1720 unsigned int flen = min(*len, plen);
1747 1721
1748 /* the linear region may spread across several pages */ 1722 if (spd_fill_page(spd, pipe, page, &flen, poff,
1749 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1723 linear, sk))
1750
1751 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1752 return true; 1724 return true;
1753 1725 poff += flen;
1754 __segment_seek(&page, &poff, &plen, flen); 1726 plen -= flen;
1755 *len -= flen; 1727 *len -= flen;
1756
1757 } while (*len && plen); 1728 } while (*len && plen);
1758 1729
1759 return false; 1730 return false;
@@ -1777,7 +1748,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1777 if (__splice_segment(virt_to_page(skb->data), 1748 if (__splice_segment(virt_to_page(skb->data),
1778 (unsigned long) skb->data & (PAGE_SIZE - 1), 1749 (unsigned long) skb->data & (PAGE_SIZE - 1),
1779 skb_headlen(skb), 1750 skb_headlen(skb),
1780 offset, len, skb, spd, 1751 offset, len, spd,
1781 skb_head_is_locked(skb), 1752 skb_head_is_locked(skb),
1782 sk, pipe)) 1753 sk, pipe))
1783 return true; 1754 return true;
@@ -1790,7 +1761,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1790 1761
1791 if (__splice_segment(skb_frag_page(f), 1762 if (__splice_segment(skb_frag_page(f),
1792 f->page_offset, skb_frag_size(f), 1763 f->page_offset, skb_frag_size(f),
1793 offset, len, skb, spd, false, sk, pipe)) 1764 offset, len, spd, false, sk, pipe))
1794 return true; 1765 return true;
1795 } 1766 }
1796 1767
@@ -2355,6 +2326,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2355{ 2326{
2356 int pos = skb_headlen(skb); 2327 int pos = skb_headlen(skb);
2357 2328
2329 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2358 if (len < pos) /* Split line is inside header. */ 2330 if (len < pos) /* Split line is inside header. */
2359 skb_split_inside_header(skb, skb1, len, pos); 2331 skb_split_inside_header(skb, skb1, len, pos);
2360 else /* Second chunk has no header, nothing to copy. */ 2332 else /* Second chunk has no header, nothing to copy. */
@@ -2686,48 +2658,37 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2686 int len, int odd, struct sk_buff *skb), 2658 int len, int odd, struct sk_buff *skb),
2687 void *from, int length) 2659 void *from, int length)
2688{ 2660{
2689 int frg_cnt = 0; 2661 int frg_cnt = skb_shinfo(skb)->nr_frags;
2690 skb_frag_t *frag = NULL; 2662 int copy;
2691 struct page *page = NULL;
2692 int copy, left;
2693 int offset = 0; 2663 int offset = 0;
2694 int ret; 2664 int ret;
2665 struct page_frag *pfrag = &current->task_frag;
2695 2666
2696 do { 2667 do {
2697 /* Return error if we don't have space for new frag */ 2668 /* Return error if we don't have space for new frag */
2698 frg_cnt = skb_shinfo(skb)->nr_frags;
2699 if (frg_cnt >= MAX_SKB_FRAGS) 2669 if (frg_cnt >= MAX_SKB_FRAGS)
2700 return -EFAULT; 2670 return -EMSGSIZE;
2701 2671
2702 /* allocate a new page for next frag */ 2672 if (!sk_page_frag_refill(sk, pfrag))
2703 page = alloc_pages(sk->sk_allocation, 0);
2704
2705 /* If alloc_page fails just return failure and caller will
2706 * free previous allocated pages by doing kfree_skb()
2707 */
2708 if (page == NULL)
2709 return -ENOMEM; 2673 return -ENOMEM;
2710 2674
2711 /* initialize the next frag */
2712 skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2713 skb->truesize += PAGE_SIZE;
2714 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2715
2716 /* get the new initialized frag */
2717 frg_cnt = skb_shinfo(skb)->nr_frags;
2718 frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2719
2720 /* copy the user data to page */ 2675 /* copy the user data to page */
2721 left = PAGE_SIZE - frag->page_offset; 2676 copy = min_t(int, length, pfrag->size - pfrag->offset);
2722 copy = (length > left)? left : length;
2723 2677
2724 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2678 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2725 offset, copy, 0, skb); 2679 offset, copy, 0, skb);
2726 if (ret < 0) 2680 if (ret < 0)
2727 return -EFAULT; 2681 return -EFAULT;
2728 2682
2729 /* copy was successful so update the size parameters */ 2683 /* copy was successful so update the size parameters */
2730 skb_frag_size_add(frag, copy); 2684 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2685 copy);
2686 frg_cnt++;
2687 pfrag->offset += copy;
2688 get_page(pfrag->page);
2689
2690 skb->truesize += copy;
2691 atomic_add(copy, &sk->sk_wmem_alloc);
2731 skb->len += copy; 2692 skb->len += copy;
2732 skb->data_len += copy; 2693 skb->data_len += copy;
2733 offset += copy; 2694 offset += copy;
@@ -2777,6 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2777 unsigned int mss = skb_shinfo(skb)->gso_size; 2738 unsigned int mss = skb_shinfo(skb)->gso_size;
2778 unsigned int doffset = skb->data - skb_mac_header(skb); 2739 unsigned int doffset = skb->data - skb_mac_header(skb);
2779 unsigned int offset = doffset; 2740 unsigned int offset = doffset;
2741 unsigned int tnl_hlen = skb_tnl_header_len(skb);
2780 unsigned int headroom; 2742 unsigned int headroom;
2781 unsigned int len; 2743 unsigned int len;
2782 int sg = !!(features & NETIF_F_SG); 2744 int sg = !!(features & NETIF_F_SG);
@@ -2853,7 +2815,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2853 skb_set_network_header(nskb, skb->mac_len); 2815 skb_set_network_header(nskb, skb->mac_len);
2854 nskb->transport_header = (nskb->network_header + 2816 nskb->transport_header = (nskb->network_header +
2855 skb_network_header_len(skb)); 2817 skb_network_header_len(skb));
2856 skb_copy_from_linear_data(skb, nskb->data, doffset); 2818
2819 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
2820 nskb->data - tnl_hlen,
2821 doffset + tnl_hlen);
2857 2822
2858 if (fskb != skb_shinfo(skb)->frag_list) 2823 if (fskb != skb_shinfo(skb)->frag_list)
2859 continue; 2824 continue;
@@ -2871,6 +2836,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2871 skb_copy_from_linear_data_offset(skb, offset, 2836 skb_copy_from_linear_data_offset(skb, offset,
2872 skb_put(nskb, hsize), hsize); 2837 skb_put(nskb, hsize), hsize);
2873 2838
2839 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2840
2874 while (pos < offset + len && i < nfrags) { 2841 while (pos < offset + len && i < nfrags) {
2875 *frag = skb_shinfo(skb)->frags[i]; 2842 *frag = skb_shinfo(skb)->frags[i];
2876 __skb_frag_ref(frag); 2843 __skb_frag_ref(frag);
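Summary of the skbuff.c changes above: the duplicated skb_over_panic()/skb_under_panic() bodies collapse into one out-of-line skb_panic() that takes the wrapper's __func__ as its message; the __segment_seek() helper goes away in favour of straight offset arithmetic in __splice_segment(); and skb_append_datato_frags() stops allocating a fresh page per iteration, drawing instead from current->task_frag via sk_page_frag_refill() and returning -EMSGSIZE when the frag array is full. A minimal userspace illustration of the __func__ deduplication trick (the buf_* names are invented for the demo):

	#include <stdio.h>
	#include <stdlib.h>

	/* one out-of-line reporter; each thin wrapper passes its own
	 * __func__, so the printed prefix still names the caller just
	 * as the two duplicated functions used to */
	static void buf_panic(int len, const char msg[])
	{
		fprintf(stderr, "%s: len:%d\n", msg, len);
		abort();
	}

	static void buf_over_panic(int len)  { buf_panic(len, __func__); }
	static void buf_under_panic(int len) { buf_panic(len, __func__); }

	int main(void)
	{
		buf_over_panic(42);	/* prints "buf_over_panic: len:42" */
		buf_under_panic(7);	/* not reached */
		return 0;
	}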
diff --git a/net/core/sock.c b/net/core/sock.c
index a692ef49c9bb..fe96c5d34299 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -583,7 +583,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
583 goto out; 583 goto out;
584 584
585retry: 585retry:
586 seq = read_seqbegin(&devnet_rename_seq); 586 seq = read_seqcount_begin(&devnet_rename_seq);
587 rcu_read_lock(); 587 rcu_read_lock();
588 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); 588 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
589 ret = -ENODEV; 589 ret = -ENODEV;
@@ -594,7 +594,7 @@ retry:
594 594
595 strcpy(devname, dev->name); 595 strcpy(devname, dev->name);
596 rcu_read_unlock(); 596 rcu_read_unlock();
597 if (read_seqretry(&devnet_rename_seq, seq)) 597 if (read_seqcount_retry(&devnet_rename_seq, seq))
598 goto retry; 598 goto retry;
599 599
600 len = strlen(devname) + 1; 600 len = strlen(devname) + 1;
@@ -665,6 +665,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
665 case SO_REUSEADDR: 665 case SO_REUSEADDR:
666 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 666 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
667 break; 667 break;
668 case SO_REUSEPORT:
669 sk->sk_reuseport = valbool;
670 break;
668 case SO_TYPE: 671 case SO_TYPE:
669 case SO_PROTOCOL: 672 case SO_PROTOCOL:
670 case SO_DOMAIN: 673 case SO_DOMAIN:
@@ -861,6 +864,13 @@ set_rcvbuf:
861 ret = sk_detach_filter(sk); 864 ret = sk_detach_filter(sk);
862 break; 865 break;
863 866
867 case SO_LOCK_FILTER:
868 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
869 ret = -EPERM;
870 else
871 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
872 break;
873
864 case SO_PASSSEC: 874 case SO_PASSSEC:
865 if (valbool) 875 if (valbool)
866 set_bit(SOCK_PASSSEC, &sock->flags); 876 set_bit(SOCK_PASSSEC, &sock->flags);
@@ -965,6 +975,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
965 v.val = sk->sk_reuse; 975 v.val = sk->sk_reuse;
966 break; 976 break;
967 977
978 case SO_REUSEPORT:
979 v.val = sk->sk_reuseport;
980 break;
981
968 case SO_KEEPALIVE: 982 case SO_KEEPALIVE:
969 v.val = sock_flag(sk, SOCK_KEEPOPEN); 983 v.val = sock_flag(sk, SOCK_KEEPOPEN);
970 break; 984 break;
@@ -1140,6 +1154,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1140 1154
1141 goto lenout; 1155 goto lenout;
1142 1156
1157 case SO_LOCK_FILTER:
1158 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1159 break;
1160
1143 default: 1161 default:
1144 return -ENOPROTOOPT; 1162 return -ENOPROTOOPT;
1145 } 1163 }
@@ -2212,7 +2230,7 @@ EXPORT_SYMBOL(sk_reset_timer);
2212 2230
2213void sk_stop_timer(struct sock *sk, struct timer_list* timer) 2231void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2214{ 2232{
2215 if (timer_pending(timer) && del_timer(timer)) 2233 if (del_timer(timer))
2216 __sock_put(sk); 2234 __sock_put(sk);
2217} 2235}
2218EXPORT_SYMBOL(sk_stop_timer); 2236EXPORT_SYMBOL(sk_stop_timer);
@@ -2818,7 +2836,7 @@ static const struct file_operations proto_seq_fops = {
2818 2836
2819static __net_init int proto_init_net(struct net *net) 2837static __net_init int proto_init_net(struct net *net)
2820{ 2838{
2821 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) 2839 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2822 return -ENOMEM; 2840 return -ENOMEM;
2823 2841
2824 return 0; 2842 return 0;
@@ -2826,7 +2844,7 @@ static __net_init int proto_init_net(struct net *net)
2826 2844
2827static __net_exit void proto_exit_net(struct net *net) 2845static __net_exit void proto_exit_net(struct net *net)
2828{ 2846{
2829 proc_net_remove(net, "protocols"); 2847 remove_proc_entry("protocols", net->proc_net);
2830} 2848}
2831 2849
2832 2850
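The sock.c diff above adds the set/get plumbing for SO_REUSEPORT (the per-socket sk_reuseport flag) and SO_LOCK_FILTER (once SOCK_FILTER_LOCKED is set, clearing it fails with -EPERM), simplifies sk_stop_timer() (del_timer() already reports whether it removed a pending timer, so the timer_pending() pre-check was redundant), and moves the bind-to-device name read onto the devnet_rename seqcount. A hedged userspace sketch of SO_REUSEPORT, assuming a libc new enough to define the constant (it is 15 on most Linux architectures):

	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>

	#ifndef SO_REUSEPORT
	#define SO_REUSEPORT 15	/* Linux value; older libcs lack the define */
	#endif

	/* with SO_REUSEPORT set on both sockets before bind(), the second
	 * bind() to the same port succeeds and the kernel load-balances
	 * incoming datagrams between the two sockets */
	static int bound_socket(unsigned short port)
	{
		int one = 1;
		int s = socket(AF_INET, SOCK_DGRAM, 0);
		struct sockaddr_in sin;

		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = htonl(INADDR_ANY);
		sin.sin_port = htons(port);

		setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
			return -1;
		return s;
	}

	int main(void)
	{
		int a = bound_socket(5353);
		int b = bound_socket(5353);

		printf("first bind %s, second bind %s\n",
		       a >= 0 ? "ok" : "failed", b >= 0 ? "ok" : "failed");
		return 0;
	}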
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d1b08045a9df..cfdb46ab3a7f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -20,6 +20,8 @@
20#include <net/sock.h> 20#include <net/sock.h>
21#include <net/net_ratelimit.h> 21#include <net/net_ratelimit.h>
22 22
23static int one = 1;
24
23#ifdef CONFIG_RPS 25#ifdef CONFIG_RPS
24static int rps_sock_flow_sysctl(ctl_table *table, int write, 26static int rps_sock_flow_sysctl(ctl_table *table, int write,
25 void __user *buffer, size_t *lenp, loff_t *ppos) 27 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -92,28 +94,32 @@ static struct ctl_table net_core_table[] = {
92 .data = &sysctl_wmem_max, 94 .data = &sysctl_wmem_max,
93 .maxlen = sizeof(int), 95 .maxlen = sizeof(int),
94 .mode = 0644, 96 .mode = 0644,
95 .proc_handler = proc_dointvec 97 .proc_handler = proc_dointvec_minmax,
98 .extra1 = &one,
96 }, 99 },
97 { 100 {
98 .procname = "rmem_max", 101 .procname = "rmem_max",
99 .data = &sysctl_rmem_max, 102 .data = &sysctl_rmem_max,
100 .maxlen = sizeof(int), 103 .maxlen = sizeof(int),
101 .mode = 0644, 104 .mode = 0644,
102 .proc_handler = proc_dointvec 105 .proc_handler = proc_dointvec_minmax,
106 .extra1 = &one,
103 }, 107 },
104 { 108 {
105 .procname = "wmem_default", 109 .procname = "wmem_default",
106 .data = &sysctl_wmem_default, 110 .data = &sysctl_wmem_default,
107 .maxlen = sizeof(int), 111 .maxlen = sizeof(int),
108 .mode = 0644, 112 .mode = 0644,
109 .proc_handler = proc_dointvec 113 .proc_handler = proc_dointvec_minmax,
114 .extra1 = &one,
110 }, 115 },
111 { 116 {
112 .procname = "rmem_default", 117 .procname = "rmem_default",
113 .data = &sysctl_rmem_default, 118 .data = &sysctl_rmem_default,
114 .maxlen = sizeof(int), 119 .maxlen = sizeof(int),
115 .mode = 0644, 120 .mode = 0644,
116 .proc_handler = proc_dointvec 121 .proc_handler = proc_dointvec_minmax,
122 .extra1 = &one,
117 }, 123 },
118 { 124 {
119 .procname = "dev_weight", 125 .procname = "dev_weight",
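The sysctl hunks swap proc_dointvec for proc_dointvec_minmax with .extra1 = &one, so writing 0 or a negative value to rmem_max and friends is now rejected with -EINVAL instead of silently accepted. The shape of such an entry, reduced to a single table slot (a sketch; everything except the handler and .extra1 convention is as shown in the diff):

	static int one = 1;

	static struct ctl_table example_table[] = {
		{
			.procname	= "wmem_max",
			.data		= &sysctl_wmem_max,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &one,	/* writes below 1 now fail with -EINVAL */
		},
		{ }
	};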
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index b75968a04017..8c0ef71bed2f 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,6 +1,6 @@
1menuconfig IP_DCCP 1menuconfig IP_DCCP
2 tristate "The DCCP Protocol (EXPERIMENTAL)" 2 tristate "The DCCP Protocol"
3 depends on INET && EXPERIMENTAL 3 depends on INET
4 ---help--- 4 ---help---
5 Datagram Congestion Control Protocol (RFC 4340) 5 Datagram Congestion Control Protocol (RFC 4340)
6 6
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 0581143cb800..8ba3fc9d6d16 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -1,5 +1,4 @@
1menu "DCCP CCIDs Configuration (EXPERIMENTAL)" 1menu "DCCP CCIDs Configuration"
2 depends on EXPERIMENTAL
3 2
4config IP_DCCP_CCID2_DEBUG 3config IP_DCCP_CCID2_DEBUG
5 bool "CCID-2 debugging messages" 4 bool "CCID-2 debugging messages"
@@ -12,7 +11,7 @@ config IP_DCCP_CCID2_DEBUG
12 If in doubt, say N. 11 If in doubt, say N.
13 12
14config IP_DCCP_CCID3 13config IP_DCCP_CCID3
15 bool "CCID-3 (TCP-Friendly) (EXPERIMENTAL)" 14 bool "CCID-3 (TCP-Friendly)"
16 def_bool y if (IP_DCCP = y || IP_DCCP = m) 15 def_bool y if (IP_DCCP = y || IP_DCCP = m)
17 ---help--- 16 ---help---
18 CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based 17 CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 176ecdba4a22..4f9f5eb478f1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -439,8 +439,8 @@ exit:
439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
440 return NULL; 440 return NULL;
441put_and_exit: 441put_and_exit:
442 bh_unlock_sock(newsk); 442 inet_csk_prepare_forced_close(newsk);
443 sock_put(newsk); 443 dccp_done(newsk);
444 goto exit; 444 goto exit;
445} 445}
446 446
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 56840b249f3b..6e05981f271e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
586 586
587 if (__inet_inherit_port(sk, newsk) < 0) { 587 if (__inet_inherit_port(sk, newsk) < 0) {
588 sock_put(newsk); 588 inet_csk_prepare_forced_close(newsk);
589 dccp_done(newsk);
589 goto out; 590 goto out;
590 } 591 }
591 __inet6_hash(newsk, NULL); 592 __inet6_hash(newsk, NULL);
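Both DCCP hunks above replace the open-coded teardown on the child-socket error path with inet_csk_prepare_forced_close() followed by dccp_done(): the helper unlocks the freshly cloned socket, drops the clone's extra reference and marks it dead and orphaned so the protocol's done() routine can destroy it safely. Roughly, the converted error path looks like this (a sketch with context abbreviated, not a verbatim quote):

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);	/* unlock, drop the clone's
							 * ref, mark dead/orphaned */
		dccp_done(newsk);			/* tear the child down */
		goto out;
	}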
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 0a8d6ebd9b45..4c6bdf97a657 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -171,7 +171,7 @@ static __init int dccpprobe_init(void)
171 spin_lock_init(&dccpw.lock); 171 spin_lock_init(&dccpw.lock);
172 if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL)) 172 if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
173 return ret; 173 return ret;
174 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 174 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
175 goto err0; 175 goto err0;
176 176
177 ret = setup_jprobe(); 177 ret = setup_jprobe();
@@ -181,7 +181,7 @@ static __init int dccpprobe_init(void)
181 pr_info("DCCP watch registered (port=%d)\n", port); 181 pr_info("DCCP watch registered (port=%d)\n", port);
182 return 0; 182 return 0;
183err1: 183err1:
184 proc_net_remove(&init_net, procname); 184 remove_proc_entry(procname, init_net.proc_net);
185err0: 185err0:
186 kfifo_free(&dccpw.fifo); 186 kfifo_free(&dccpw.fifo);
187 return ret; 187 return ret;
@@ -191,7 +191,7 @@ module_init(dccpprobe_init);
191static __exit void dccpprobe_exit(void) 191static __exit void dccpprobe_exit(void)
192{ 192{
193 kfifo_free(&dccpw.fifo); 193 kfifo_free(&dccpw.fifo);
194 proc_net_remove(&init_net, procname); 194 remove_proc_entry(procname, init_net.proc_net);
195 unregister_jprobe(&dccp_send_probe); 195 unregister_jprobe(&dccp_send_probe);
196 196
197} 197}
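dccpprobe_init() above shows the first of many mechanical conversions in this section: proc_net_fops_create(net, name, mode, fops) becomes proc_create(name, mode, net->proc_net, fops), and proc_net_remove(net, name) becomes remove_proc_entry(name, net->proc_net). The same substitution recurs below for decnet and arp; reduced to a per-netns template (the foo names are placeholders):

	static int __net_init foo_net_init(struct net *net)
	{
		if (!proc_create("foo", S_IRUGO, net->proc_net, &foo_seq_fops))
			return -ENOMEM;
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		remove_proc_entry("foo", net->proc_net);
	}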
diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig
index 7914fd619c5c..f3393e154f0f 100644
--- a/net/decnet/Kconfig
+++ b/net/decnet/Kconfig
@@ -25,8 +25,8 @@ config DECNET
25 The module is called decnet. 25 The module is called decnet.
26 26
27config DECNET_ROUTER 27config DECNET_ROUTER
28 bool "DECnet: router support (EXPERIMENTAL)" 28 bool "DECnet: router support"
29 depends on DECNET && EXPERIMENTAL 29 depends on DECNET
30 select FIB_RULES 30 select FIB_RULES
31 ---help--- 31 ---help---
32 Add support for turning your DECnet Endnode into a level 1 or 2 32 Add support for turning your DECnet Endnode into a level 1 or 2
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 307c322d53bb..c4a2def5b7bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -909,6 +909,7 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
909 struct dn_scp *scp = DN_SK(sk); 909 struct dn_scp *scp = DN_SK(sk);
910 int err = -EISCONN; 910 int err = -EISCONN;
911 struct flowidn fld; 911 struct flowidn fld;
912 struct dst_entry *dst;
912 913
913 if (sock->state == SS_CONNECTED) 914 if (sock->state == SS_CONNECTED)
914 goto out; 915 goto out;
@@ -955,10 +956,11 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
955 fld.flowidn_proto = DNPROTO_NSP; 956 fld.flowidn_proto = DNPROTO_NSP;
956 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0) 957 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
957 goto out; 958 goto out;
958 sk->sk_route_caps = sk->sk_dst_cache->dev->features; 959 dst = __sk_dst_get(sk);
960 sk->sk_route_caps = dst->dev->features;
959 sock->state = SS_CONNECTING; 961 sock->state = SS_CONNECTING;
960 scp->state = DN_CI; 962 scp->state = DN_CI;
961 scp->segsize_loc = dst_metric_advmss(sk->sk_dst_cache); 963 scp->segsize_loc = dst_metric_advmss(dst);
962 964
963 dn_nsp_send_conninit(sk, NSP_CI); 965 dn_nsp_send_conninit(sk, NSP_CI);
964 err = -EINPROGRESS; 966 err = -EINPROGRESS;
@@ -2382,7 +2384,7 @@ static int __init decnet_init(void)
2382 dev_add_pack(&dn_dix_packet_type); 2384 dev_add_pack(&dn_dix_packet_type);
2383 register_netdevice_notifier(&dn_dev_notifier); 2385 register_netdevice_notifier(&dn_dev_notifier);
2384 2386
2385 proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops); 2387 proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops);
2386 dn_register_sysctl(); 2388 dn_register_sysctl();
2387out: 2389out:
2388 return rc; 2390 return rc;
@@ -2411,7 +2413,7 @@ static void __exit decnet_exit(void)
2411 dn_neigh_cleanup(); 2413 dn_neigh_cleanup();
2412 dn_fib_cleanup(); 2414 dn_fib_cleanup();
2413 2415
2414 proc_net_remove(&init_net, "decnet"); 2416 remove_proc_entry("decnet", init_net.proc_net);
2415 2417
2416 proto_unregister(&dn_proto); 2418 proto_unregister(&dn_proto);
2417 2419
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index e47ba9fc4a0e..c8da116d84a4 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1412,7 +1412,7 @@ void __init dn_dev_init(void)
1412 rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); 1412 rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL);
1413 rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); 1413 rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL);
1414 1414
1415 proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); 1415 proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
1416 1416
1417#ifdef CONFIG_SYSCTL 1417#ifdef CONFIG_SYSCTL
1418 { 1418 {
@@ -1433,7 +1433,7 @@ void __exit dn_dev_cleanup(void)
1433 } 1433 }
1434#endif /* CONFIG_SYSCTL */ 1434#endif /* CONFIG_SYSCTL */
1435 1435
1436 proc_net_remove(&init_net, "decnet_dev"); 1436 remove_proc_entry("decnet_dev", init_net.proc_net);
1437 1437
1438 dn_dev_devices_off(); 1438 dn_dev_devices_off();
1439} 1439}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 3aede1b459fd..f8637f93d318 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -95,7 +95,7 @@ static u32 dn_neigh_hash(const void *pkey,
95 95
96struct neigh_table dn_neigh_table = { 96struct neigh_table dn_neigh_table = {
97 .family = PF_DECnet, 97 .family = PF_DECnet,
98 .entry_size = sizeof(struct dn_neigh), 98 .entry_size = NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
99 .key_len = sizeof(__le16), 99 .key_len = sizeof(__le16),
100 .hash = dn_neigh_hash, 100 .hash = dn_neigh_hash,
101 .constructor = dn_neigh_construct, 101 .constructor = dn_neigh_construct,
@@ -590,11 +590,12 @@ static const struct file_operations dn_neigh_seq_fops = {
590void __init dn_neigh_init(void) 590void __init dn_neigh_init(void)
591{ 591{
592 neigh_table_init(&dn_neigh_table); 592 neigh_table_init(&dn_neigh_table);
593 proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops); 593 proc_create("decnet_neigh", S_IRUGO, init_net.proc_net,
594 &dn_neigh_seq_fops);
594} 595}
595 596
596void __exit dn_neigh_cleanup(void) 597void __exit dn_neigh_cleanup(void)
597{ 598{
598 proc_net_remove(&init_net, "decnet_neigh"); 599 remove_proc_entry("decnet_neigh", init_net.proc_net);
599 neigh_table_clear(&dn_neigh_table); 600 neigh_table_clear(&dn_neigh_table);
600} 601}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8a96047c7c94..1aaa51ebbda6 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -598,7 +598,7 @@ void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
598 if (reason == 0) 598 if (reason == 0)
599 reason = le16_to_cpu(scp->discdata_out.opt_status); 599 reason = le16_to_cpu(scp->discdata_out.opt_status);
600 600
601 dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl, 601 dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl,
602 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc); 602 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
603} 603}
604 604
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b57419cc41a4..5ac0e153ef83 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1282,7 +1282,7 @@ static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int
1282 return err; 1282 return err;
1283} 1283}
1284 1284
1285int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags) 1285int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
1286{ 1286{
1287 int err; 1287 int err;
1288 1288
@@ -1901,7 +1901,8 @@ void __init dn_route_init(void)
1901 1901
1902 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); 1902 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
1903 1903
1904 proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); 1904 proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
1905 &dn_rt_cache_seq_fops);
1905 1906
1906#ifdef CONFIG_DECNET_ROUTER 1907#ifdef CONFIG_DECNET_ROUTER
1907 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, 1908 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
@@ -1917,7 +1918,7 @@ void __exit dn_route_cleanup(void)
1917 del_timer(&dn_route_timer); 1918 del_timer(&dn_route_timer);
1918 dn_run_flush(0); 1919 dn_run_flush(0);
1919 1920
1920 proc_net_remove(&init_net, "decnet_cache"); 1921 remove_proc_entry("decnet_cache", init_net.proc_net);
1921 dst_entries_destroy(&dn_dst_ops); 1922 dst_entries_destroy(&dn_dst_ops);
1922} 1923}
1923 1924
diff --git a/net/decnet/netfilter/Kconfig b/net/decnet/netfilter/Kconfig
index 2f81de5e752f..8d7c109d5109 100644
--- a/net/decnet/netfilter/Kconfig
+++ b/net/decnet/netfilter/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menu "DECnet: Netfilter Configuration" 5menu "DECnet: Netfilter Configuration"
6 depends on DECNET && NETFILTER && EXPERIMENTAL 6 depends on DECNET && NETFILTER
7 depends on NETFILTER_ADVANCED 7 depends on NETFILTER_ADVANCED
8 8
9config DECNET_NF_GRABULATOR 9config DECNET_NF_GRABULATOR
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8aa4b1115384..0a69d0757795 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -259,20 +259,16 @@ static int __init init_dns_resolver(void)
259 if (!cred) 259 if (!cred)
260 return -ENOMEM; 260 return -ENOMEM;
261 261
262 keyring = key_alloc(&key_type_keyring, ".dns_resolver", 262 keyring = keyring_alloc(".dns_resolver",
263 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 263 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
264 (KEY_POS_ALL & ~KEY_POS_SETATTR) | 264 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
265 KEY_USR_VIEW | KEY_USR_READ, 265 KEY_USR_VIEW | KEY_USR_READ,
266 KEY_ALLOC_NOT_IN_QUOTA); 266 KEY_ALLOC_NOT_IN_QUOTA, NULL);
267 if (IS_ERR(keyring)) { 267 if (IS_ERR(keyring)) {
268 ret = PTR_ERR(keyring); 268 ret = PTR_ERR(keyring);
269 goto failed_put_cred; 269 goto failed_put_cred;
270 } 270 }
271 271
272 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
273 if (ret < 0)
274 goto failed_put_key;
275
276 ret = register_key_type(&key_type_dns_resolver); 272 ret = register_key_type(&key_type_dns_resolver);
277 if (ret < 0) 273 if (ret < 0)
278 goto failed_put_key; 274 goto failed_put_key;
@@ -304,3 +300,4 @@ static void __exit exit_dns_resolver(void)
304module_init(init_dns_resolver) 300module_init(init_dns_resolver)
305module_exit(exit_dns_resolver) 301module_exit(exit_dns_resolver)
306MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
303
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 45295ca09571..2bc62ea857c8 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -80,6 +80,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
80 int ret; 80 int ret;
81 char *name; 81 char *name;
82 int i; 82 int i;
83 bool valid_name_found = false;
83 84
84 /* 85 /*
85 * Probe for switch model. 86 * Probe for switch model.
@@ -131,8 +132,13 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
131 } else { 132 } else {
132 ds->phys_port_mask |= 1 << i; 133 ds->phys_port_mask |= 1 << i;
133 } 134 }
135 valid_name_found = true;
134 } 136 }
135 137
138 if (!valid_name_found && i == DSA_MAX_PORTS) {
139 ret = -EINVAL;
140 goto out;
141 }
136 142
137 /* 143 /*
138 * If the CPU connects to this switch, set the switch tree 144 * If the CPU connects to this switch, set the switch tree
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index e32083d5d8f8..6ebd8fbd9285 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -41,8 +41,8 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
41 ds->slave_mii_bus->name = "dsa slave smi"; 41 ds->slave_mii_bus->name = "dsa slave smi";
42 ds->slave_mii_bus->read = dsa_slave_phy_read; 42 ds->slave_mii_bus->read = dsa_slave_phy_read;
43 ds->slave_mii_bus->write = dsa_slave_phy_write; 43 ds->slave_mii_bus->write = dsa_slave_phy_write;
44 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s:%.2x", 44 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
45 ds->master_mii_bus->id, ds->pd->sw_addr); 45 ds->index, ds->pd->sw_addr);
46 ds->slave_mii_bus->parent = &ds->master_mii_bus->dev; 46 ds->slave_mii_bus->parent = &ds->master_mii_bus->dev;
47} 47}
48 48
@@ -203,10 +203,10 @@ dsa_slave_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
203static void dsa_slave_get_drvinfo(struct net_device *dev, 203static void dsa_slave_get_drvinfo(struct net_device *dev,
204 struct ethtool_drvinfo *drvinfo) 204 struct ethtool_drvinfo *drvinfo)
205{ 205{
206 strncpy(drvinfo->driver, "dsa", 32); 206 strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
207 strncpy(drvinfo->version, dsa_driver_version, 32); 207 strlcpy(drvinfo->version, dsa_driver_version, sizeof(drvinfo->version));
208 strncpy(drvinfo->fw_version, "N/A", 32); 208 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
209 strncpy(drvinfo->bus_info, "platform", 32); 209 strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
210} 210}
211 211
212static int dsa_slave_nway_reset(struct net_device *dev) 212static int dsa_slave_nway_reset(struct net_device *dev)
@@ -391,7 +391,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
391 391
392 if (p->phy != NULL) { 392 if (p->phy != NULL) {
393 phy_attach(slave_dev, dev_name(&p->phy->dev), 393 phy_attach(slave_dev, dev_name(&p->phy->dev),
394 0, PHY_INTERFACE_MODE_GMII); 394 PHY_INTERFACE_MODE_GMII);
395 395
396 p->phy->autoneg = AUTONEG_ENABLE; 396 p->phy->autoneg = AUTONEG_ENABLE;
397 p->phy->speed = 0; 397 p->phy->speed = 0;
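dsa_slave_get_drvinfo() above moves from strncpy(), which does not NUL-terminate when the source fills the buffer, to strlcpy(), which always terminates and truncates safely. A self-contained userspace illustration; strlcpy is not in older glibc, so the demo carries its own copy under a different name:

	#include <stdio.h>
	#include <string.h>

	/* mirrors the kernel's strlcpy(): copy at most size-1 bytes and
	 * always NUL-terminate; returns strlen(src), so truncation is
	 * detectable as ret >= size */
	static size_t xstrlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;
			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char buf[4];

		xstrlcpy(buf, "platform", sizeof(buf));
		printf("'%s'\n", buf);	/* 'pla': truncated but terminated */
		return 0;
	}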
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 4efad533e5f6..a36c85eab5b4 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -272,6 +272,36 @@ void eth_header_cache_update(struct hh_cache *hh,
272EXPORT_SYMBOL(eth_header_cache_update); 272EXPORT_SYMBOL(eth_header_cache_update);
273 273
274/** 274/**
275 * eth_prepare_mac_addr_change - prepare for mac change
276 * @dev: network device
277 * @p: socket address
278 */
279int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
280{
281 struct sockaddr *addr = p;
282
283 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
284 return -EBUSY;
285 if (!is_valid_ether_addr(addr->sa_data))
286 return -EADDRNOTAVAIL;
287 return 0;
288}
289EXPORT_SYMBOL(eth_prepare_mac_addr_change);
290
291/**
292 * eth_commit_mac_addr_change - commit mac change
293 * @dev: network device
294 * @p: socket address
295 */
296void eth_commit_mac_addr_change(struct net_device *dev, void *p)
297{
298 struct sockaddr *addr = p;
299
300 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
301}
302EXPORT_SYMBOL(eth_commit_mac_addr_change);
303
304/**
275 * eth_mac_addr - set new Ethernet hardware address 305 * eth_mac_addr - set new Ethernet hardware address
276 * @dev: network device 306 * @dev: network device
277 * @p: socket address 307 * @p: socket address
@@ -283,15 +313,12 @@ EXPORT_SYMBOL(eth_header_cache_update);
283 */ 313 */
284int eth_mac_addr(struct net_device *dev, void *p) 314int eth_mac_addr(struct net_device *dev, void *p)
285{ 315{
286 struct sockaddr *addr = p; 316 int ret;
287 317
288 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) 318 ret = eth_prepare_mac_addr_change(dev, p);
289 return -EBUSY; 319 if (ret < 0)
290 if (!is_valid_ether_addr(addr->sa_data)) 320 return ret;
291 return -EADDRNOTAVAIL; 321 eth_commit_mac_addr_change(dev, p);
292 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
293 /* if device marked as NET_ADDR_RANDOM, reset it */
294 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
295 return 0; 322 return 0;
296} 323}
297EXPORT_SYMBOL(eth_mac_addr); 324EXPORT_SYMBOL(eth_mac_addr);
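eth_mac_addr() is split above into eth_prepare_mac_addr_change() (validation only) and eth_commit_mac_addr_change() (the copy into dev->dev_addr), so drivers that must program the hardware between those two steps can reuse the common checks. A hedged sketch of the intended driver-side usage; the foo_* names are hypothetical:

	static int foo_set_mac_address(struct net_device *dev, void *p)
	{
		int err;

		err = eth_prepare_mac_addr_change(dev, p);	/* checks only */
		if (err)
			return err;

		/* ... program the new address into the NIC's filters here;
		 * bail out without committing if the hardware refuses ... */

		eth_commit_mac_addr_change(dev, p);	/* updates dev->dev_addr */
		return 0;
	}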
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index f651da60f161..43b95ca61114 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -377,17 +377,14 @@ static int lowpan_header_create(struct sk_buff *skb,
377 struct ipv6hdr *hdr; 377 struct ipv6hdr *hdr;
378 const u8 *saddr = _saddr; 378 const u8 *saddr = _saddr;
379 const u8 *daddr = _daddr; 379 const u8 *daddr = _daddr;
380 u8 *head; 380 u8 head[100];
381 struct ieee802154_addr sa, da; 381 struct ieee802154_addr sa, da;
382 382
383 /* TODO:
384 * if this package isn't ipv6 one, where should it be routed?
385 */
383 if (type != ETH_P_IPV6) 386 if (type != ETH_P_IPV6)
384 return 0; 387 return 0;
385 /* TODO:
386 * if this package isn't ipv6 one, where should it be routed?
387 */
388 head = kzalloc(100, GFP_KERNEL);
389 if (head == NULL)
390 return -ENOMEM;
391 388
392 hdr = ipv6_hdr(skb); 389 hdr = ipv6_hdr(skb);
393 hc06_ptr = head + 2; 390 hc06_ptr = head + 2;
@@ -561,8 +558,6 @@ static int lowpan_header_create(struct sk_buff *skb,
561 skb_pull(skb, sizeof(struct ipv6hdr)); 558 skb_pull(skb, sizeof(struct ipv6hdr));
562 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head); 559 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
563 560
564 kfree(head);
565
566 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, 561 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
567 skb->len); 562 skb->len);
568 563
@@ -594,10 +589,32 @@ static int lowpan_header_create(struct sk_buff *skb,
594 } 589 }
595} 590}
596 591
592static int lowpan_give_skb_to_devices(struct sk_buff *skb)
593{
594 struct lowpan_dev_record *entry;
595 struct sk_buff *skb_cp;
596 int stat = NET_RX_SUCCESS;
597
598 rcu_read_lock();
599 list_for_each_entry_rcu(entry, &lowpan_devices, list)
600 if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
601 skb_cp = skb_copy(skb, GFP_ATOMIC);
602 if (!skb_cp) {
603 stat = -ENOMEM;
604 break;
605 }
606
607 skb_cp->dev = entry->ldev;
608 stat = netif_rx(skb_cp);
609 }
610 rcu_read_unlock();
611
612 return stat;
613}
614
597static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) 615static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
598{ 616{
599 struct sk_buff *new; 617 struct sk_buff *new;
600 struct lowpan_dev_record *entry;
601 int stat = NET_RX_SUCCESS; 618 int stat = NET_RX_SUCCESS;
602 619
603 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), 620 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
@@ -614,19 +631,7 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
614 new->protocol = htons(ETH_P_IPV6); 631 new->protocol = htons(ETH_P_IPV6);
615 new->pkt_type = PACKET_HOST; 632 new->pkt_type = PACKET_HOST;
616 633
617 rcu_read_lock(); 634 stat = lowpan_give_skb_to_devices(new);
618 list_for_each_entry_rcu(entry, &lowpan_devices, list)
619 if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
620 skb = skb_copy(new, GFP_ATOMIC);
621 if (!skb) {
622 stat = -ENOMEM;
623 break;
624 }
625
626 skb->dev = entry->ldev;
627 stat = netif_rx(skb);
628 }
629 rcu_read_unlock();
630 635
631 kfree_skb(new); 636 kfree_skb(new);
632 637
@@ -1137,19 +1142,42 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1137 goto drop; 1142 goto drop;
1138 1143
1139 /* check that it's our buffer */ 1144 /* check that it's our buffer */
1140 switch (skb->data[0] & 0xe0) { 1145 if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
1141 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ 1146 /* Copy the packet so that the IPv6 header is
1142 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ 1147 * properly aligned.
1143 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ 1148 */
1144 local_skb = skb_clone(skb, GFP_ATOMIC); 1149 local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
1150 skb_tailroom(skb), GFP_ATOMIC);
1145 if (!local_skb) 1151 if (!local_skb)
1146 goto drop; 1152 goto drop;
1147 lowpan_process_data(local_skb);
1148 1153
1154 local_skb->protocol = htons(ETH_P_IPV6);
1155 local_skb->pkt_type = PACKET_HOST;
1156
1157 /* Pull off the 1-byte of 6lowpan header. */
1158 skb_pull(local_skb, 1);
1159 skb_reset_network_header(local_skb);
1160 skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
1161
1162 lowpan_give_skb_to_devices(local_skb);
1163
1164 kfree_skb(local_skb);
1149 kfree_skb(skb); 1165 kfree_skb(skb);
1150 break; 1166 } else {
1151 default: 1167 switch (skb->data[0] & 0xe0) {
1152 break; 1168 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
1169 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
1170 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
1171 local_skb = skb_clone(skb, GFP_ATOMIC);
1172 if (!local_skb)
1173 goto drop;
1174 lowpan_process_data(local_skb);
1175
1176 kfree_skb(skb);
1177 break;
1178 default:
1179 break;
1180 }
1153 } 1181 }
1154 1182
1155 return NET_RX_SUCCESS; 1183 return NET_RX_SUCCESS;
@@ -1234,7 +1262,7 @@ static inline int __init lowpan_netlink_init(void)
1234 return rtnl_link_register(&lowpan_link_ops); 1262 return rtnl_link_register(&lowpan_link_ops);
1235} 1263}
1236 1264
1237static inline void __init lowpan_netlink_fini(void) 1265static inline void lowpan_netlink_fini(void)
1238{ 1266{
1239 rtnl_link_unregister(&lowpan_link_ops); 1267 rtnl_link_unregister(&lowpan_link_ops);
1240} 1268}
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 7dee65052925..b2e06df0076c 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -1,6 +1,5 @@
1config IEEE802154 1config IEEE802154
2 tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support (EXPERIMENTAL)" 2 tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support"
3 depends on EXPERIMENTAL
4 ---help--- 3 ---help---
5 IEEE Std 802.15.4 defines a low data rate, low power and low 4 IEEE Std 802.15.4 defines a low data rate, low power and low
6 complexity short range wireless personal area networks. It was 5 complexity short range wireless personal area networks. It was
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 1627ef2e8522..13571eae6bae 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -91,7 +91,7 @@ static struct class wpan_phy_class = {
91static DEFINE_MUTEX(wpan_phy_mutex); 91static DEFINE_MUTEX(wpan_phy_mutex);
92static int wpan_phy_idx; 92static int wpan_phy_idx;
93 93
94static int wpan_phy_match(struct device *dev, void *data) 94static int wpan_phy_match(struct device *dev, const void *data)
95{ 95{
96 return !strcmp(dev_name(dev), (const char *)data); 96 return !strcmp(dev_name(dev), (const char *)data);
97} 97}
@@ -103,8 +103,7 @@ struct wpan_phy *wpan_phy_find(const char *str)
103 if (WARN_ON(!str)) 103 if (WARN_ON(!str))
104 return NULL; 104 return NULL;
105 105
106 dev = class_find_device(&wpan_phy_class, NULL, 106 dev = class_find_device(&wpan_phy_class, NULL, str, wpan_phy_match);
107 (void *)str, wpan_phy_match);
108 if (!dev) 107 if (!dev)
109 return NULL; 108 return NULL;
110 109
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 5a19aeb86094..7944df768454 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -488,7 +488,6 @@ config TCP_CONG_HTCP
488 488
489config TCP_CONG_HSTCP 489config TCP_CONG_HSTCP
490 tristate "High Speed TCP" 490 tristate "High Speed TCP"
491 depends on EXPERIMENTAL
492 default n 491 default n
493 ---help--- 492 ---help---
494 Sally Floyd's High Speed TCP (RFC 3649) congestion control. 493 Sally Floyd's High Speed TCP (RFC 3649) congestion control.
@@ -499,7 +498,6 @@ config TCP_CONG_HSTCP
499 498
500config TCP_CONG_HYBLA 499config TCP_CONG_HYBLA
501 tristate "TCP-Hybla congestion control algorithm" 500 tristate "TCP-Hybla congestion control algorithm"
502 depends on EXPERIMENTAL
503 default n 501 default n
504 ---help--- 502 ---help---
505 TCP-Hybla is a sender-side only change that eliminates penalization of 503 TCP-Hybla is a sender-side only change that eliminates penalization of
@@ -509,7 +507,6 @@ config TCP_CONG_HYBLA
509 507
510config TCP_CONG_VEGAS 508config TCP_CONG_VEGAS
511 tristate "TCP Vegas" 509 tristate "TCP Vegas"
512 depends on EXPERIMENTAL
513 default n 510 default n
514 ---help--- 511 ---help---
515 TCP Vegas is a sender-side only change to TCP that anticipates 512 TCP Vegas is a sender-side only change to TCP that anticipates
@@ -520,7 +517,6 @@ config TCP_CONG_VEGAS
520 517
521config TCP_CONG_SCALABLE 518config TCP_CONG_SCALABLE
522 tristate "Scalable TCP" 519 tristate "Scalable TCP"
523 depends on EXPERIMENTAL
524 default n 520 default n
525 ---help--- 521 ---help---
526 Scalable TCP is a sender-side only change to TCP which uses a 522 Scalable TCP is a sender-side only change to TCP which uses a
@@ -530,7 +526,6 @@ config TCP_CONG_SCALABLE
530 526
531config TCP_CONG_LP 527config TCP_CONG_LP
532 tristate "TCP Low Priority" 528 tristate "TCP Low Priority"
533 depends on EXPERIMENTAL
534 default n 529 default n
535 ---help--- 530 ---help---
536 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is 531 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
@@ -540,7 +535,6 @@ config TCP_CONG_LP
540 535
541config TCP_CONG_VENO 536config TCP_CONG_VENO
542 tristate "TCP Veno" 537 tristate "TCP Veno"
543 depends on EXPERIMENTAL
544 default n 538 default n
545 ---help--- 539 ---help---
546 TCP Veno is a sender-side only enhancement of TCP to obtain better 540 TCP Veno is a sender-side only enhancement of TCP to obtain better
@@ -552,7 +546,6 @@ config TCP_CONG_VENO
552 546
553config TCP_CONG_YEAH 547config TCP_CONG_YEAH
554 tristate "YeAH TCP" 548 tristate "YeAH TCP"
555 depends on EXPERIMENTAL
556 select TCP_CONG_VEGAS 549 select TCP_CONG_VEGAS
557 default n 550 default n
558 ---help--- 551 ---help---
@@ -567,7 +560,6 @@ config TCP_CONG_YEAH
567 560
568config TCP_CONG_ILLINOIS 561config TCP_CONG_ILLINOIS
569 tristate "TCP Illinois" 562 tristate "TCP Illinois"
570 depends on EXPERIMENTAL
571 default n 563 default n
572 ---help--- 564 ---help---
573 TCP-Illinois is a sender-side modification of TCP Reno for 565 TCP-Illinois is a sender-side modification of TCP Reno for
@@ -631,8 +623,7 @@ config DEFAULT_TCP_CONG
631 default "cubic" 623 default "cubic"
632 624
633config TCP_MD5SIG 625config TCP_MD5SIG
634 bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)" 626 bool "TCP: MD5 Signature Option support (RFC2385)"
635 depends on EXPERIMENTAL
636 select CRYPTO 627 select CRYPTO
637 select CRYPTO_MD5 628 select CRYPTO_MD5
638 ---help--- 629 ---help---
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24b384b7903e..e225a4e5b572 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -263,21 +263,6 @@ void build_ehash_secret(void)
263} 263}
264EXPORT_SYMBOL(build_ehash_secret); 264EXPORT_SYMBOL(build_ehash_secret);
265 265
266static inline int inet_netns_ok(struct net *net, __u8 protocol)
267{
268 const struct net_protocol *ipprot;
269
270 if (net_eq(net, &init_net))
271 return 1;
272
273 ipprot = rcu_dereference(inet_protos[protocol]);
274 if (ipprot == NULL) {
275 /* raw IP is OK */
276 return 1;
277 }
278 return ipprot->netns_ok;
279}
280
281/* 266/*
282 * Create an inet socket. 267 * Create an inet socket.
283 */ 268 */
@@ -350,10 +335,6 @@ lookup_protocol:
350 !ns_capable(net->user_ns, CAP_NET_RAW)) 335 !ns_capable(net->user_ns, CAP_NET_RAW))
351 goto out_rcu_unlock; 336 goto out_rcu_unlock;
352 337
353 err = -EAFNOSUPPORT;
354 if (!inet_netns_ok(net, protocol))
355 goto out_rcu_unlock;
356
357 sock->ops = answer->ops; 338 sock->ops = answer->ops;
358 answer_prot = answer->prot; 339 answer_prot = answer->prot;
359 answer_no_check = answer->no_check; 340 answer_no_check = answer->no_check;
@@ -1306,6 +1287,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1306 SKB_GSO_UDP | 1287 SKB_GSO_UDP |
1307 SKB_GSO_DODGY | 1288 SKB_GSO_DODGY |
1308 SKB_GSO_TCP_ECN | 1289 SKB_GSO_TCP_ECN |
1290 SKB_GSO_GRE |
1309 0))) 1291 0)))
1310 goto out; 1292 goto out;
1311 1293
@@ -1333,7 +1315,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1333 segs = ops->callbacks.gso_segment(skb, features); 1315 segs = ops->callbacks.gso_segment(skb, features);
1334 rcu_read_unlock(); 1316 rcu_read_unlock();
1335 1317
1336 if (!segs || IS_ERR(segs)) 1318 if (IS_ERR_OR_NULL(segs))
1337 goto out; 1319 goto out;
1338 1320
1339 skb = segs; 1321 skb = segs;
@@ -1705,12 +1687,11 @@ static struct packet_type ip_packet_type __read_mostly = {
1705 1687
1706static int __init inet_init(void) 1688static int __init inet_init(void)
1707{ 1689{
1708 struct sk_buff *dummy_skb;
1709 struct inet_protosw *q; 1690 struct inet_protosw *q;
1710 struct list_head *r; 1691 struct list_head *r;
1711 int rc = -EINVAL; 1692 int rc = -EINVAL;
1712 1693
1713 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb)); 1694 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
1714 1695
1715 sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); 1696 sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
1716 if (!sysctl_local_reserved_ports) 1697 if (!sysctl_local_reserved_ports)
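Besides recognising SKB_GSO_GRE and removing inet_netns_ok() together with its call in inet_create(), the af_inet.c diff replaces the open-coded "!segs || IS_ERR(segs)" with IS_ERR_OR_NULL() and sizes the BUILD_BUG_ON via FIELD_SIZEOF instead of a dummy skb. What the combined error-pointer test does, re-created in plain C for illustration only (not the kernel's header):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* a "pointer" in the last page of the address space encodes a
	 * negative errno value */
	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline bool IS_ERR(const void *p)
	{
		return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}
	static inline bool IS_ERR_OR_NULL(const void *p)
	{
		return !p || IS_ERR(p);
	}

	int main(void)
	{
		int x;
		void *ok = &x, *nil = NULL, *bad = ERR_PTR(-22);

		/* prints "0 1 1": only the valid pointer passes */
		printf("%d %d %d\n", IS_ERR_OR_NULL(ok),
		       IS_ERR_OR_NULL(nil), IS_ERR_OR_NULL(bad));
		return 0;
	}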
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a0d8392491c3..2e7f1948216f 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
269 skb->network_header += ah_hlen; 269 skb->network_header += ah_hlen;
270 memcpy(skb_network_header(skb), work_iph, ihl); 270 memcpy(skb_network_header(skb), work_iph, ihl);
271 __skb_pull(skb, ah_hlen + ihl); 271 __skb_pull(skb, ah_hlen + ihl);
272 skb_set_transport_header(skb, -ihl); 272
273 if (x->props.mode == XFRM_MODE_TUNNEL)
274 skb_reset_transport_header(skb);
275 else
276 skb_set_transport_header(skb, -ihl);
273out: 277out:
274 kfree(AH_SKB_CB(skb)->tmp); 278 kfree(AH_SKB_CB(skb)->tmp);
275 xfrm_input_resume(skb, err); 279 xfrm_input_resume(skb, err);
@@ -317,8 +321,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
317 321
318 /* We are going to _remove_ AH header to keep sockets happy, 322 /* We are going to _remove_ AH header to keep sockets happy,
319 * so... Later this can change. */ 323 * so... Later this can change. */
320 if (skb_cloned(skb) && 324 if (skb_unclone(skb, GFP_ATOMIC))
321 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
322 goto out; 325 goto out;
323 326
324 skb->ip_summed = CHECKSUM_NONE; 327 skb->ip_summed = CHECKSUM_NONE;
@@ -381,7 +384,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
381 skb->network_header += ah_hlen; 384 skb->network_header += ah_hlen;
382 memcpy(skb_network_header(skb), work_iph, ihl); 385 memcpy(skb_network_header(skb), work_iph, ihl);
383 __skb_pull(skb, ah_hlen + ihl); 386 __skb_pull(skb, ah_hlen + ihl);
384 skb_set_transport_header(skb, -ihl); 387 if (x->props.mode == XFRM_MODE_TUNNEL)
388 skb_reset_transport_header(skb);
389 else
390 skb_set_transport_header(skb, -ihl);
385 391
386 err = nexthdr; 392 err = nexthdr;
387 393
@@ -413,9 +419,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
413 if (!x) 419 if (!x)
414 return; 420 return;
415 421
416 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 422 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
423 atomic_inc(&flow_cache_genid);
424 rt_genid_bump(net);
425
417 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); 426 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
418 else 427 } else
419 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); 428 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
420 xfrm_state_put(x); 429 xfrm_state_put(x);
421} 430}
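In the ah4.c diff above, ah_input() swaps the open-coded "cloned, so expand the head" test for skb_unclone(); tunnel-mode input now resets rather than rewinds the transport header, and ah4_err() bumps the flow-cache genid and the routing genid before updating the PMTU. The new helper is effectively the two removed lines folded into one call; a sketch of its shape, not a verbatim copy of the kernel header:

	/* roughly what skb_unclone() does */
	static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
	{
		if (skb_cloned(skb))
			return pskb_expand_head(skb, 0, 0, pri);
		return 0;
	}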
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index ce6fbdfd40b8..fea4929f6200 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -321,7 +321,7 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
321static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) 321static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
322{ 322{
323 __be32 saddr = 0; 323 __be32 saddr = 0;
324 u8 *dst_ha = NULL; 324 u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
325 struct net_device *dev = neigh->dev; 325 struct net_device *dev = neigh->dev;
326 __be32 target = *(__be32 *)neigh->primary_key; 326 __be32 target = *(__be32 *)neigh->primary_key;
327 int probes = atomic_read(&neigh->probes); 327 int probes = atomic_read(&neigh->probes);
@@ -363,8 +363,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
363 if (probes < 0) { 363 if (probes < 0) {
364 if (!(neigh->nud_state & NUD_VALID)) 364 if (!(neigh->nud_state & NUD_VALID))
365 pr_debug("trying to ucast probe in NUD_INVALID\n"); 365 pr_debug("trying to ucast probe in NUD_INVALID\n");
366 dst_ha = neigh->ha; 366 neigh_ha_snapshot(dst_ha, neigh, dev);
367 read_lock_bh(&neigh->lock); 367 dst_hw = dst_ha;
368 } else { 368 } else {
369 probes -= neigh->parms->app_probes; 369 probes -= neigh->parms->app_probes;
370 if (probes < 0) { 370 if (probes < 0) {
@@ -376,9 +376,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
376 } 376 }
377 377
378 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 378 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
379 dst_ha, dev->dev_addr, NULL); 379 dst_hw, dev->dev_addr, NULL);
380 if (dst_ha)
381 read_unlock_bh(&neigh->lock);
382} 380}
383 381
384static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) 382static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -930,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
930static int arp_rcv(struct sk_buff *skb, struct net_device *dev, 928static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
931 struct packet_type *pt, struct net_device *orig_dev) 929 struct packet_type *pt, struct net_device *orig_dev)
932{ 930{
933 struct arphdr *arp; 931 const struct arphdr *arp;
932
933 if (dev->flags & IFF_NOARP ||
934 skb->pkt_type == PACKET_OTHERHOST ||
935 skb->pkt_type == PACKET_LOOPBACK)
936 goto freeskb;
937
938 skb = skb_share_check(skb, GFP_ATOMIC);
939 if (!skb)
940 goto out_of_mem;
934 941
935 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 942 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
936 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 943 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
937 goto freeskb; 944 goto freeskb;
938 945
939 arp = arp_hdr(skb); 946 arp = arp_hdr(skb);
940 if (arp->ar_hln != dev->addr_len || 947 if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
941 dev->flags & IFF_NOARP ||
942 skb->pkt_type == PACKET_OTHERHOST ||
943 skb->pkt_type == PACKET_LOOPBACK ||
944 arp->ar_pln != 4)
945 goto freeskb; 948 goto freeskb;
946 949
947 skb = skb_share_check(skb, GFP_ATOMIC);
948 if (skb == NULL)
949 goto out_of_mem;
950
951 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); 950 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
952 951
953 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); 952 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
@@ -1406,14 +1405,14 @@ static const struct file_operations arp_seq_fops = {
1406 1405
1407static int __net_init arp_net_init(struct net *net) 1406static int __net_init arp_net_init(struct net *net)
1408{ 1407{
1409 if (!proc_net_fops_create(net, "arp", S_IRUGO, &arp_seq_fops)) 1408 if (!proc_create("arp", S_IRUGO, net->proc_net, &arp_seq_fops))
1410 return -ENOMEM; 1409 return -ENOMEM;
1411 return 0; 1410 return 0;
1412} 1411}
1413 1412
1414static void __net_exit arp_net_exit(struct net *net) 1413static void __net_exit arp_net_exit(struct net *net)
1415{ 1414{
1416 proc_net_remove(net, "arp"); 1415 remove_proc_entry("arp", net->proc_net);
1417} 1416}
1418 1417
1419static struct pernet_operations arp_net_ops = { 1418static struct pernet_operations arp_net_ops = {
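arp_solicit() above used to point dst_ha at neigh->ha and hold neigh->lock across arp_send(); it now copies the address into an on-stack buffer with neigh_ha_snapshot() and transmits lock-free. The arp_rcv() hunk likewise moves the cheap flag tests ahead of skb_share_check(). The snapshot-then-use pattern in a runnable userspace analogue; the pthread names are illustrative:

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define ADDR_LEN 6

	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned char shared_ha[ADDR_LEN];

	/* analogue of neigh_ha_snapshot(): copy under the read lock,
	 * then let the caller use the stable copy with the lock dropped */
	static void ha_snapshot(unsigned char *dst)
	{
		pthread_rwlock_rdlock(&lock);
		memcpy(dst, shared_ha, ADDR_LEN);
		pthread_rwlock_unlock(&lock);
	}

	int main(void)
	{
		unsigned char ha[ADDR_LEN];

		ha_snapshot(ha);
		/* transmit using 'ha' here: no lock is held, so a concurrent
		 * writer updating shared_ha cannot stall the send path */
		printf("%02x:%02x:...\n", ha[0], ha[1]);
		return 0;
	}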
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 424fafbc8cb0..b28e863fe0a7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -85,3 +85,28 @@ out:
85 return err; 85 return err;
86} 86}
87EXPORT_SYMBOL(ip4_datagram_connect); 87EXPORT_SYMBOL(ip4_datagram_connect);
88
89void ip4_datagram_release_cb(struct sock *sk)
90{
91 const struct inet_sock *inet = inet_sk(sk);
92 const struct ip_options_rcu *inet_opt;
93 __be32 daddr = inet->inet_daddr;
94 struct flowi4 fl4;
95 struct rtable *rt;
96
 97	if (!__sk_dst_get(sk) || __sk_dst_check(sk, 0))
98 return;
99
100 rcu_read_lock();
101 inet_opt = rcu_dereference(inet->inet_opt);
102 if (inet_opt && inet_opt->opt.srr)
103 daddr = inet_opt->opt.faddr;
104 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
105 inet->inet_saddr, inet->inet_dport,
106 inet->inet_sport, sk->sk_protocol,
107 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
108 if (!IS_ERR(rt))
109 __sk_dst_set(sk, &rt->dst);
110 rcu_read_unlock();
111}
112EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
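ip4_datagram_release_cb() repairs a stale cached route on a connected datagram socket: __sk_dst_check(sk, 0) returns NULL once the dst has been obsoleted (for instance by a redirect), and the callback then re-resolves the route, honouring a strict-source-route first hop, and caches the fresh dst. The hook fires from release_sock(); a protocol opts in through its struct proto, roughly as this series wires up UDP (sketch, remaining fields elided):

        struct proto udp_prot = {
                .name           = "UDP",
                .owner          = THIS_MODULE,
                /* run when release_sock() drops the socket lock */
                .release_cb     = ip4_datagram_release_cb,
                /* ... */
        };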
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cc06a47f1216..5281314886c1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -63,6 +63,7 @@
63#include <net/ip_fib.h> 63#include <net/ip_fib.h>
64#include <net/rtnetlink.h> 64#include <net/rtnetlink.h>
65#include <net/net_namespace.h> 65#include <net/net_namespace.h>
66#include <net/addrconf.h>
66 67
67#include "fib_lookup.h" 68#include "fib_lookup.h"
68 69
@@ -93,6 +94,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
93 [IFA_ADDRESS] = { .type = NLA_U32 }, 94 [IFA_ADDRESS] = { .type = NLA_U32 },
94 [IFA_BROADCAST] = { .type = NLA_U32 }, 95 [IFA_BROADCAST] = { .type = NLA_U32 },
95 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, 96 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
97 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
96}; 98};
97 99
98#define IN4_ADDR_HSIZE_SHIFT 8 100#define IN4_ADDR_HSIZE_SHIFT 8
@@ -417,6 +419,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
417 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0); 419 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
418} 420}
419 421
422static void check_lifetime(struct work_struct *work);
423
424static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
425
420static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, 426static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
421 u32 portid) 427 u32 portid)
422{ 428{
@@ -462,6 +468,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
462 468
463 inet_hash_insert(dev_net(in_dev->dev), ifa); 469 inet_hash_insert(dev_net(in_dev->dev), ifa);
464 470
471 cancel_delayed_work(&check_lifetime_work);
472 schedule_delayed_work(&check_lifetime_work, 0);
473
465 /* Send message first, then call notifier. 474 /* Send message first, then call notifier.
466 Notifier will trigger FIB update, so that 475 Notifier will trigger FIB update, so that
467 listeners of netlink will know about new ifaddr */ 476 listeners of netlink will know about new ifaddr */
@@ -573,7 +582,107 @@ errout:
573 return err; 582 return err;
574} 583}
575 584
576static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh) 585#define INFINITY_LIFE_TIME 0xFFFFFFFF
586
587static void check_lifetime(struct work_struct *work)
588{
589 unsigned long now, next, next_sec, next_sched;
590 struct in_ifaddr *ifa;
591 struct hlist_node *node;
592 int i;
593
594 now = jiffies;
595 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
596
597 rcu_read_lock();
598 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
599 hlist_for_each_entry_rcu(ifa, node,
600 &inet_addr_lst[i], hash) {
601 unsigned long age;
602
603 if (ifa->ifa_flags & IFA_F_PERMANENT)
604 continue;
605
606 /* We try to batch several events at once. */
607 age = (now - ifa->ifa_tstamp +
608 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
609
610 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
611 age >= ifa->ifa_valid_lft) {
612				struct in_ifaddr **ifap;
613
614 rtnl_lock();
615 for (ifap = &ifa->ifa_dev->ifa_list;
616				     *ifap != NULL; ifap = &(*ifap)->ifa_next) {
617 if (*ifap == ifa)
618 inet_del_ifa(ifa->ifa_dev,
619 ifap, 1);
620 }
621 rtnl_unlock();
622 } else if (ifa->ifa_preferred_lft ==
623 INFINITY_LIFE_TIME) {
624 continue;
625 } else if (age >= ifa->ifa_preferred_lft) {
626 if (time_before(ifa->ifa_tstamp +
627 ifa->ifa_valid_lft * HZ, next))
628 next = ifa->ifa_tstamp +
629 ifa->ifa_valid_lft * HZ;
630
631 if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
632 ifa->ifa_flags |= IFA_F_DEPRECATED;
633 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
634 }
635 } else if (time_before(ifa->ifa_tstamp +
636 ifa->ifa_preferred_lft * HZ,
637 next)) {
638 next = ifa->ifa_tstamp +
639 ifa->ifa_preferred_lft * HZ;
640 }
641 }
642 }
643 rcu_read_unlock();
644
645 next_sec = round_jiffies_up(next);
646 next_sched = next;
647
648 /* If rounded timeout is accurate enough, accept it. */
649 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
650 next_sched = next_sec;
651
652 now = jiffies;
653 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
654 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
655 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
656
657 schedule_delayed_work(&check_lifetime_work, next_sched - now);
658}
659
660static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
661 __u32 prefered_lft)
662{
663 unsigned long timeout;
664
665 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
666
667 timeout = addrconf_timeout_fixup(valid_lft, HZ);
668 if (addrconf_finite_timeout(timeout))
669 ifa->ifa_valid_lft = timeout;
670 else
671 ifa->ifa_flags |= IFA_F_PERMANENT;
672
673 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
674 if (addrconf_finite_timeout(timeout)) {
675 if (timeout == 0)
676 ifa->ifa_flags |= IFA_F_DEPRECATED;
677 ifa->ifa_preferred_lft = timeout;
678 }
679 ifa->ifa_tstamp = jiffies;
680 if (!ifa->ifa_cstamp)
681 ifa->ifa_cstamp = ifa->ifa_tstamp;
682}
683
684static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
685 __u32 *pvalid_lft, __u32 *pprefered_lft)
577{ 686{
578 struct nlattr *tb[IFA_MAX+1]; 687 struct nlattr *tb[IFA_MAX+1];
579 struct in_ifaddr *ifa; 688 struct in_ifaddr *ifa;
@@ -633,24 +742,73 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
633 else 742 else
634 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 743 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
635 744
745 if (tb[IFA_CACHEINFO]) {
746 struct ifa_cacheinfo *ci;
747
748 ci = nla_data(tb[IFA_CACHEINFO]);
749 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
750 err = -EINVAL;
751 goto errout;
752 }
753 *pvalid_lft = ci->ifa_valid;
754 *pprefered_lft = ci->ifa_prefered;
755 }
756
636 return ifa; 757 return ifa;
637 758
638errout: 759errout:
639 return ERR_PTR(err); 760 return ERR_PTR(err);
640} 761}
641 762
763static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
764{
765 struct in_device *in_dev = ifa->ifa_dev;
766 struct in_ifaddr *ifa1, **ifap;
767
768 if (!ifa->ifa_local)
769 return NULL;
770
771 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
772 ifap = &ifa1->ifa_next) {
773 if (ifa1->ifa_mask == ifa->ifa_mask &&
774 inet_ifa_match(ifa1->ifa_address, ifa) &&
775 ifa1->ifa_local == ifa->ifa_local)
776 return ifa1;
777 }
778 return NULL;
779}
780
642static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 781static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
643{ 782{
644 struct net *net = sock_net(skb->sk); 783 struct net *net = sock_net(skb->sk);
645 struct in_ifaddr *ifa; 784 struct in_ifaddr *ifa;
785 struct in_ifaddr *ifa_existing;
786 __u32 valid_lft = INFINITY_LIFE_TIME;
787 __u32 prefered_lft = INFINITY_LIFE_TIME;
646 788
647 ASSERT_RTNL(); 789 ASSERT_RTNL();
648 790
649 ifa = rtm_to_ifaddr(net, nlh); 791 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
650 if (IS_ERR(ifa)) 792 if (IS_ERR(ifa))
651 return PTR_ERR(ifa); 793 return PTR_ERR(ifa);
652 794
653 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid); 795 ifa_existing = find_matching_ifa(ifa);
796 if (!ifa_existing) {
797 /* It would be best to check for !NLM_F_CREATE here but
798		 * userspace already relies on not having to provide this.
799 */
800 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
801 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
802 } else {
803 inet_free_ifa(ifa);
804
805 if (nlh->nlmsg_flags & NLM_F_EXCL ||
806 !(nlh->nlmsg_flags & NLM_F_REPLACE))
807 return -EEXIST;
808
809 set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft);
810 }
811 return 0;
654} 812}
655 813
656/* 814/*
@@ -823,9 +981,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
823 if (!ifa) { 981 if (!ifa) {
824 ret = -ENOBUFS; 982 ret = -ENOBUFS;
825 ifa = inet_alloc_ifa(); 983 ifa = inet_alloc_ifa();
826 INIT_HLIST_NODE(&ifa->hash);
827 if (!ifa) 984 if (!ifa)
828 break; 985 break;
986 INIT_HLIST_NODE(&ifa->hash);
829 if (colon) 987 if (colon)
830 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); 988 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
831 else 989 else
@@ -852,6 +1010,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
852 ifa->ifa_prefixlen = 32; 1010 ifa->ifa_prefixlen = 32;
853 ifa->ifa_mask = inet_make_mask(32); 1011 ifa->ifa_mask = inet_make_mask(32);
854 } 1012 }
1013 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
855 ret = inet_set_ifa(dev, ifa); 1014 ret = inet_set_ifa(dev, ifa);
856 break; 1015 break;
857 1016
@@ -1190,6 +1349,8 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1190 ifa->ifa_dev = in_dev; 1349 ifa->ifa_dev = in_dev;
1191 ifa->ifa_scope = RT_SCOPE_HOST; 1350 ifa->ifa_scope = RT_SCOPE_HOST;
1192 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 1351 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1352 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1353 INFINITY_LIFE_TIME);
1193 inet_insert_ifa(ifa); 1354 inet_insert_ifa(ifa);
1194 } 1355 }
1195 } 1356 }
@@ -1246,11 +1407,30 @@ static size_t inet_nlmsg_size(void)
1246 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */ 1407 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
1247} 1408}
1248 1409
1410static inline u32 cstamp_delta(unsigned long cstamp)
1411{
1412 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1413}
1414
1415static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1416 unsigned long tstamp, u32 preferred, u32 valid)
1417{
1418 struct ifa_cacheinfo ci;
1419
1420 ci.cstamp = cstamp_delta(cstamp);
1421 ci.tstamp = cstamp_delta(tstamp);
1422 ci.ifa_prefered = preferred;
1423 ci.ifa_valid = valid;
1424
1425 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1426}
1427
1249static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, 1428static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1250 u32 portid, u32 seq, int event, unsigned int flags) 1429 u32 portid, u32 seq, int event, unsigned int flags)
1251{ 1430{
1252 struct ifaddrmsg *ifm; 1431 struct ifaddrmsg *ifm;
1253 struct nlmsghdr *nlh; 1432 struct nlmsghdr *nlh;
1433 u32 preferred, valid;
1254 1434
1255 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); 1435 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1256 if (nlh == NULL) 1436 if (nlh == NULL)
@@ -1259,10 +1439,31 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1259 ifm = nlmsg_data(nlh); 1439 ifm = nlmsg_data(nlh);
1260 ifm->ifa_family = AF_INET; 1440 ifm->ifa_family = AF_INET;
1261 ifm->ifa_prefixlen = ifa->ifa_prefixlen; 1441 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1262 ifm->ifa_flags = ifa->ifa_flags|IFA_F_PERMANENT; 1442 ifm->ifa_flags = ifa->ifa_flags;
1263 ifm->ifa_scope = ifa->ifa_scope; 1443 ifm->ifa_scope = ifa->ifa_scope;
1264 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 1444 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1265 1445
1446 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1447 preferred = ifa->ifa_preferred_lft;
1448 valid = ifa->ifa_valid_lft;
1449 if (preferred != INFINITY_LIFE_TIME) {
1450 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1451
1452 if (preferred > tval)
1453 preferred -= tval;
1454 else
1455 preferred = 0;
1456 if (valid != INFINITY_LIFE_TIME) {
1457 if (valid > tval)
1458 valid -= tval;
1459 else
1460 valid = 0;
1461 }
1462 }
1463 } else {
1464 preferred = INFINITY_LIFE_TIME;
1465 valid = INFINITY_LIFE_TIME;
1466 }
1266 if ((ifa->ifa_address && 1467 if ((ifa->ifa_address &&
1267 nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) || 1468 nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1268 (ifa->ifa_local && 1469 (ifa->ifa_local &&
@@ -1270,7 +1471,9 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1270 (ifa->ifa_broadcast && 1471 (ifa->ifa_broadcast &&
1271 nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || 1472 nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1272 (ifa->ifa_label[0] && 1473 (ifa->ifa_label[0] &&
1273 nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) 1474 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1475 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1476 preferred, valid))
1274 goto nla_put_failure; 1477 goto nla_put_failure;
1275 1478
1276 return nlmsg_end(skb, nlh); 1479 return nlmsg_end(skb, nlh);
@@ -1988,6 +2191,8 @@ void __init devinet_init(void)
1988 register_gifconf(PF_INET, inet_gifconf); 2191 register_gifconf(PF_INET, inet_gifconf);
1989 register_netdevice_notifier(&ip_netdev_notifier); 2192 register_netdevice_notifier(&ip_netdev_notifier);
1990 2193
2194 schedule_delayed_work(&check_lifetime_work, 0);
2195
1991 rtnl_af_register(&inet_af_ops); 2196 rtnl_af_register(&inet_af_ops);
1992 2197
1993 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); 2198 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
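Taken together, the devinet changes give IPv4 addresses the valid/preferred lifetime semantics IPv6 has had for years: check_lifetime() runs as delayed work, marks an address IFA_F_DEPRECATED once its preferred lifetime passes and deletes it when the valid lifetime expires, and inet_fill_ifaddr() reports the remaining times through IFA_CACHEINFO. From user space a finite-lifetime address is installed by attaching that same attribute to an RTM_NEWADDR request (NLM_F_REPLACE updates the lifetimes of an existing address in place). The payload is just the uapi struct (sketch):

        struct ifa_cacheinfo ci = {
                .ifa_prefered   = 300,  /* deprecated after 5 minutes */
                .ifa_valid      = 600,  /* deleted after 10 minutes */
        };
        /* appended to the RTM_NEWADDR message as attribute IFA_CACHEINFO */

which is what "ip addr add 192.0.2.1/24 dev eth0 valid_lft 600 preferred_lft 300" ends up sending, assuming an iproute2 recent enough to pass lifetimes for IPv4.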
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b61e9deb7c7e..3b4f0cd2e63e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
346 346
347 pskb_trim(skb, skb->len - alen - padlen - 2); 347 pskb_trim(skb, skb->len - alen - padlen - 2);
348 __skb_pull(skb, hlen); 348 __skb_pull(skb, hlen);
349 skb_set_transport_header(skb, -ihl); 349 if (x->props.mode == XFRM_MODE_TUNNEL)
350 skb_reset_transport_header(skb);
351 else
352 skb_set_transport_header(skb, -ihl);
350 353
351 err = nexthdr[1]; 354 err = nexthdr[1];
352 355
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
499 if (!x) 502 if (!x)
500 return; 503 return;
501 504
502 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 505 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
506 atomic_inc(&flow_cache_genid);
507 rt_genid_bump(net);
508
503 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); 509 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
504 else 510 } else
505 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); 511 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
506 xfrm_state_put(x); 512 xfrm_state_put(x);
507} 513}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 5cd75e2dab2c..99f00d39d10b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -974,7 +974,7 @@ static void nl_fib_input(struct sk_buff *skb)
974 974
975 nl_fib_lookup(frn, tb); 975 nl_fib_lookup(frn, tb);
976 976
977 portid = NETLINK_CB(skb).portid; /* pid of sending process */ 977 portid = NETLINK_CB(skb).portid; /* netlink portid */
978 NETLINK_CB(skb).portid = 0; /* from kernel */ 978 NETLINK_CB(skb).portid = 0; /* from kernel */
979 NETLINK_CB(skb).dst_group = 0; /* unicast */ 979 NETLINK_CB(skb).dst_group = 0; /* unicast */
980 netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT); 980 netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31d771ca9a70..61e03da3e1f5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2607,31 +2607,31 @@ static const struct file_operations fib_route_fops = {
2607 2607
2608int __net_init fib_proc_init(struct net *net) 2608int __net_init fib_proc_init(struct net *net)
2609{ 2609{
2610 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops)) 2610 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
2611 goto out1; 2611 goto out1;
2612 2612
2613 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO, 2613 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2614 &fib_triestat_fops)) 2614 &fib_triestat_fops))
2615 goto out2; 2615 goto out2;
2616 2616
2617 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops)) 2617 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
2618 goto out3; 2618 goto out3;
2619 2619
2620 return 0; 2620 return 0;
2621 2621
2622out3: 2622out3:
2623 proc_net_remove(net, "fib_triestat"); 2623 remove_proc_entry("fib_triestat", net->proc_net);
2624out2: 2624out2:
2625 proc_net_remove(net, "fib_trie"); 2625 remove_proc_entry("fib_trie", net->proc_net);
2626out1: 2626out1:
2627 return -ENOMEM; 2627 return -ENOMEM;
2628} 2628}
2629 2629
2630void __net_exit fib_proc_exit(struct net *net) 2630void __net_exit fib_proc_exit(struct net *net)
2631{ 2631{
2632 proc_net_remove(net, "fib_trie"); 2632 remove_proc_entry("fib_trie", net->proc_net);
2633 proc_net_remove(net, "fib_triestat"); 2633 remove_proc_entry("fib_triestat", net->proc_net);
2634 proc_net_remove(net, "route"); 2634 remove_proc_entry("route", net->proc_net);
2635} 2635}
2636 2636
2637#endif /* CONFIG_PROC_FS */ 2637#endif /* CONFIG_PROC_FS */
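The proc conversions in this series all follow the same mechanical shape: proc_net_fops_create(net, name, mode, fops) becomes proc_create(name, mode, net->proc_net, fops), and proc_net_remove(net, name) becomes remove_proc_entry(name, net->proc_net). A minimal per-netns registration in the new style (entry name and fops are illustrative):

        static int __net_init example_net_init(struct net *net)
        {
                if (!proc_create("example", S_IRUGO, net->proc_net,
                                 &example_seq_fops))
                        return -ENOMEM;
                return 0;
        }

        static void __net_exit example_net_exit(struct net *net)
        {
                remove_proc_entry("example", net->proc_net);
        }

        static struct pernet_operations example_net_ops = {
                .init = example_net_init,
                .exit = example_net_exit,
        };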
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 42a491055c76..7a4c710c4cdd 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -19,6 +19,7 @@
19#include <linux/in.h> 19#include <linux/in.h>
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/if_tunnel.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23#include <net/protocol.h> 24#include <net/protocol.h>
24#include <net/gre.h> 25#include <net/gre.h>
@@ -26,6 +27,11 @@
26 27
27static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; 28static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
28static DEFINE_SPINLOCK(gre_proto_lock); 29static DEFINE_SPINLOCK(gre_proto_lock);
30struct gre_base_hdr {
31 __be16 flags;
32 __be16 protocol;
33};
34#define GRE_HEADER_SECTION 4
29 35
30int gre_add_protocol(const struct gre_protocol *proto, u8 version) 36int gre_add_protocol(const struct gre_protocol *proto, u8 version)
31{ 37{
@@ -112,12 +118,117 @@ static void gre_err(struct sk_buff *skb, u32 info)
112 rcu_read_unlock(); 118 rcu_read_unlock();
113} 119}
114 120
121static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
122 netdev_features_t features)
123{
124 struct sk_buff *segs = ERR_PTR(-EINVAL);
125 netdev_features_t enc_features;
126 int ghl = GRE_HEADER_SECTION;
127 struct gre_base_hdr *greh;
128 int mac_len = skb->mac_len;
129 int tnl_hlen;
130 bool csum;
131
132 if (unlikely(skb_shinfo(skb)->gso_type &
133 ~(SKB_GSO_TCPV4 |
134 SKB_GSO_TCPV6 |
135 SKB_GSO_UDP |
136 SKB_GSO_DODGY |
137 SKB_GSO_TCP_ECN |
138 SKB_GSO_GRE)))
139 goto out;
140
141 if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
142 goto out;
143
144 greh = (struct gre_base_hdr *)skb_transport_header(skb);
145
146 if (greh->flags & GRE_KEY)
147 ghl += GRE_HEADER_SECTION;
148 if (greh->flags & GRE_SEQ)
149 ghl += GRE_HEADER_SECTION;
150 if (greh->flags & GRE_CSUM) {
151 ghl += GRE_HEADER_SECTION;
152 csum = true;
153 } else
154 csum = false;
155
156 /* setup inner skb. */
157 if (greh->protocol == htons(ETH_P_TEB)) {
158 struct ethhdr *eth = eth_hdr(skb);
159 skb->protocol = eth->h_proto;
160 } else {
161 skb->protocol = greh->protocol;
162 }
163
164 skb->encapsulation = 0;
165
166 if (unlikely(!pskb_may_pull(skb, ghl)))
167 goto out;
168 __skb_pull(skb, ghl);
169 skb_reset_mac_header(skb);
170 skb_set_network_header(skb, skb_inner_network_offset(skb));
171 skb->mac_len = skb_inner_network_offset(skb);
172
173 /* segment inner packet. */
174 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
175 segs = skb_mac_gso_segment(skb, enc_features);
176 if (!segs || IS_ERR(segs))
177 goto out;
178
179 skb = segs;
180 tnl_hlen = skb_tnl_header_len(skb);
181 do {
182 __skb_push(skb, ghl);
183 if (csum) {
184 __be32 *pcsum;
185
186 if (skb_has_shared_frag(skb)) {
187 int err;
188
189 err = __skb_linearize(skb);
190 if (err) {
191 kfree_skb(segs);
192 segs = ERR_PTR(err);
193 goto out;
194 }
195 }
196
197 greh = (struct gre_base_hdr *)(skb->data);
198 pcsum = (__be32 *)(greh + 1);
199 *pcsum = 0;
200 *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
201 }
202 __skb_push(skb, tnl_hlen - ghl);
203
204 skb_reset_mac_header(skb);
205 skb_set_network_header(skb, mac_len);
206 skb->mac_len = mac_len;
207 } while ((skb = skb->next));
208out:
209 return segs;
210}
211
212static int gre_gso_send_check(struct sk_buff *skb)
213{
214 if (!skb->encapsulation)
215 return -EINVAL;
216 return 0;
217}
218
115static const struct net_protocol net_gre_protocol = { 219static const struct net_protocol net_gre_protocol = {
116 .handler = gre_rcv, 220 .handler = gre_rcv,
117 .err_handler = gre_err, 221 .err_handler = gre_err,
118 .netns_ok = 1, 222 .netns_ok = 1,
119}; 223};
120 224
225static const struct net_offload gre_offload = {
226 .callbacks = {
227 .gso_send_check = gre_gso_send_check,
228 .gso_segment = gre_gso_segment,
229 },
230};
231
121static int __init gre_init(void) 232static int __init gre_init(void)
122{ 233{
123 pr_info("GRE over IPv4 demultiplexor driver\n"); 234 pr_info("GRE over IPv4 demultiplexor driver\n");
@@ -127,11 +238,18 @@ static int __init gre_init(void)
127 return -EAGAIN; 238 return -EAGAIN;
128 } 239 }
129 240
241 if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
242 pr_err("can't add protocol offload\n");
243 inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
244 return -EAGAIN;
245 }
246
130 return 0; 247 return 0;
131} 248}
132 249
133static void __exit gre_exit(void) 250static void __exit gre_exit(void)
134{ 251{
252 inet_del_offload(&gre_offload, IPPROTO_GRE);
135 inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); 253 inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
136} 254}
137 255
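gre_gso_segment() derives the header length it must strip and later restore purely from the GRE flag bits: the base header (flags + protocol) is one 4-byte section, and GRE_CSUM, GRE_KEY and GRE_SEQ each add another. Pulled out as a standalone helper, that logic would read roughly as follows (hypothetical function, not in the patch):

        static int gre_header_len(__be16 flags)
        {
                int len = GRE_HEADER_SECTION;           /* flags + protocol */

                if (flags & GRE_CSUM)
                        len += GRE_HEADER_SECTION;      /* csum + reserved */
                if (flags & GRE_KEY)
                        len += GRE_HEADER_SECTION;
                if (flags & GRE_SEQ)
                        len += GRE_HEADER_SECTION;
                return len;
        }

After the inner packet is segmented, each resulting skb has the tunnel headers pushed back on and, when GRE_CSUM was negotiated, a fresh checksum folded over the whole segment via csum_fold(skb_checksum(...)).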
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 736ab70fd179..d8c232794bcb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2646,24 +2646,25 @@ static int __net_init igmp_net_init(struct net *net)
2646{ 2646{
2647 struct proc_dir_entry *pde; 2647 struct proc_dir_entry *pde;
2648 2648
2649 pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops); 2649 pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
2650 if (!pde) 2650 if (!pde)
2651 goto out_igmp; 2651 goto out_igmp;
2652 pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops); 2652 pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
2653 &igmp_mcf_seq_fops);
2653 if (!pde) 2654 if (!pde)
2654 goto out_mcfilter; 2655 goto out_mcfilter;
2655 return 0; 2656 return 0;
2656 2657
2657out_mcfilter: 2658out_mcfilter:
2658 proc_net_remove(net, "igmp"); 2659 remove_proc_entry("igmp", net->proc_net);
2659out_igmp: 2660out_igmp:
2660 return -ENOMEM; 2661 return -ENOMEM;
2661} 2662}
2662 2663
2663static void __net_exit igmp_net_exit(struct net *net) 2664static void __net_exit igmp_net_exit(struct net *net)
2664{ 2665{
2665 proc_net_remove(net, "mcfilter"); 2666 remove_proc_entry("mcfilter", net->proc_net);
2666 proc_net_remove(net, "igmp"); 2667 remove_proc_entry("igmp", net->proc_net);
2667} 2668}
2668 2669
2669static struct pernet_operations igmp_net_ops = { 2670static struct pernet_operations igmp_net_ops = {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2026542d6836..11cb4979a465 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -59,6 +59,8 @@ int inet_csk_bind_conflict(const struct sock *sk,
59 struct sock *sk2; 59 struct sock *sk2;
60 struct hlist_node *node; 60 struct hlist_node *node;
61 int reuse = sk->sk_reuse; 61 int reuse = sk->sk_reuse;
62 int reuseport = sk->sk_reuseport;
63 kuid_t uid = sock_i_uid((struct sock *)sk);
62 64
63 /* 65 /*
64 * Unlike other sk lookup places we do not check 66 * Unlike other sk lookup places we do not check
@@ -73,8 +75,11 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 (!sk->sk_bound_dev_if || 75 (!sk->sk_bound_dev_if ||
74 !sk2->sk_bound_dev_if || 76 !sk2->sk_bound_dev_if ||
75 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 77 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
76 if (!reuse || !sk2->sk_reuse || 78 if ((!reuse || !sk2->sk_reuse ||
77 sk2->sk_state == TCP_LISTEN) { 79 sk2->sk_state == TCP_LISTEN) &&
80 (!reuseport || !sk2->sk_reuseport ||
81 (sk2->sk_state != TCP_TIME_WAIT &&
82 !uid_eq(uid, sock_i_uid(sk2))))) {
78 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 83 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
79 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 84 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
80 sk2_rcv_saddr == sk_rcv_saddr(sk)) 85 sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -106,6 +111,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
106 int ret, attempts = 5; 111 int ret, attempts = 5;
107 struct net *net = sock_net(sk); 112 struct net *net = sock_net(sk);
108 int smallest_size = -1, smallest_rover; 113 int smallest_size = -1, smallest_rover;
114 kuid_t uid = sock_i_uid(sk);
109 115
110 local_bh_disable(); 116 local_bh_disable();
111 if (!snum) { 117 if (!snum) {
@@ -125,9 +131,12 @@ again:
125 spin_lock(&head->lock); 131 spin_lock(&head->lock);
126 inet_bind_bucket_for_each(tb, node, &head->chain) 132 inet_bind_bucket_for_each(tb, node, &head->chain)
127 if (net_eq(ib_net(tb), net) && tb->port == rover) { 133 if (net_eq(ib_net(tb), net) && tb->port == rover) {
128 if (tb->fastreuse > 0 && 134 if (((tb->fastreuse > 0 &&
129 sk->sk_reuse && 135 sk->sk_reuse &&
130 sk->sk_state != TCP_LISTEN && 136 sk->sk_state != TCP_LISTEN) ||
137 (tb->fastreuseport > 0 &&
138 sk->sk_reuseport &&
139 uid_eq(tb->fastuid, uid))) &&
131 (tb->num_owners < smallest_size || smallest_size == -1)) { 140 (tb->num_owners < smallest_size || smallest_size == -1)) {
132 smallest_size = tb->num_owners; 141 smallest_size = tb->num_owners;
133 smallest_rover = rover; 142 smallest_rover = rover;
@@ -185,14 +194,18 @@ tb_found:
185 if (sk->sk_reuse == SK_FORCE_REUSE) 194 if (sk->sk_reuse == SK_FORCE_REUSE)
186 goto success; 195 goto success;
187 196
188 if (tb->fastreuse > 0 && 197 if (((tb->fastreuse > 0 &&
189 sk->sk_reuse && sk->sk_state != TCP_LISTEN && 198 sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
199 (tb->fastreuseport > 0 &&
200 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
190 smallest_size == -1) { 201 smallest_size == -1) {
191 goto success; 202 goto success;
192 } else { 203 } else {
193 ret = 1; 204 ret = 1;
194 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { 205 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
195 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && 206 if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
207 (tb->fastreuseport > 0 &&
208 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
196 smallest_size != -1 && --attempts >= 0) { 209 smallest_size != -1 && --attempts >= 0) {
197 spin_unlock(&head->lock); 210 spin_unlock(&head->lock);
198 goto again; 211 goto again;
@@ -212,9 +225,19 @@ tb_not_found:
212 tb->fastreuse = 1; 225 tb->fastreuse = 1;
213 else 226 else
214 tb->fastreuse = 0; 227 tb->fastreuse = 0;
215 } else if (tb->fastreuse && 228 if (sk->sk_reuseport) {
216 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) 229 tb->fastreuseport = 1;
217 tb->fastreuse = 0; 230 tb->fastuid = uid;
231 } else
232 tb->fastreuseport = 0;
233 } else {
234 if (tb->fastreuse &&
235 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
236 tb->fastreuse = 0;
237 if (tb->fastreuseport &&
238 (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
239 tb->fastreuseport = 0;
240 }
218success: 241success:
219 if (!inet_csk(sk)->icsk_bind_hash) 242 if (!inet_csk(sk)->icsk_bind_hash)
220 inet_bind_hash(sk, tb, snum); 243 inet_bind_hash(sk, tb, snum);
@@ -710,6 +733,22 @@ void inet_csk_destroy_sock(struct sock *sk)
710} 733}
711EXPORT_SYMBOL(inet_csk_destroy_sock); 734EXPORT_SYMBOL(inet_csk_destroy_sock);
712 735
736/* This function allows a socket to be forcibly closed after the call to
737 * tcp/dccp_create_openreq_child().
738 */
739void inet_csk_prepare_forced_close(struct sock *sk)
740{
741 /* sk_clone_lock locked the socket and set refcnt to 2 */
742 bh_unlock_sock(sk);
743 sock_put(sk);
744
745 /* The below has to be done to allow calling inet_csk_destroy_sock */
746 sock_set_flag(sk, SOCK_DEAD);
747 percpu_counter_inc(sk->sk_prot->orphan_count);
748 inet_sk(sk)->inet_num = 0;
749}
750EXPORT_SYMBOL(inet_csk_prepare_forced_close);
751
713int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) 752int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
714{ 753{
715 struct inet_sock *inet = inet_sk(sk); 754 struct inet_sock *inet = inet_sk(sk);
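The bind-conflict changes implement SO_REUSEPORT: several sockets owned by the same user may bind the identical address/port pair provided every one of them sets the option before bind(). The tb->fastreuseport/tb->fastuid fields cached in the bind bucket keep the common all-reuseport case off the full conflict scan. From user space the option is plain setsockopt (sketch, error handling trimmed, assuming headers that already define SO_REUSEPORT):

        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        listen(fd, SOMAXCONN);

Each worker process repeats this on its own descriptor and the kernel spreads incoming connections across the group.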
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 4750d2b74d79..2e453bde6992 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -73,8 +73,9 @@ EXPORT_SYMBOL(inet_frags_init);
73void inet_frags_init_net(struct netns_frags *nf) 73void inet_frags_init_net(struct netns_frags *nf)
74{ 74{
75 nf->nqueues = 0; 75 nf->nqueues = 0;
76 atomic_set(&nf->mem, 0); 76 init_frag_mem_limit(nf);
77 INIT_LIST_HEAD(&nf->lru_list); 77 INIT_LIST_HEAD(&nf->lru_list);
78 spin_lock_init(&nf->lru_lock);
78} 79}
79EXPORT_SYMBOL(inet_frags_init_net); 80EXPORT_SYMBOL(inet_frags_init_net);
80 81
@@ -91,6 +92,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
91 local_bh_disable(); 92 local_bh_disable();
92 inet_frag_evictor(nf, f, true); 93 inet_frag_evictor(nf, f, true);
93 local_bh_enable(); 94 local_bh_enable();
95
96 percpu_counter_destroy(&nf->mem);
94} 97}
95EXPORT_SYMBOL(inet_frags_exit_net); 98EXPORT_SYMBOL(inet_frags_exit_net);
96 99
@@ -98,9 +101,9 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
98{ 101{
99 write_lock(&f->lock); 102 write_lock(&f->lock);
100 hlist_del(&fq->list); 103 hlist_del(&fq->list);
101 list_del(&fq->lru_list);
102 fq->net->nqueues--; 104 fq->net->nqueues--;
103 write_unlock(&f->lock); 105 write_unlock(&f->lock);
106 inet_frag_lru_del(fq);
104} 107}
105 108
106void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) 109void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -117,12 +120,8 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
117EXPORT_SYMBOL(inet_frag_kill); 120EXPORT_SYMBOL(inet_frag_kill);
118 121
119static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, 122static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
120 struct sk_buff *skb, int *work) 123 struct sk_buff *skb)
121{ 124{
122 if (work)
123 *work -= skb->truesize;
124
125 atomic_sub(skb->truesize, &nf->mem);
126 if (f->skb_free) 125 if (f->skb_free)
127 f->skb_free(skb); 126 f->skb_free(skb);
128 kfree_skb(skb); 127 kfree_skb(skb);
@@ -133,6 +132,7 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
133{ 132{
134 struct sk_buff *fp; 133 struct sk_buff *fp;
135 struct netns_frags *nf; 134 struct netns_frags *nf;
135 unsigned int sum, sum_truesize = 0;
136 136
137 WARN_ON(!(q->last_in & INET_FRAG_COMPLETE)); 137 WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
138 WARN_ON(del_timer(&q->timer) != 0); 138 WARN_ON(del_timer(&q->timer) != 0);
@@ -143,13 +143,14 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
143 while (fp) { 143 while (fp) {
144 struct sk_buff *xp = fp->next; 144 struct sk_buff *xp = fp->next;
145 145
146 frag_kfree_skb(nf, f, fp, work); 146 sum_truesize += fp->truesize;
147 frag_kfree_skb(nf, f, fp);
147 fp = xp; 148 fp = xp;
148 } 149 }
149 150 sum = sum_truesize + f->qsize;
150 if (work) 151 if (work)
151 *work -= f->qsize; 152 *work -= sum;
152 atomic_sub(f->qsize, &nf->mem); 153 sub_frag_mem_limit(q, sum);
153 154
154 if (f->destructor) 155 if (f->destructor)
155 f->destructor(q); 156 f->destructor(q);
@@ -164,22 +165,23 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
164 int work, evicted = 0; 165 int work, evicted = 0;
165 166
166 if (!force) { 167 if (!force) {
167 if (atomic_read(&nf->mem) <= nf->high_thresh) 168 if (frag_mem_limit(nf) <= nf->high_thresh)
168 return 0; 169 return 0;
169 } 170 }
170 171
171 work = atomic_read(&nf->mem) - nf->low_thresh; 172 work = frag_mem_limit(nf) - nf->low_thresh;
172 while (work > 0) { 173 while (work > 0) {
173 read_lock(&f->lock); 174 spin_lock(&nf->lru_lock);
175
174 if (list_empty(&nf->lru_list)) { 176 if (list_empty(&nf->lru_list)) {
175 read_unlock(&f->lock); 177 spin_unlock(&nf->lru_lock);
176 break; 178 break;
177 } 179 }
178 180
179 q = list_first_entry(&nf->lru_list, 181 q = list_first_entry(&nf->lru_list,
180 struct inet_frag_queue, lru_list); 182 struct inet_frag_queue, lru_list);
181 atomic_inc(&q->refcnt); 183 atomic_inc(&q->refcnt);
182 read_unlock(&f->lock); 184 spin_unlock(&nf->lru_lock);
183 185
184 spin_lock(&q->lock); 186 spin_lock(&q->lock);
185 if (!(q->last_in & INET_FRAG_COMPLETE)) 187 if (!(q->last_in & INET_FRAG_COMPLETE))
@@ -233,9 +235,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
233 235
234 atomic_inc(&qp->refcnt); 236 atomic_inc(&qp->refcnt);
235 hlist_add_head(&qp->list, &f->hash[hash]); 237 hlist_add_head(&qp->list, &f->hash[hash]);
236 list_add_tail(&qp->lru_list, &nf->lru_list);
237 nf->nqueues++; 238 nf->nqueues++;
238 write_unlock(&f->lock); 239 write_unlock(&f->lock);
240 inet_frag_lru_add(nf, qp);
239 return qp; 241 return qp;
240} 242}
241 243
@@ -250,7 +252,8 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
250 252
251 q->net = nf; 253 q->net = nf;
252 f->constructor(q, arg); 254 f->constructor(q, arg);
253 atomic_add(f->qsize, &nf->mem); 255 add_frag_mem_limit(q, f->qsize);
256
254 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 257 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
255 spin_lock_init(&q->lock); 258 spin_lock_init(&q->lock);
256 atomic_set(&q->refcnt, 1); 259 atomic_set(&q->refcnt, 1);
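The frag_mem_limit()/add_frag_mem_limit()/sub_frag_mem_limit() calls replace the old atomic_t with a percpu_counter in struct netns_frags: updates are batched per CPU, so parallel reassembly stops contending on one cache line, at the cost of an approximate fast read. As this series defines them, approximately (the batch constant is sized around one maximum 64K fragment):

        static inline int frag_mem_limit(struct netns_frags *nf)
        {
                return percpu_counter_read(&nf->mem);   /* fast, approximate */
        }

        static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
        {
                __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
        }

        static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
        {
                return percpu_counter_sum_positive(&nf->mem);   /* exact, slow */
        }

Threshold checks in the evictor use the cheap approximate read, while /proc reporting through ip_frag_mem() pays for the exact sum.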
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index fa3ae8148710..0ce0595d9861 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -39,6 +39,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
39 write_pnet(&tb->ib_net, hold_net(net)); 39 write_pnet(&tb->ib_net, hold_net(net));
40 tb->port = snum; 40 tb->port = snum;
41 tb->fastreuse = 0; 41 tb->fastreuse = 0;
42 tb->fastreuseport = 0;
42 tb->num_owners = 0; 43 tb->num_owners = 0;
43 INIT_HLIST_HEAD(&tb->owners); 44 INIT_HLIST_HEAD(&tb->owners);
44 hlist_add_head(&tb->node, &head->chain); 45 hlist_add_head(&tb->node, &head->chain);
@@ -151,16 +152,16 @@ static inline int compute_score(struct sock *sk, struct net *net,
151 if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && 152 if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
152 !ipv6_only_sock(sk)) { 153 !ipv6_only_sock(sk)) {
153 __be32 rcv_saddr = inet->inet_rcv_saddr; 154 __be32 rcv_saddr = inet->inet_rcv_saddr;
154 score = sk->sk_family == PF_INET ? 1 : 0; 155 score = sk->sk_family == PF_INET ? 2 : 1;
155 if (rcv_saddr) { 156 if (rcv_saddr) {
156 if (rcv_saddr != daddr) 157 if (rcv_saddr != daddr)
157 return -1; 158 return -1;
158 score += 2; 159 score += 4;
159 } 160 }
160 if (sk->sk_bound_dev_if) { 161 if (sk->sk_bound_dev_if) {
161 if (sk->sk_bound_dev_if != dif) 162 if (sk->sk_bound_dev_if != dif)
162 return -1; 163 return -1;
163 score += 2; 164 score += 4;
164 } 165 }
165 } 166 }
166 return score; 167 return score;
@@ -176,6 +177,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
176 177
177struct sock *__inet_lookup_listener(struct net *net, 178struct sock *__inet_lookup_listener(struct net *net,
178 struct inet_hashinfo *hashinfo, 179 struct inet_hashinfo *hashinfo,
180 const __be32 saddr, __be16 sport,
179 const __be32 daddr, const unsigned short hnum, 181 const __be32 daddr, const unsigned short hnum,
180 const int dif) 182 const int dif)
181{ 183{
@@ -183,17 +185,29 @@ struct sock *__inet_lookup_listener(struct net *net,
183 struct hlist_nulls_node *node; 185 struct hlist_nulls_node *node;
184 unsigned int hash = inet_lhashfn(net, hnum); 186 unsigned int hash = inet_lhashfn(net, hnum);
185 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; 187 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
186 int score, hiscore; 188 int score, hiscore, matches = 0, reuseport = 0;
189 u32 phash = 0;
187 190
188 rcu_read_lock(); 191 rcu_read_lock();
189begin: 192begin:
190 result = NULL; 193 result = NULL;
191 hiscore = -1; 194 hiscore = 0;
192 sk_nulls_for_each_rcu(sk, node, &ilb->head) { 195 sk_nulls_for_each_rcu(sk, node, &ilb->head) {
193 score = compute_score(sk, net, hnum, daddr, dif); 196 score = compute_score(sk, net, hnum, daddr, dif);
194 if (score > hiscore) { 197 if (score > hiscore) {
195 result = sk; 198 result = sk;
196 hiscore = score; 199 hiscore = score;
200 reuseport = sk->sk_reuseport;
201 if (reuseport) {
202 phash = inet_ehashfn(net, daddr, hnum,
203 saddr, sport);
204 matches = 1;
205 }
206 } else if (score == hiscore && reuseport) {
207 matches++;
208 if (((u64)phash * matches) >> 32 == 0)
209 result = sk;
210 phash = next_pseudo_random32(phash);
197 } 211 }
198 } 212 }
199 /* 213 /*
@@ -501,7 +515,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
501 inet_bind_bucket_for_each(tb, node, &head->chain) { 515 inet_bind_bucket_for_each(tb, node, &head->chain) {
502 if (net_eq(ib_net(tb), net) && 516 if (net_eq(ib_net(tb), net) &&
503 tb->port == port) { 517 tb->port == port) {
504 if (tb->fastreuse >= 0) 518 if (tb->fastreuse >= 0 ||
519 tb->fastreuseport >= 0)
505 goto next_port; 520 goto next_port;
506 WARN_ON(hlist_empty(&tb->owners)); 521 WARN_ON(hlist_empty(&tb->owners));
507 if (!check_established(death_row, sk, 522 if (!check_established(death_row, sk,
@@ -518,6 +533,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
518 break; 533 break;
519 } 534 }
520 tb->fastreuse = -1; 535 tb->fastreuse = -1;
536 tb->fastreuseport = -1;
521 goto ok; 537 goto ok;
522 538
523 next_port: 539 next_port:
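The reuseport branch in __inet_lookup_listener() is one-pass uniform selection (reservoir sampling): for the k-th equally scored listener, ((u64)phash * k) >> 32 is zero exactly when phash < 2^32 / k, i.e. with probability 1/k for a uniform phash, so each candidate displaces the current pick with probability 1/k and, by induction, all k listeners end up chosen with probability 1/k. Because phash is seeded from the connection 4-tuple via inet_ehashfn(), a given client flow keeps hitting the same listener while the group is stable. The trick in isolation (sketch):

        u32 phash = flow_hash;          /* assumed uniform over [0, 2^32) */
        int i, matches = 0, pick = -1;

        for (i = 0; i < ncandidates; i++) {
                matches++;
                /* true with probability 1/matches */
                if (((u64)phash * matches) >> 32 == 0)
                        pick = i;
                phash = next_pseudo_random32(phash);
        }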
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index eb9d63a570cd..b6d30acb600c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -122,7 +122,7 @@ int ip_frag_nqueues(struct net *net)
122 122
123int ip_frag_mem(struct net *net) 123int ip_frag_mem(struct net *net)
124{ 124{
125 return atomic_read(&net->ipv4.frags.mem); 125 return sum_frag_mem_limit(&net->ipv4.frags);
126} 126}
127 127
128static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, 128static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
@@ -161,13 +161,6 @@ static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
161 qp->user == arg->user; 161 qp->user == arg->user;
162} 162}
163 163
164/* Memory Tracking Functions. */
165static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
166{
167 atomic_sub(skb->truesize, &nf->mem);
168 kfree_skb(skb);
169}
170
171static void ip4_frag_init(struct inet_frag_queue *q, void *a) 164static void ip4_frag_init(struct inet_frag_queue *q, void *a)
172{ 165{
173 struct ipq *qp = container_of(q, struct ipq, q); 166 struct ipq *qp = container_of(q, struct ipq, q);
@@ -340,6 +333,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
340static int ip_frag_reinit(struct ipq *qp) 333static int ip_frag_reinit(struct ipq *qp)
341{ 334{
342 struct sk_buff *fp; 335 struct sk_buff *fp;
336 unsigned int sum_truesize = 0;
343 337
344 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { 338 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
345 atomic_inc(&qp->q.refcnt); 339 atomic_inc(&qp->q.refcnt);
@@ -349,9 +343,12 @@ static int ip_frag_reinit(struct ipq *qp)
349 fp = qp->q.fragments; 343 fp = qp->q.fragments;
350 do { 344 do {
351 struct sk_buff *xp = fp->next; 345 struct sk_buff *xp = fp->next;
352 frag_kfree_skb(qp->q.net, fp); 346
347 sum_truesize += fp->truesize;
348 kfree_skb(fp);
353 fp = xp; 349 fp = xp;
354 } while (fp); 350 } while (fp);
351 sub_frag_mem_limit(&qp->q, sum_truesize);
355 352
356 qp->q.last_in = 0; 353 qp->q.last_in = 0;
357 qp->q.len = 0; 354 qp->q.len = 0;
@@ -496,7 +493,8 @@ found:
496 qp->q.fragments = next; 493 qp->q.fragments = next;
497 494
498 qp->q.meat -= free_it->len; 495 qp->q.meat -= free_it->len;
499 frag_kfree_skb(qp->q.net, free_it); 496 sub_frag_mem_limit(&qp->q, free_it->truesize);
497 kfree_skb(free_it);
500 } 498 }
501 } 499 }
502 500
@@ -519,7 +517,7 @@ found:
519 qp->q.stamp = skb->tstamp; 517 qp->q.stamp = skb->tstamp;
520 qp->q.meat += skb->len; 518 qp->q.meat += skb->len;
521 qp->ecn |= ecn; 519 qp->ecn |= ecn;
522 atomic_add(skb->truesize, &qp->q.net->mem); 520 add_frag_mem_limit(&qp->q, skb->truesize);
523 if (offset == 0) 521 if (offset == 0)
524 qp->q.last_in |= INET_FRAG_FIRST_IN; 522 qp->q.last_in |= INET_FRAG_FIRST_IN;
525 523
@@ -531,9 +529,7 @@ found:
531 qp->q.meat == qp->q.len) 529 qp->q.meat == qp->q.len)
532 return ip_frag_reasm(qp, prev, dev); 530 return ip_frag_reasm(qp, prev, dev);
533 531
534 write_lock(&ip4_frags.lock); 532 inet_frag_lru_move(&qp->q);
535 list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
536 write_unlock(&ip4_frags.lock);
537 return -EINPROGRESS; 533 return -EINPROGRESS;
538 534
539err: 535err:
@@ -594,7 +590,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
594 goto out_oversize; 590 goto out_oversize;
595 591
596 /* Head of list must not be cloned. */ 592 /* Head of list must not be cloned. */
597 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) 593 if (skb_unclone(head, GFP_ATOMIC))
598 goto out_nomem; 594 goto out_nomem;
599 595
600 /* If the first fragment is fragmented itself, we split 596 /* If the first fragment is fragmented itself, we split
@@ -617,7 +613,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
617 head->len -= clone->len; 613 head->len -= clone->len;
618 clone->csum = 0; 614 clone->csum = 0;
619 clone->ip_summed = head->ip_summed; 615 clone->ip_summed = head->ip_summed;
620 atomic_add(clone->truesize, &qp->q.net->mem); 616 add_frag_mem_limit(&qp->q, clone->truesize);
621 } 617 }
622 618
623 skb_push(head, head->data - skb_network_header(head)); 619 skb_push(head, head->data - skb_network_header(head));
@@ -645,7 +641,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
645 } 641 }
646 fp = next; 642 fp = next;
647 } 643 }
648 atomic_sub(sum_truesize, &qp->q.net->mem); 644 sub_frag_mem_limit(&qp->q, sum_truesize);
649 645
650 head->next = NULL; 646 head->next = NULL;
651 head->dev = dev; 647 head->dev = dev;
@@ -851,14 +847,22 @@ static inline void ip4_frags_ctl_register(void)
851 847
852static int __net_init ipv4_frags_init_net(struct net *net) 848static int __net_init ipv4_frags_init_net(struct net *net)
853{ 849{
854 /* 850 /* Fragment cache limits.
855 * Fragment cache limits. We will commit 256K at one time. Should we 851 *
856	 * cross that limit we will prune down to 192K. This should cope with 852	 * The fragment memory accounting code (tries to) account for
857 * even the most extreme cases without allowing an attacker to 853 * the real memory usage, by measuring both the size of frag
858 * measurably harm machine performance. 854 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
855 * and the SKB's truesize.
856 *
857 * A 64K fragment consumes 129736 bytes (44*2944)+200
858 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
859 *
860 * We will commit 4MB at one time. Should we cross that limit
861 * we will prune down to 3MB, making room for approx 8 big 64K
862 * fragments 8x128k.
859 */ 863 */
860 net->ipv4.frags.high_thresh = 256 * 1024; 864 net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
861 net->ipv4.frags.low_thresh = 192 * 1024; 865 net->ipv4.frags.low_thresh = 3 * 1024 * 1024;
862 /* 866 /*
863 * Important NOTE! Fragment queue must be destroyed before MSL expires. 867 * Important NOTE! Fragment queue must be destroyed before MSL expires.
864 * RFC791 is wrong proposing to prolongate timer each fragment arrival 868 * RFC791 is wrong proposing to prolongate timer each fragment arrival
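Two things happen in this hunk besides the comment rewrite: the thresholds grow to match the new, more honest accounting (under truesize accounting the old 256K budget held barely two 64K datagrams), and LRU maintenance moves under the per-netns lru_lock added in inet_fragment.c, so a hot reassembly path no longer takes the global hash rwlock just to bump its queue to the tail. The helper it now calls is roughly:

        static inline void inet_frag_lru_move(struct inet_frag_queue *q)
        {
                spin_lock(&q->net->lru_lock);
                list_move_tail(&q->lru_list, &q->net->lru_list);
                spin_unlock(&q->net->lru_lock);
        }

The limits remain tunable through the net.ipv4.ipfrag_high_thresh and net.ipv4.ipfrag_low_thresh sysctls.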
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a85ae2f7a21c..5ef4da780ac1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,10 +735,32 @@ drop:
735 return 0; 735 return 0;
736} 736}
737 737
738static struct sk_buff *handle_offloads(struct sk_buff *skb)
739{
740 int err;
741
742 if (skb_is_gso(skb)) {
743 err = skb_unclone(skb, GFP_ATOMIC);
744 if (unlikely(err))
745 goto error;
746 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
747 return skb;
748 }
749 if (skb->ip_summed != CHECKSUM_PARTIAL)
750 skb->ip_summed = CHECKSUM_NONE;
751
752 return skb;
753
754error:
755 kfree_skb(skb);
756 return ERR_PTR(err);
757}
758
738static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 759static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
739{ 760{
761 struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
740 struct ip_tunnel *tunnel = netdev_priv(dev); 762 struct ip_tunnel *tunnel = netdev_priv(dev);
741 const struct iphdr *old_iph = ip_hdr(skb); 763 const struct iphdr *old_iph;
742 const struct iphdr *tiph; 764 const struct iphdr *tiph;
743 struct flowi4 fl4; 765 struct flowi4 fl4;
744 u8 tos; 766 u8 tos;
@@ -750,17 +772,32 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
750 int gre_hlen; 772 int gre_hlen;
751 __be32 dst; 773 __be32 dst;
752 int mtu; 774 int mtu;
775 u8 ttl;
776 int err;
777 int pkt_len;
753 778
754 if (skb->ip_summed == CHECKSUM_PARTIAL && 779 skb = handle_offloads(skb);
755 skb_checksum_help(skb)) 780 if (IS_ERR(skb)) {
756 goto tx_error; 781 dev->stats.tx_dropped++;
782 return NETDEV_TX_OK;
783 }
784
785 if (!skb->encapsulation) {
786 skb_reset_inner_headers(skb);
787 skb->encapsulation = 1;
788 }
789
790 old_iph = ip_hdr(skb);
757 791
758 if (dev->type == ARPHRD_ETHER) 792 if (dev->type == ARPHRD_ETHER)
759 IPCB(skb)->flags = 0; 793 IPCB(skb)->flags = 0;
760 794
761 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 795 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
762 gre_hlen = 0; 796 gre_hlen = 0;
763 tiph = (const struct iphdr *)skb->data; 797 if (skb->protocol == htons(ETH_P_IP))
798 tiph = (const struct iphdr *)skb->data;
799 else
800 tiph = &tunnel->parms.iph;
764 } else { 801 } else {
765 gre_hlen = tunnel->hlen; 802 gre_hlen = tunnel->hlen;
766 tiph = &tunnel->parms.iph; 803 tiph = &tunnel->parms.iph;
@@ -812,9 +849,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
812 goto tx_error; 849 goto tx_error;
813 } 850 }
814 851
852 ttl = tiph->ttl;
815 tos = tiph->tos; 853 tos = tiph->tos;
816 if (tos == 1) { 854 if (tos & 0x1) {
817 tos = 0; 855 tos &= ~0x1;
818 if (skb->protocol == htons(ETH_P_IP)) 856 if (skb->protocol == htons(ETH_P_IP))
819 tos = old_iph->tos; 857 tos = old_iph->tos;
820 else if (skb->protocol == htons(ETH_P_IPV6)) 858 else if (skb->protocol == htons(ETH_P_IPV6))
@@ -848,7 +886,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
848 if (skb->protocol == htons(ETH_P_IP)) { 886 if (skb->protocol == htons(ETH_P_IP)) {
849 df |= (old_iph->frag_off&htons(IP_DF)); 887 df |= (old_iph->frag_off&htons(IP_DF));
850 888
851 if ((old_iph->frag_off&htons(IP_DF)) && 889 if (!skb_is_gso(skb) &&
890 (old_iph->frag_off&htons(IP_DF)) &&
852 mtu < ntohs(old_iph->tot_len)) { 891 mtu < ntohs(old_iph->tot_len)) {
853 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 892 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
854 ip_rt_put(rt); 893 ip_rt_put(rt);
@@ -868,7 +907,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
868 } 907 }
869 } 908 }
870 909
871 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) { 910 if (!skb_is_gso(skb) &&
911 mtu >= IPV6_MIN_MTU &&
912 mtu < skb->len - tunnel->hlen + gre_hlen) {
872 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 913 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
873 ip_rt_put(rt); 914 ip_rt_put(rt);
874 goto tx_error; 915 goto tx_error;
@@ -904,11 +945,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
904 dev_kfree_skb(skb); 945 dev_kfree_skb(skb);
905 skb = new_skb; 946 skb = new_skb;
906 old_iph = ip_hdr(skb); 947 old_iph = ip_hdr(skb);
948		/* Warning: tiph value might point to freed memory */
907 } 949 }
908 950
909 skb_reset_transport_header(skb);
910 skb_push(skb, gre_hlen); 951 skb_push(skb, gre_hlen);
911 skb_reset_network_header(skb); 952 skb_reset_network_header(skb);
953 skb_set_transport_header(skb, sizeof(*iph));
912 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 954 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
913 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 955 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
914 IPSKB_REROUTED); 956 IPSKB_REROUTED);
@@ -927,8 +969,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
927 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb); 969 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
928 iph->daddr = fl4.daddr; 970 iph->daddr = fl4.daddr;
929 iph->saddr = fl4.saddr; 971 iph->saddr = fl4.saddr;
972 iph->ttl = ttl;
973 iph->id = 0;
930 974
931 if ((iph->ttl = tiph->ttl) == 0) { 975 if (ttl == 0) {
932 if (skb->protocol == htons(ETH_P_IP)) 976 if (skb->protocol == htons(ETH_P_IP))
933 iph->ttl = old_iph->ttl; 977 iph->ttl = old_iph->ttl;
934#if IS_ENABLED(CONFIG_IPV6) 978#if IS_ENABLED(CONFIG_IPV6)
@@ -955,13 +999,37 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
955 *ptr = tunnel->parms.o_key; 999 *ptr = tunnel->parms.o_key;
956 ptr--; 1000 ptr--;
957 } 1001 }
958 if (tunnel->parms.o_flags&GRE_CSUM) { 1002 /* Skip GRE checksum if skb is getting offloaded. */
1003 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
1004 (tunnel->parms.o_flags&GRE_CSUM)) {
1005 int offset = skb_transport_offset(skb);
1006
1007 if (skb_has_shared_frag(skb)) {
1008 err = __skb_linearize(skb);
1009 if (err)
1010 goto tx_error;
1011 }
1012
959 *ptr = 0; 1013 *ptr = 0;
960 *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); 1014 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
1015 skb->len - offset,
1016 0));
961 } 1017 }
962 } 1018 }
963 1019
964 iptunnel_xmit(skb, dev); 1020 nf_reset(skb);
1021
1022 pkt_len = skb->len - skb_transport_offset(skb);
1023 err = ip_local_out(skb);
1024 if (likely(net_xmit_eval(err) == 0)) {
1025 u64_stats_update_begin(&tstats->syncp);
1026 tstats->tx_bytes += pkt_len;
1027 tstats->tx_packets++;
1028 u64_stats_update_end(&tstats->syncp);
1029 } else {
1030 dev->stats.tx_errors++;
1031 dev->stats.tx_aborted_errors++;
1032 }
965 return NETDEV_TX_OK; 1033 return NETDEV_TX_OK;
966 1034
967#if IS_ENABLED(CONFIG_IPV6) 1035#if IS_ENABLED(CONFIG_IPV6)
@@ -1031,6 +1099,17 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
1031 mtu = 68; 1099 mtu = 68;
1032 1100
1033 tunnel->hlen = addend; 1101 tunnel->hlen = addend;
1102 /* TCP offload with GRE SEQ is not supported. */
1103 if (!(tunnel->parms.o_flags & GRE_SEQ)) {
1104		/* device supports enc gso offload */
1105 if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
1106 dev->features |= NETIF_F_TSO;
1107 dev->hw_features |= NETIF_F_TSO;
1108 } else {
1109 dev->features |= NETIF_F_GSO_SOFTWARE;
1110 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1111 }
1112 }
1034 1113
1035 return mtu; 1114 return mtu;
1036} 1115}
@@ -1580,6 +1659,9 @@ static void ipgre_tap_setup(struct net_device *dev)
1580 1659
1581 dev->iflink = 0; 1660 dev->iflink = 0;
1582 dev->features |= NETIF_F_NETNS_LOCAL; 1661 dev->features |= NETIF_F_NETNS_LOCAL;
1662
1663 dev->features |= GRE_FEATURES;
1664 dev->hw_features |= GRE_FEATURES;
1583} 1665}
1584 1666
1585static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], 1667static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
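The rewritten transmit tail accounts bytes and packets in per-cpu tstats under a u64_stats sequence rather than bumping shared netdev counters, the standard pattern for 64-bit counters that must not tear on 32-bit hosts. The matching reader side, as an ndo_get_stats64 implementation would do it (sketch):

        unsigned int start;
        u64 packets, bytes;

        do {
                start = u64_stats_fetch_begin(&tstats->syncp);
                packets = tstats->tx_packets;
                bytes   = tstats->tx_bytes;
        } while (u64_stats_fetch_retry(&tstats->syncp, start));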
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index f1395a6fb35f..87abd3e2bd32 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -208,13 +208,6 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
208 if (ipprot != NULL) { 208 if (ipprot != NULL) {
209 int ret; 209 int ret;
210 210
211 if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
212 net_info_ratelimited("%s: proto %d isn't netns-ready\n",
213 __func__, protocol);
214 kfree_skb(skb);
215 goto out;
216 }
217
218 if (!ipprot->no_policy) { 211 if (!ipprot->no_policy) {
219 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 212 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
220 kfree_skb(skb); 213 kfree_skb(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3e98ed2bff55..5e12dca7b3dd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -598,6 +598,7 @@ slow_path:
598 /* for offloaded checksums cleanup checksum before fragmentation */ 598 /* for offloaded checksums cleanup checksum before fragmentation */
599 if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb)) 599 if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
600 goto fail; 600 goto fail;
601 iph = ip_hdr(skb);
601 602
602 left = skb->len - hlen; /* Space per frame */ 603 left = skb->len - hlen; /* Space per frame */
603 ptr = hlen; /* Where to start from */ 604 ptr = hlen; /* Where to start from */
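The one-line addition re-derives iph after skb_checksum_help(): any helper that may call pskb_expand_head() can relocate the skb data area, leaving previously cached header pointers dangling. The ip_gre.c hunk above and the ipip.c hunk below apply the same rule to old_iph. The safe idiom:

        if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
                goto fail;
        iph = ip_hdr(skb);      /* data may have moved; re-read the pointer */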
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3c9d20880283..d9c4f113d709 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
590 case IP_TTL: 590 case IP_TTL:
591 if (optlen < 1) 591 if (optlen < 1)
592 goto e_inval; 592 goto e_inval;
593 if (val != -1 && (val < 0 || val > 255)) 593 if (val != -1 && (val < 1 || val > 255))
594 goto e_inval; 594 goto e_inval;
595 inet->uc_ttl = val; 595 inet->uc_ttl = val;
596 break; 596 break;
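The tightened IP_TTL check rejects 0, which would otherwise stamp packets no router may forward; -1 still resets the socket to the system default TTL. Typical user-space usage is unchanged (sketch):

        int ttl = 64;

        if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
                perror("setsockopt(IP_TTL)");   /* EINVAL for 0 or > 255 */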
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d3ab47e19a89..f01d1b1aff7f 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47 if (!x) 47 if (!x)
48 return; 48 return;
49 49
50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
51 atomic_inc(&flow_cache_genid);
52 rt_genid_bump(net);
53
51 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); 54 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
52 else 55 } else
53 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0); 56 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
54 xfrm_state_put(x); 57 xfrm_state_put(x);
55} 58}
@@ -160,6 +163,7 @@ static const struct net_protocol ipcomp4_protocol = {
160 .handler = xfrm4_rcv, 163 .handler = xfrm4_rcv,
161 .err_handler = ipcomp4_err, 164 .err_handler = ipcomp4_err,
162 .no_policy = 1, 165 .no_policy = 1,
166 .netns_ok = 1,
163}; 167};
164 168
165static int __init ipcomp4_init(void) 169static int __init ipcomp4_init(void)
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index d763701cff1b..98cbc6877019 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -136,6 +136,8 @@ __be32 ic_myaddr = NONE; /* My IP address */
136static __be32 ic_netmask = NONE; /* Netmask for local subnet */ 136static __be32 ic_netmask = NONE; /* Netmask for local subnet */
137__be32 ic_gateway = NONE; /* Gateway IP address */ 137__be32 ic_gateway = NONE; /* Gateway IP address */
138 138
139__be32 ic_addrservaddr = NONE;	/* IP address of the server that assigned our IP address */
140
139__be32 ic_servaddr = NONE; /* Boot server IP address */ 141__be32 ic_servaddr = NONE; /* Boot server IP address */
140 142
141__be32 root_server_addr = NONE; /* Address of NFS server */ 143__be32 root_server_addr = NONE; /* Address of NFS server */
@@ -558,6 +560,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
558 if (ic_myaddr == NONE) 560 if (ic_myaddr == NONE)
559 ic_myaddr = tip; 561 ic_myaddr = tip;
560 ic_servaddr = sip; 562 ic_servaddr = sip;
563 ic_addrservaddr = sip;
561 ic_got_reply = IC_RARP; 564 ic_got_reply = IC_RARP;
562 565
563drop_unlock: 566drop_unlock:
@@ -1068,7 +1071,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
 			ic_servaddr = server_id;
 #ifdef IPCONFIG_DEBUG
 			printk("DHCP: Offered address %pI4 by server %pI4\n",
-			       &ic_myaddr, &ic_servaddr);
+			       &ic_myaddr, &b->iph.saddr);
 #endif
 			/* The DHCP indicated server address takes
 			 * precedence over the bootp header one if
@@ -1113,6 +1116,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
 	ic_dev = dev;
 	ic_myaddr = b->your_ip;
 	ic_servaddr = b->server_ip;
+	ic_addrservaddr = b->iph.saddr;
 	if (ic_gateway == NONE && b->relay_ip)
 		ic_gateway = b->relay_ip;
 	if (ic_nameservers[0] == NONE)
@@ -1268,7 +1272,7 @@ static int __init ic_dynamic(void)
 	printk("IP-Config: Got %s answer from %pI4, ",
 	       ((ic_got_reply & IC_RARP) ? "RARP"
 		: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
-	       &ic_servaddr);
+	       &ic_addrservaddr);
 	pr_cont("my address is %pI4\n", &ic_myaddr);
 
 	return 0;
@@ -1390,7 +1394,7 @@ static int __init ip_auto_config(void)
 	unsigned int i;
 
 #ifdef CONFIG_PROC_FS
-	proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
+	proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
 #endif /* CONFIG_PROC_FS */
 
 	if (!ic_enable)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 191fc24a745a..8f024d41eefa 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -472,7 +472,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;		/* Route to the other host */
 	struct net_device *tdev;	/* Device to other host */
-	const struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *old_iph;
 	struct iphdr *iph;		/* Our new IP header */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst = tiph->daddr;
@@ -486,6 +486,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	    skb_checksum_help(skb))
 		goto tx_error;
 
+	old_iph = ip_hdr(skb);
+
 	if (tos & 1)
 		tos = old_iph->tos;
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9454cbd953c..5f95b3aa579e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -828,6 +828,49 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 	return NULL;
 }
 
+/* Look for a (*,*,oif) entry */
+static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
+						    int vifi)
+{
+	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
+	struct mfc_cache *c;
+
+	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
+		if (c->mfc_origin == htonl(INADDR_ANY) &&
+		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
+		    c->mfc_un.res.ttls[vifi] < 255)
+			return c;
+
+	return NULL;
+}
+
+/* Look for a (*,G) entry */
+static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
+					     __be32 mcastgrp, int vifi)
+{
+	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
+	struct mfc_cache *c, *proxy;
+
+	if (mcastgrp == htonl(INADDR_ANY))
+		goto skip;
+
+	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
+		if (c->mfc_origin == htonl(INADDR_ANY) &&
+		    c->mfc_mcastgrp == mcastgrp) {
+			if (c->mfc_un.res.ttls[vifi] < 255)
+				return c;
+
+			/* It's ok if the vifi is part of the static tree */
+			proxy = ipmr_cache_find_any_parent(mrt,
+							   c->mfc_parent);
+			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
+				return c;
+		}
+
+skip:
+	return ipmr_cache_find_any_parent(mrt, vifi);
+}
+
 /*
  * Allocate a multicast cache entry
  */
@@ -1053,7 +1096,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
  * MFC cache manipulation by user space mroute daemon
  */
 
-static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
 {
 	int line;
 	struct mfc_cache *c, *next;
@@ -1062,7 +1105,8 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
 
 	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
+		    (parent == -1 || parent == c->mfc_parent)) {
 			list_del_rcu(&c->list);
 			mroute_netlink_event(mrt, c, RTM_DELROUTE);
 			ipmr_cache_free(c);
@@ -1073,7 +1117,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
 }
 
 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
-			struct mfcctl *mfc, int mrtsock)
+			struct mfcctl *mfc, int mrtsock, int parent)
 {
 	bool found = false;
 	int line;
@@ -1086,7 +1130,8 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 
 	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
+		    (parent == -1 || parent == c->mfc_parent)) {
 			found = true;
 			break;
 		}
@@ -1103,7 +1148,8 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 		return 0;
 	}
 
-	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
+	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
+	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
 	c = ipmr_cache_alloc();
@@ -1218,7 +1264,7 @@ static void mrtsock_destruct(struct sock *sk)
 
 int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 {
-	int ret;
+	int ret, parent = 0;
 	struct vifctl vif;
 	struct mfcctl mfc;
 	struct net *net = sock_net(sk);
@@ -1287,16 +1333,22 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 	 */
 	case MRT_ADD_MFC:
 	case MRT_DEL_MFC:
+		parent = -1;
+	case MRT_ADD_MFC_PROXY:
+	case MRT_DEL_MFC_PROXY:
 		if (optlen != sizeof(mfc))
 			return -EINVAL;
 		if (copy_from_user(&mfc, optval, sizeof(mfc)))
 			return -EFAULT;
+		if (parent == 0)
+			parent = mfc.mfcc_parent;
 		rtnl_lock();
-		if (optname == MRT_DEL_MFC)
-			ret = ipmr_mfc_delete(mrt, &mfc);
+		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
+			ret = ipmr_mfc_delete(mrt, &mfc, parent);
 		else
 			ret = ipmr_mfc_add(net, mrt, &mfc,
-					   sk == rtnl_dereference(mrt->mroute_sk));
+					   sk == rtnl_dereference(mrt->mroute_sk),
+					   parent);
 		rtnl_unlock();
 		return ret;
 	/*
@@ -1749,17 +1801,28 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 {
 	int psend = -1;
 	int vif, ct;
+	int true_vifi = ipmr_find_vif(mrt, skb->dev);
 
 	vif = cache->mfc_parent;
 	cache->mfc_un.res.pkt++;
 	cache->mfc_un.res.bytes += skb->len;
 
+	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
+		struct mfc_cache *cache_proxy;
+
+		/* For an (*,G) entry, we only check that the incomming
+		 * interface is part of the static tree.
+		 */
+		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
+		if (cache_proxy &&
+		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
+			goto forward;
+	}
+
 	/*
 	 * Wrong interface: drop packet and (maybe) send PIM assert.
 	 */
 	if (mrt->vif_table[vif].dev != skb->dev) {
-		int true_vifi;
-
 		if (rt_is_output_route(skb_rtable(skb))) {
 			/* It is our own packet, looped back.
 			 * Very complicated situation...
@@ -1776,7 +1839,6 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 		}
 
 		cache->mfc_un.res.wrong_if++;
-		true_vifi = ipmr_find_vif(mrt, skb->dev);
 
 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
 		    /* pimsm uses asserts, when switching from RPT to SPT,
@@ -1794,15 +1856,34 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 		goto dont_forward;
 	}
 
+forward:
 	mrt->vif_table[vif].pkt_in++;
 	mrt->vif_table[vif].bytes_in += skb->len;
 
 	/*
 	 * Forward the frame
 	 */
+	if (cache->mfc_origin == htonl(INADDR_ANY) &&
+	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
+		if (true_vifi >= 0 &&
+		    true_vifi != cache->mfc_parent &&
+		    ip_hdr(skb)->ttl >
+				cache->mfc_un.res.ttls[cache->mfc_parent]) {
+			/* It's an (*,*) entry and the packet is not coming from
+			 * the upstream: forward the packet to the upstream
+			 * only.
+			 */
+			psend = cache->mfc_parent;
+			goto last_forward;
+		}
+		goto dont_forward;
+	}
 	for (ct = cache->mfc_un.res.maxvif - 1;
 	     ct >= cache->mfc_un.res.minvif; ct--) {
-		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
+		/* For (*,G) entry, don't forward to the incoming interface */
+		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
+		     ct != true_vifi) &&
+		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
@@ -1813,6 +1894,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			psend = ct;
 		}
 	}
+last_forward:
 	if (psend != -1) {
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -1902,6 +1984,13 @@ int ip_mr_input(struct sk_buff *skb)
 
 	/* already under rcu_read_lock() */
 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+	if (cache == NULL) {
+		int vif = ipmr_find_vif(mrt, skb->dev);
+
+		if (vif >= 0)
+			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
+						    vif);
+	}
 
 	/*
 	 * No usable cache entry
@@ -2107,7 +2196,12 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 
 	rcu_read_lock();
 	cache = ipmr_cache_find(mrt, saddr, daddr);
+	if (cache == NULL && skb->dev) {
+		int vif = ipmr_find_vif(mrt, skb->dev);
 
+		if (vif >= 0)
+			cache = ipmr_cache_find_any(mrt, daddr, vif);
+	}
 	if (cache == NULL) {
 		struct sk_buff *skb2;
 		struct iphdr *iph;
@@ -2609,16 +2703,16 @@ static int __net_init ipmr_net_init(struct net *net)
 
 #ifdef CONFIG_PROC_FS
 	err = -ENOMEM;
-	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
+	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
 		goto proc_vif_fail;
-	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
+	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
 		goto proc_cache_fail;
 #endif
 	return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 proc_vif_fail:
 	ipmr_rules_exit(net);
 #endif
@@ -2629,8 +2723,8 @@ fail:
 static void __net_exit ipmr_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-	proc_net_remove(net, "ip_mr_cache");
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_cache", net->proc_net);
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 #endif
 	ipmr_rules_exit(net);
 }
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d8d6f2a5bf12..ce2d43e1f09f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -241,8 +241,8 @@ config IP_NF_MANGLE
 	  To compile it as a module, choose M here.  If unsure, say N.
 
 config IP_NF_TARGET_CLUSTERIP
-	tristate "CLUSTERIP target support (EXPERIMENTAL)"
-	depends on IP_NF_MANGLE && EXPERIMENTAL
+	tristate "CLUSTERIP target support"
+	depends on IP_NF_MANGLE
 	depends on NF_CONNTRACK_IPV4
 	depends on NETFILTER_ADVANCED
 	select NF_CONNTRACK_MARK
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3ea4127404d6..7dc6a9743592 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -901,7 +901,7 @@ static int get_info(struct net *net, void __user *user,
 #endif
 	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
 				    "arptable_%s", name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -958,7 +958,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 	}
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 
 		duprintf("t->private->number = %u\n",
@@ -1001,7 +1001,7 @@ static int __do_replace(struct net *net, const char *name,
 
 	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
 				    "arptable_%s", name);
-	if (!t || IS_ERR(t)) {
+	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free_newinfo_counters_untrans;
 	}
@@ -1158,7 +1158,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	}
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, name);
-	if (!t || IS_ERR(t)) {
+	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
 	}
@@ -1646,7 +1646,7 @@ static int compat_get_entries(struct net *net,
 
 	xt_compat_lock(NFPROTO_ARP);
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 17c5e06da662..3efcf87400c3 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1090,7 +1090,7 @@ static int get_info(struct net *net, void __user *user,
 #endif
 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
 				    "iptable_%s", name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -1149,7 +1149,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 	}
 
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 		duprintf("t->private->number = %u\n", private->number);
 		if (get.size == private->size)
@@ -1189,7 +1189,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 
 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
 				    "iptable_%s", name);
-	if (!t || IS_ERR(t)) {
+	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free_newinfo_counters_untrans;
 	}
@@ -1347,7 +1347,7 @@ do_add_counters(struct net *net, const void __user *user,
 	}
 
 	t = xt_find_table_lock(net, AF_INET, name);
-	if (!t || IS_ERR(t)) {
+	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
 	}
@@ -1931,7 +1931,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
 
 	xt_compat_lock(AF_INET);
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t && !IS_ERR(t)) {
+	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 		duprintf("t->private->number = %u\n", private->number);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8ec724..04b18c1ac345 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -81,6 +81,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	niph->saddr = oiph->daddr;
 	niph->daddr = oiph->saddr;
 
+	skb_reset_transport_header(nskb);
 	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
 	memset(tcph, 0, sizeof(*tcph));
 	tcph->source = oth->dest;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index b5ef3cba2250..7d168dcbd135 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -88,10 +88,8 @@ static void ulog_send(unsigned int nlgroupnum)
 {
 	ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
 
-	if (timer_pending(&ub->timer)) {
-		pr_debug("ulog_send: timer was pending, deleting\n");
-		del_timer(&ub->timer);
-	}
+	pr_debug("ulog_send: timer is deleting\n");
+	del_timer(&ub->timer);
 
 	if (!ub->skb) {
 		pr_debug("ulog_send: nothing to send\n");
@@ -426,10 +424,8 @@ static void __exit ulog_tg_exit(void)
 	/* remove pending timers and free allocated skb's */
 	for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
 		ub = &ulog_buffers[i];
-		if (timer_pending(&ub->timer)) {
-			pr_debug("timer was pending, deleting\n");
-			del_timer(&ub->timer);
-		}
+		pr_debug("timer is deleting\n");
+		del_timer(&ub->timer);
 
 		if (ub->skb) {
 			kfree_skb(ub->skb);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index da2c8a368f68..eeaff7e4acb5 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -124,23 +124,28 @@ nf_nat_ipv4_fn(unsigned int hooknum,
 		ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
 		if (ret != NF_ACCEPT)
 			return ret;
-	} else
+	} else {
 		pr_debug("Already setup manip %s for ct %p\n",
 			 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
 			 ct);
+		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+			goto oif_changed;
+	}
 		break;
 
 	default:
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
 			     ctinfo == IP_CT_ESTABLISHED_REPLY);
-		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out)) {
-			nf_ct_kill_acct(ct, ctinfo, skb);
-			return NF_DROP;
-		}
+		if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+			goto oif_changed;
 	}
 
 	return nf_nat_packet(ct, ctinfo, hooknum, skb);
+
+oif_changed:
+	nf_ct_kill_acct(ct, ctinfo, skb);
+	return NF_DROP;
 }
 
 static unsigned int
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index fcdd0c2406e6..2820aa18b542 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -100,7 +100,6 @@ static unsigned int ipv4_helper(unsigned int hooknum,
100 enum ip_conntrack_info ctinfo; 100 enum ip_conntrack_info ctinfo;
101 const struct nf_conn_help *help; 101 const struct nf_conn_help *help;
102 const struct nf_conntrack_helper *helper; 102 const struct nf_conntrack_helper *helper;
103 unsigned int ret;
104 103
105 /* This is where we call the helper: as the packet goes out. */ 104 /* This is where we call the helper: as the packet goes out. */
106 ct = nf_ct_get(skb, &ctinfo); 105 ct = nf_ct_get(skb, &ctinfo);
@@ -116,13 +115,8 @@ static unsigned int ipv4_helper(unsigned int hooknum,
 	if (!helper)
 		return NF_ACCEPT;
 
-	ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
-			   ct, ctinfo);
-	if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
-		nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
-			      "nf_ct_%s: dropping packet", helper->name);
-	}
-	return ret;
+	return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
+			    ct, ctinfo);
 }
 
 static unsigned int ipv4_confirm(unsigned int hooknum,
@@ -420,54 +414,43 @@ static int ipv4_net_init(struct net *net)
 {
 	int ret = 0;
 
-	ret = nf_conntrack_l4proto_register(net,
-					    &nf_conntrack_l4proto_tcp4);
+	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_tcp4);
 	if (ret < 0) {
-		pr_err("nf_conntrack_l4proto_tcp4 :protocol register failed\n");
+		pr_err("nf_conntrack_tcp4: pernet registration failed\n");
 		goto out_tcp;
 	}
-	ret = nf_conntrack_l4proto_register(net,
-					    &nf_conntrack_l4proto_udp4);
+	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udp4);
 	if (ret < 0) {
-		pr_err("nf_conntrack_l4proto_udp4 :protocol register failed\n");
+		pr_err("nf_conntrack_udp4: pernet registration failed\n");
 		goto out_udp;
 	}
-	ret = nf_conntrack_l4proto_register(net,
-					    &nf_conntrack_l4proto_icmp);
+	ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_icmp);
 	if (ret < 0) {
-		pr_err("nf_conntrack_l4proto_icmp4 :protocol register failed\n");
+		pr_err("nf_conntrack_icmp4: pernet registration failed\n");
 		goto out_icmp;
 	}
-	ret = nf_conntrack_l3proto_register(net,
-					    &nf_conntrack_l3proto_ipv4);
+	ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv4);
 	if (ret < 0) {
-		pr_err("nf_conntrack_l3proto_ipv4 :protocol register failed\n");
+		pr_err("nf_conntrack_ipv4: pernet registration failed\n");
 		goto out_ipv4;
 	}
 	return 0;
 out_ipv4:
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_icmp);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmp);
 out_icmp:
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_udp4);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp4);
 out_udp:
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_tcp4);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp4);
 out_tcp:
 	return ret;
 }
 
 static void ipv4_net_exit(struct net *net)
 {
-	nf_conntrack_l3proto_unregister(net,
-					&nf_conntrack_l3proto_ipv4);
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_icmp);
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_udp4);
-	nf_conntrack_l4proto_unregister(net,
-					&nf_conntrack_l4proto_tcp4);
+	nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv4);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmp);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp4);
+	nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp4);
 }
 
 static struct pernet_operations ipv4_net_ops = {
@@ -500,16 +483,49 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
 		pr_err("nf_conntrack_ipv4: can't register hooks.\n");
 		goto cleanup_pernet;
 	}
+
+	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_tcp4);
+	if (ret < 0) {
+		pr_err("nf_conntrack_ipv4: can't register tcp4 proto.\n");
+		goto cleanup_hooks;
+	}
+
+	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udp4);
+	if (ret < 0) {
+		pr_err("nf_conntrack_ipv4: can't register udp4 proto.\n");
+		goto cleanup_tcp4;
+	}
+
+	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_icmp);
+	if (ret < 0) {
+		pr_err("nf_conntrack_ipv4: can't register icmpv4 proto.\n");
+		goto cleanup_udp4;
+	}
+
+	ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv4);
+	if (ret < 0) {
+		pr_err("nf_conntrack_ipv4: can't register ipv4 proto.\n");
+		goto cleanup_icmpv4;
+	}
+
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
 	ret = nf_conntrack_ipv4_compat_init();
 	if (ret < 0)
-		goto cleanup_hooks;
+		goto cleanup_proto;
 #endif
 	return ret;
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ cleanup_proto:
+	nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
+#endif
+ cleanup_icmpv4:
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
+ cleanup_udp4:
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp4);
+ cleanup_tcp4:
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
  cleanup_hooks:
 	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
-#endif
  cleanup_pernet:
 	unregister_pernet_subsys(&ipv4_net_ops);
  cleanup_sockopt:
@@ -523,6 +539,10 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
 	nf_conntrack_ipv4_compat_fini();
 #endif
+	nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp4);
+	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
 	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
 	unregister_pernet_subsys(&ipv4_net_ops);
 	nf_unregister_sockopt(&so_getorigdst);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 9682b36df38c..f2ca12794081 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -417,12 +417,12 @@ static int __net_init ip_conntrack_net_init(struct net *net)
 {
 	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
 
-	proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
+	proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
 	if (!proc)
 		goto err1;
 
-	proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
-					&ip_exp_file_ops);
+	proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
+			       &ip_exp_file_ops);
 	if (!proc_exp)
 		goto err2;
 
@@ -433,9 +433,9 @@ static int __net_init ip_conntrack_net_init(struct net *net)
 	return 0;
 
 err3:
-	proc_net_remove(net, "ip_conntrack_expect");
+	remove_proc_entry("ip_conntrack_expect", net->proc_net);
 err2:
-	proc_net_remove(net, "ip_conntrack");
+	remove_proc_entry("ip_conntrack", net->proc_net);
 err1:
 	return -ENOMEM;
 }
@@ -443,8 +443,8 @@ err1:
 static void __net_exit ip_conntrack_net_exit(struct net *net)
 {
 	remove_proc_entry("ip_conntrack", net->proc_net_stat);
-	proc_net_remove(net, "ip_conntrack_expect");
-	proc_net_remove(net, "ip_conntrack");
+	remove_proc_entry("ip_conntrack_expect", net->proc_net);
+	remove_proc_entry("ip_conntrack", net->proc_net);
 }
 
 static struct pernet_operations ip_conntrack_net_ops = {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8f3d05424a3e..55c4ee1bba06 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -738,6 +738,7 @@ struct proto ping_prot = {
 	.recvmsg	= ping_recvmsg,
 	.bind		= ping_bind,
 	.backlog_rcv	= ping_queue_rcv_skb,
+	.release_cb	= ip4_datagram_release_cb,
 	.hash		= ping_v4_hash,
 	.unhash		= ping_v4_unhash,
 	.get_port	= ping_v4_get_port,
@@ -888,7 +889,7 @@ static int ping_proc_register(struct net *net)
 	struct proc_dir_entry *p;
 	int rc = 0;
 
-	p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
+	p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops);
 	if (!p)
 		rc = -ENOMEM;
 	return rc;
@@ -896,7 +897,7 @@ static int ping_proc_register(struct net *net)
 
 static void ping_proc_unregister(struct net *net)
 {
-	proc_net_remove(net, "icmp");
+	remove_proc_entry("icmp", net->proc_net);
 }
 
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8de53e1ddd54..32030a24e776 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -471,28 +471,29 @@ static const struct file_operations netstat_seq_fops = {
 
 static __net_init int ip_proc_init_net(struct net *net)
 {
-	if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops))
+	if (!proc_create("sockstat", S_IRUGO, net->proc_net,
+			 &sockstat_seq_fops))
 		goto out_sockstat;
-	if (!proc_net_fops_create(net, "netstat", S_IRUGO, &netstat_seq_fops))
+	if (!proc_create("netstat", S_IRUGO, net->proc_net, &netstat_seq_fops))
 		goto out_netstat;
-	if (!proc_net_fops_create(net, "snmp", S_IRUGO, &snmp_seq_fops))
+	if (!proc_create("snmp", S_IRUGO, net->proc_net, &snmp_seq_fops))
 		goto out_snmp;
 
 	return 0;
 
 out_snmp:
-	proc_net_remove(net, "netstat");
+	remove_proc_entry("netstat", net->proc_net);
 out_netstat:
-	proc_net_remove(net, "sockstat");
+	remove_proc_entry("sockstat", net->proc_net);
 out_sockstat:
 	return -ENOMEM;
 }
 
 static __net_exit void ip_proc_exit_net(struct net *net)
 {
-	proc_net_remove(net, "snmp");
-	proc_net_remove(net, "netstat");
-	proc_net_remove(net, "sockstat");
+	remove_proc_entry("snmp", net->proc_net);
+	remove_proc_entry("netstat", net->proc_net);
+	remove_proc_entry("sockstat", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations ip_proc_ops = {
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 0f9d09f54bd9..ce848461acbb 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -37,6 +37,12 @@ const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
+	if (!prot->netns_ok) {
+		pr_err("Protocol %u is not namespace aware, cannot register.\n",
+		       protocol);
+		return -EINVAL;
+	}
+
 	return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
 			NULL, prot) ? 0 : -1;
 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 73d1e4df4bf6..53ddebc292b6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -894,6 +894,7 @@ struct proto raw_prot = {
 	.recvmsg	= raw_recvmsg,
 	.bind		= raw_bind,
 	.backlog_rcv	= raw_rcv_skb,
+	.release_cb	= ip4_datagram_release_cb,
 	.hash		= raw_hash_sk,
 	.unhash		= raw_unhash_sk,
 	.obj_size	= sizeof(struct raw_sock),
@@ -1049,7 +1050,7 @@ static const struct file_operations raw_seq_fops = {
 
 static __net_init int raw_init_net(struct net *net)
 {
-	if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops))
+	if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops))
 		return -ENOMEM;
 
 	return 0;
@@ -1057,7 +1058,7 @@ static __net_init int raw_init_net(struct net *net)
 
 static __net_exit void raw_exit_net(struct net *net)
 {
-	proc_net_remove(net, "raw");
+	remove_proc_entry("raw", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations raw_net_ops = {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 844a9ef60dbd..6e2851464f8f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -117,15 +117,11 @@
 #define RT_GC_TIMEOUT (300*HZ)
 
 static int ip_rt_max_size;
-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
-static int ip_rt_gc_interval __read_mostly = 60 * HZ;
-static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly = HZ / 50;
 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
 static int ip_rt_error_cost __read_mostly = HZ;
 static int ip_rt_error_burst __read_mostly = 5 * HZ;
-static int ip_rt_gc_elasticity __read_mostly = 8;
 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
@@ -384,8 +380,8 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
 {
 	struct proc_dir_entry *pde;
 
-	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
-			&rt_cache_seq_fops);
+	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
+			  &rt_cache_seq_fops);
 	if (!pde)
 		goto err1;
 
@@ -912,6 +908,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	struct dst_entry *dst = &rt->dst;
 	struct fib_result res;
 
+	if (dst_metric_locked(dst, RTAX_MTU))
+		return;
+
 	if (dst->dev->mtu < mtu)
 		return;
 
@@ -962,7 +961,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 }
 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
 
-void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 {
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
@@ -975,6 +974,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 		ip_rt_put(rt);
 	}
 }
+
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
+	struct dst_entry *dst;
+	bool new = false;
+
+	bh_lock_sock(sk);
+	rt = (struct rtable *) __sk_dst_get(sk);
+
+	if (sock_owned_by_user(sk) || !rt) {
+		__ipv4_sk_update_pmtu(skb, sk, mtu);
+		goto out;
+	}
+
+	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+	if (!__sk_dst_check(sk, 0)) {
+		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+		if (IS_ERR(rt))
+			goto out;
+
+		new = true;
+	}
+
+	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+
+	dst = dst_check(&rt->dst, 0);
+	if (!dst) {
+		if (new)
+			dst_release(&rt->dst);
+
+		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+		if (IS_ERR(rt))
+			goto out;
+
+		new = true;
+	}
+
+	if (new)
+		__sk_dst_set(sk, &rt->dst);
+
+out:
+	bh_unlock_sock(sk);
+}
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
 void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1166,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
 	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
 		mtu = dst_metric_raw(dst, RTAX_MTU);
 
-	if (mtu && rt_is_output_route(rt))
+	if (mtu)
 		return mtu;
 
 	mtu = dst->dev->mtu;
@@ -2373,6 +2419,11 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 }
 
 #ifdef CONFIG_SYSCTL
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
+static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
+static int ip_rt_gc_elasticity __read_mostly = 8;
+
 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
 					void __user *buffer,
 					size_t *lenp, loff_t *ppos)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b236ef04914f..ef54377fb11c 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -232,7 +232,8 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
  *
  * return false if we decode an option that should not be.
  */
-bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
+bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
+			    struct net *net, bool *ecn_ok)
 {
 	/* echoed timestamp, lowest bits contain options */
 	u32 options = tcp_opt->rcv_tsecr & TSMASK;
@@ -247,7 +248,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
 
 	tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
 	*ecn_ok = (options >> 5) & 1;
-	if (*ecn_ok && !sysctl_tcp_ecn)
+	if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn)
 		return false;
 
 	if (tcp_opt->sack_ok && !sysctl_tcp_sack)
@@ -295,7 +296,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
 
-	if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+	if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
 		goto out;
 
 	ret = NULL;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d84400b65049..960fd29d9b8e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
 #include <net/tcp_memcontrol.h>
 
 static int zero;
+static int one = 1;
 static int two = 2;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
@@ -232,8 +233,8 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
 	return 0;
 }
 
-int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
-			  size_t *lenp, loff_t *ppos)
+static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+				 size_t *lenp, loff_t *ppos)
 {
 	ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 	struct tcp_fastopen_context *ctxt;
@@ -538,13 +539,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "tcp_ecn",
-		.data		= &sysctl_tcp_ecn,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-	{
 		.procname	= "tcp_dsack",
 		.data		= &sysctl_tcp_dsack,
 		.maxlen		= sizeof(int),
@@ -556,14 +550,16 @@ static struct ctl_table ipv4_table[] = {
 		.data		= &sysctl_tcp_wmem,
 		.maxlen		= sizeof(sysctl_tcp_wmem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_rmem",
 		.data		= &sysctl_tcp_rmem,
 		.maxlen		= sizeof(sysctl_tcp_rmem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
 	},
 	{
 		.procname	= "tcp_app_win",
@@ -637,13 +633,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_tcp_congestion_control,
 	},
 	{
-		.procname	= "tcp_abc",
-		.data		= &sysctl_tcp_abc,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
 		.procname	= "tcp_mtu_probing",
 		.data		= &sysctl_tcp_mtu_probing,
 		.maxlen		= sizeof(int),
@@ -786,7 +775,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_rmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero
+		.extra1		= &one
 	},
 	{
 		.procname	= "udp_wmem_min",
@@ -794,7 +783,7 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= sizeof(sysctl_udp_wmem_min),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero
+		.extra1		= &one
 	},
 	{ }
 };
@@ -850,6 +839,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.proc_handler	= ipv4_ping_group_range,
 	},
 	{
+		.procname	= "tcp_ecn",
+		.data		= &init_net.ipv4.sysctl_tcp_ecn,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "tcp_mem",
 		.maxlen		= sizeof(init_net.ipv4.sysctl_tcp_mem),
 		.mode		= 0644,
@@ -882,6 +878,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 			&net->ipv4.sysctl_icmp_ratemask;
 		table[6].data =
 			&net->ipv4.sysctl_ping_group_range;
+		table[7].data =
+			&net->ipv4.sysctl_tcp_ecn;
 
 	/* Don't export sysctls to unprivileged users */
 	if (net->user_ns != &init_user_ns)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ca253635f7a..7a5ba48c2cc9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -400,6 +400,8 @@ void tcp_init_sock(struct sock *sk)
 	tcp_enable_early_retrans(tp);
 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
+	tp->tsoffset = 0;
+
 	sk->sk_state = TCP_CLOSE;
 
 	sk->sk_write_space = sk_stream_write_space;
@@ -895,6 +897,7 @@ new_segment:
 		get_page(page);
 		skb_fill_page_desc(skb, i, page, offset, copy);
 	}
+	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 
 	skb->len += copy;
 	skb->data_len += copy;
@@ -1428,12 +1431,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 }
 #endif
 
-static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
+static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
 	u32 offset;
 
-	skb_queue_walk(&sk->sk_receive_queue, skb) {
+	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
 		offset = seq - TCP_SKB_CB(skb)->seq;
 		if (tcp_hdr(skb)->syn)
 			offset--;
@@ -1441,6 +1444,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 			*off = offset;
 			return skb;
 		}
+		/* This looks weird, but this can happen if TCP collapsing
+		 * splitted a fat GRO packet, while we released socket lock
+		 * in skb_splice_bits()
+		 */
+		sk_eat_skb(sk, skb, false);
 	}
 	return NULL;
 }
@@ -1482,7 +1490,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			break;
 		}
 		used = recv_actor(desc, skb, offset, len);
-		if (used < 0) {
+		if (used <= 0) {
 			if (!copied)
 				copied = used;
 			break;
@@ -1520,8 +1528,10 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 	tcp_rcv_space_adjust(sk);
 
 	/* Clean up data we have read: This will do ACK frames. */
-	if (copied > 0)
+	if (copied > 0) {
+		tcp_recv_skb(sk, seq, &offset);
 		tcp_cleanup_rbuf(sk, copied);
+	}
 	return copied;
 }
 EXPORT_SYMBOL(tcp_read_sock);
@@ -2280,7 +2290,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->packets_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	tp->window_clamp = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
@@ -2704,6 +2713,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		else
 			err = -EINVAL;
 		break;
+	case TCP_TIMESTAMP:
+		if (!tp->repair)
+			err = -EPERM;
+		else
+			tp->tsoffset = val - tcp_time_stamp;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -2952,6 +2967,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
+	case TCP_TIMESTAMP:
+		val = tcp_time_stamp + tp->tsoffset;
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -3025,6 +3043,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 			       SKB_GSO_DODGY |
 			       SKB_GSO_TCP_ECN |
 			       SKB_GSO_TCPV6 |
+			       SKB_GSO_GRE |
 			       0) ||
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 		goto out;
@@ -3236,7 +3255,7 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
 		struct crypto_hash *hash;
 
 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-		if (!hash || IS_ERR(hash))
+		if (IS_ERR_OR_NULL(hash))
 			goto out_free;
 
 		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..019c2389a341 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,35 +310,24 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
 	int cnt; /* increase in packets */
 	unsigned int delta = 0;
+	u32 snd_cwnd = tp->snd_cwnd;
 
-	/* RFC3465: ABC Slow start
-	 * Increase only after a full MSS of bytes is acked
-	 *
-	 * TCP sender SHOULD increase cwnd by the number of
-	 * previously unacknowledged bytes ACKed by each incoming
-	 * acknowledgment, provided the increase is not more than L
-	 */
-	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
-		return;
+	if (unlikely(!snd_cwnd)) {
+		pr_err_once("snd_cwnd is nul, please report this bug.\n");
+		snd_cwnd = 1U;
+	}
 
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
 	else
-		cnt = tp->snd_cwnd; /* exponential increase */
-
-	/* RFC3465: ABC
-	 * We MAY increase by 2 if discovered delayed ack
-	 */
-	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
-		cnt <<= 1;
-	tp->bytes_acked = 0;
+		cnt = snd_cwnd; /* exponential increase */
 
 	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd_cnt -= tp->snd_cwnd;
+	while (tp->snd_cwnd_cnt >= snd_cwnd) {
+		tp->snd_cwnd_cnt -= snd_cwnd;
 		delta++;
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
@@ -372,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
372 /* In "safe" area, increase. */ 361 /* In "safe" area, increase. */
373 if (tp->snd_cwnd <= tp->snd_ssthresh) 362 if (tp->snd_cwnd <= tp->snd_ssthresh)
374 tcp_slow_start(tp); 363 tcp_slow_start(tp);
375
376 /* In dangerous area, increase slowly. */ 364 /* In dangerous area, increase slowly. */
377 else if (sysctl_tcp_abc) { 365 else
378 /* RFC3465: Appropriate Byte Count
379 * increase once for each full cwnd acked
380 */
381 if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
382 tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
383 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
384 tp->snd_cwnd++;
385 }
386 } else {
387 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 366 tcp_cong_avoid_ai(tp, tp->snd_cwnd);
388 }
389} 367}
390EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 368EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
391 369
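
The reworked tcp_slow_start() above drops the RFC 3465 (ABC) byte counting and guards against a zero snd_cwnd; the growth arithmetic itself is unchanged. A self-contained toy model of that arithmetic (userspace illustration only, not kernel code):

#include <stdio.h>

struct toy_tp { unsigned int snd_cwnd, snd_cwnd_cnt, snd_cwnd_clamp; };

static void toy_slow_start(struct toy_tp *tp, unsigned int cnt)
{
	unsigned int delta = 0;

	/* Note: a zero snd_cwnd would spin here forever, which is
	 * exactly why the patch adds the pr_err_once() + clamp guard. */
	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		delta++;
	}
	tp->snd_cwnd += delta;
	if (tp->snd_cwnd > tp->snd_cwnd_clamp)
		tp->snd_cwnd = tp->snd_cwnd_clamp;
}

int main(void)
{
	struct toy_tp tp = { .snd_cwnd = 10, .snd_cwnd_clamp = 1000 };
	int i;

	/* One ACK per in-flight segment, with cnt == snd_cwnd per the
	 * "exponential increase" branch: cwnd gains 1 per ACK. */
	for (i = 0; i < 10; i++)
		toy_slow_start(&tp, tp.snd_cwnd);
	printf("cwnd after ~one RTT: %u\n", tp.snd_cwnd);	/* 10 -> 20 */
	return 0;
}
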
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a13692560e63..a759e19496d2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,8 +81,6 @@ int sysctl_tcp_sack __read_mostly = 1;
81int sysctl_tcp_fack __read_mostly = 1; 81int sysctl_tcp_fack __read_mostly = 1;
82int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 82int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
83EXPORT_SYMBOL(sysctl_tcp_reordering); 83EXPORT_SYMBOL(sysctl_tcp_reordering);
84int sysctl_tcp_ecn __read_mostly = 2;
85EXPORT_SYMBOL(sysctl_tcp_ecn);
86int sysctl_tcp_dsack __read_mostly = 1; 84int sysctl_tcp_dsack __read_mostly = 1;
87int sysctl_tcp_app_win __read_mostly = 31; 85int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 1; 86int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -100,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
100int sysctl_tcp_thin_dupack __read_mostly; 98int sysctl_tcp_thin_dupack __read_mostly;
101 99
102int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
103int sysctl_tcp_abc __read_mostly;
104int sysctl_tcp_early_retrans __read_mostly = 2; 101int sysctl_tcp_early_retrans __read_mostly = 2;
105 102
106#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 103#define FLAG_DATA 0x01 /* Incoming frame contained data. */
@@ -2009,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
2009 tp->snd_cwnd_cnt = 0; 2006 tp->snd_cwnd_cnt = 0;
2010 tp->snd_cwnd_stamp = tcp_time_stamp; 2007 tp->snd_cwnd_stamp = tcp_time_stamp;
2011 tp->frto_counter = 0; 2008 tp->frto_counter = 0;
2012 tp->bytes_acked = 0;
2013 2009
2014 tp->reordering = min_t(unsigned int, tp->reordering, 2010 tp->reordering = min_t(unsigned int, tp->reordering,
2015 sysctl_tcp_reordering); 2011 sysctl_tcp_reordering);
@@ -2058,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
2058 tp->snd_cwnd_cnt = 0; 2054 tp->snd_cwnd_cnt = 0;
2059 tp->snd_cwnd_stamp = tcp_time_stamp; 2055 tp->snd_cwnd_stamp = tcp_time_stamp;
2060 2056
2061 tp->bytes_acked = 0;
2062 tcp_clear_retrans_partial(tp); 2057 tcp_clear_retrans_partial(tp);
2063 2058
2064 if (tcp_is_reno(tp)) 2059 if (tcp_is_reno(tp))
@@ -2686,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
2686 struct tcp_sock *tp = tcp_sk(sk); 2681 struct tcp_sock *tp = tcp_sk(sk);
2687 2682
2688 tp->high_seq = tp->snd_nxt; 2683 tp->high_seq = tp->snd_nxt;
2689 tp->bytes_acked = 0;
2690 tp->snd_cwnd_cnt = 0; 2684 tp->snd_cwnd_cnt = 0;
2691 tp->prior_cwnd = tp->snd_cwnd; 2685 tp->prior_cwnd = tp->snd_cwnd;
2692 tp->prr_delivered = 0; 2686 tp->prr_delivered = 0;
@@ -2737,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
2737 struct tcp_sock *tp = tcp_sk(sk); 2731 struct tcp_sock *tp = tcp_sk(sk);
2738 2732
2739 tp->prior_ssthresh = 0; 2733 tp->prior_ssthresh = 0;
2740 tp->bytes_acked = 0;
2741 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2734 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2742 tp->undo_marker = 0; 2735 tp->undo_marker = 0;
2743 tcp_init_cwnd_reduction(sk, set_ssthresh); 2736 tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3419,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3419{ 3412{
3420 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3413 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
3421 tp->snd_cwnd_cnt = 0; 3414 tp->snd_cwnd_cnt = 0;
3422 tp->bytes_acked = 0;
3423 TCP_ECN_queue_cwr(tp); 3415 TCP_ECN_queue_cwr(tp);
3424 tcp_moderate_cwnd(tp); 3416 tcp_moderate_cwnd(tp);
3425} 3417}
@@ -3504,6 +3496,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3504 } 3496 }
3505 } else { 3497 } else {
3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3498 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3499 if (!tcp_packets_in_flight(tp)) {
3500 tcp_enter_frto_loss(sk, 2, flag);
3501 return true;
3502 }
3503
3507 /* Prevent sending of new data. */ 3504 /* Prevent sending of new data. */
3508 tp->snd_cwnd = min(tp->snd_cwnd, 3505 tp->snd_cwnd = min(tp->snd_cwnd,
3509 tcp_packets_in_flight(tp)); 3506 tcp_packets_in_flight(tp));
@@ -3610,15 +3607,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3610 if (after(ack, prior_snd_una)) 3607 if (after(ack, prior_snd_una))
3611 flag |= FLAG_SND_UNA_ADVANCED; 3608 flag |= FLAG_SND_UNA_ADVANCED;
3612 3609
3613 if (sysctl_tcp_abc) {
3614 if (icsk->icsk_ca_state < TCP_CA_CWR)
3615 tp->bytes_acked += ack - prior_snd_una;
3616 else if (icsk->icsk_ca_state == TCP_CA_Loss)
3617 /* we assume just one segment left network */
3618 tp->bytes_acked += min(ack - prior_snd_una,
3619 tp->mss_cache);
3620 }
3621
3622 prior_fackets = tp->fackets_out; 3610 prior_fackets = tp->fackets_out;
3623 prior_in_flight = tcp_packets_in_flight(tp); 3611 prior_in_flight = tcp_packets_in_flight(tp);
3624 3612
@@ -3872,7 +3860,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3872 ++ptr; 3860 ++ptr;
3873 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3861 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3874 ++ptr; 3862 ++ptr;
3875 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 3863 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3876 return true; 3864 return true;
3877 } 3865 }
3878 return false; 3866 return false;
@@ -3896,7 +3884,11 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3896 if (tcp_parse_aligned_timestamp(tp, th)) 3884 if (tcp_parse_aligned_timestamp(tp, th))
3897 return true; 3885 return true;
3898 } 3886 }
3887
3899 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); 3888 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
3889 if (tp->rx_opt.saw_tstamp)
3890 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3891
3900 return true; 3892 return true;
3901} 3893}
3902 3894
@@ -5543,6 +5535,9 @@ slow_path:
5543 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5535 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5544 goto csum_error; 5536 goto csum_error;
5545 5537
5538 if (!th->ack && !th->rst)
5539 goto discard;
5540
5546 /* 5541 /*
5547 * Standard slow path. 5542 * Standard slow path.
5548 */ 5543 */
@@ -5551,7 +5546,7 @@ slow_path:
5551 return 0; 5546 return 0;
5552 5547
5553step5: 5548step5:
5554 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5549 if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
5555 goto discard; 5550 goto discard;
5556 5551
5557 /* ts_recent update must be made after we are sure that the packet 5552 /* ts_recent update must be made after we are sure that the packet
@@ -5646,8 +5641,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5646 * the remote receives only the retransmitted (regular) SYNs: either 5641 * the remote receives only the retransmitted (regular) SYNs: either
5647 * the original SYN-data or the corresponding SYN-ACK is lost. 5642 * the original SYN-data or the corresponding SYN-ACK is lost.
5648 */ 5643 */
5649 syn_drop = (cookie->len <= 0 && data && 5644 syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
5650 inet_csk(sk)->icsk_retransmits);
5651 5645
5652 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5646 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5653 5647
@@ -5675,6 +5669,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5675 int saved_clamp = tp->rx_opt.mss_clamp; 5669 int saved_clamp = tp->rx_opt.mss_clamp;
5676 5670
5677 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc); 5671 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
5672 if (tp->rx_opt.saw_tstamp)
5673 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5678 5674
5679 if (th->ack) { 5675 if (th->ack) {
5680 /* rfc793: 5676 /* rfc793:
@@ -5984,11 +5980,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5984 if (tcp_check_req(sk, skb, req, NULL, true) == NULL) 5980 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
5985 goto discard; 5981 goto discard;
5986 } 5982 }
5983
5984 if (!th->ack && !th->rst)
5985 goto discard;
5986
5987 if (!tcp_validate_incoming(sk, skb, th, 0)) 5987 if (!tcp_validate_incoming(sk, skb, th, 0))
5988 return 0; 5988 return 0;
5989 5989
5990 /* step 5: check the ACK field */ 5990 /* step 5: check the ACK field */
5991 if (th->ack) { 5991 if (true) {
5992 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; 5992 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
5993 5993
5994 switch (sk->sk_state) { 5994 switch (sk->sk_state) {
@@ -6138,8 +6138,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6138 } 6138 }
6139 break; 6139 break;
6140 } 6140 }
6141 } else 6141 }
6142 goto discard;
6143 6142
6144 /* ts_recent update must be made after we are sure that the packet 6143 /* ts_recent update must be made after we are sure that the packet
6145 * is in window. 6144 * is in window.
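
Both tcp_input.c timestamp hunks apply the same rule as the tcp.c sockopt: tsval leaves the host as tcp_time_stamp + tsoffset, and the echoed tsecr has tsoffset subtracted on receive, so everything downstream (PAWS, RTT sampling) still sees the local clock. A worked example of the round trip (illustration only; the values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tcp_time_stamp = 1000;	/* local jiffies-based clock */
	uint32_t tsoffset = 500000;	/* e.g. set via TCP_TIMESTAMP */

	uint32_t tsval_on_wire = tcp_time_stamp + tsoffset;	/* tcp_syn_options() */
	uint32_t tsecr_echoed = tsval_on_wire;			/* peer echoes tsval */
	uint32_t tsecr_local = tsecr_echoed - tsoffset;		/* rx de-offset */

	printf("wire %u -> local %u (matches clock %u)\n",
	       tsval_on_wire, tsecr_local, tcp_time_stamp);
	return 0;	/* unsigned wraparound keeps this safe near overflow */
}
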
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1ed230716d51..145d3bf8df86 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
369 * We do take care of PMTU discovery (RFC1191) special case : 369 * We do take care of PMTU discovery (RFC1191) special case :
370 * we can receive locally generated ICMP messages while socket is held. 370 * we can receive locally generated ICMP messages while socket is held.
371 */ 371 */
372 if (sock_owned_by_user(sk) && 372 if (sock_owned_by_user(sk)) {
373 type != ICMP_DEST_UNREACH && 373 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374 code != ICMP_FRAG_NEEDED) 374 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 375 }
376
377 if (sk->sk_state == TCP_CLOSE) 376 if (sk->sk_state == TCP_CLOSE)
378 goto out; 377 goto out;
379 378
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
497 * errors returned from accept(). 496 * errors returned from accept().
498 */ 497 */
499 inet_csk_reqsk_queue_drop(sk, req, prev); 498 inet_csk_reqsk_queue_drop(sk, req, prev);
499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
500 goto out; 500 goto out;
501 501
502 case TCP_SYN_SENT: 502 case TCP_SYN_SENT:
@@ -657,7 +657,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
657 * no RST generated if md5 hash doesn't match. 657 * no RST generated if md5 hash doesn't match.
658 */ 658 */
659 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev), 659 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
660 &tcp_hashinfo, ip_hdr(skb)->daddr, 660 &tcp_hashinfo, ip_hdr(skb)->saddr,
661 th->source, ip_hdr(skb)->daddr,
661 ntohs(th->source), inet_iif(skb)); 662 ntohs(th->source), inet_iif(skb));
662 /* don't send rst if it can't find key */ 663 /* don't send rst if it can't find key */
663 if (!sk1) 664 if (!sk1)
@@ -725,7 +726,7 @@ release_sk1:
725 */ 726 */
726 727
727static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, 728static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
728 u32 win, u32 ts, int oif, 729 u32 win, u32 tsval, u32 tsecr, int oif,
729 struct tcp_md5sig_key *key, 730 struct tcp_md5sig_key *key,
730 int reply_flags, u8 tos) 731 int reply_flags, u8 tos)
731{ 732{
@@ -746,12 +747,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
746 747
747 arg.iov[0].iov_base = (unsigned char *)&rep; 748 arg.iov[0].iov_base = (unsigned char *)&rep;
748 arg.iov[0].iov_len = sizeof(rep.th); 749 arg.iov[0].iov_len = sizeof(rep.th);
749 if (ts) { 750 if (tsecr) {
750 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 751 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
751 (TCPOPT_TIMESTAMP << 8) | 752 (TCPOPT_TIMESTAMP << 8) |
752 TCPOLEN_TIMESTAMP); 753 TCPOLEN_TIMESTAMP);
753 rep.opt[1] = htonl(tcp_time_stamp); 754 rep.opt[1] = htonl(tsval);
754 rep.opt[2] = htonl(ts); 755 rep.opt[2] = htonl(tsecr);
755 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; 756 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
756 } 757 }
757 758
@@ -766,7 +767,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
766 767
767#ifdef CONFIG_TCP_MD5SIG 768#ifdef CONFIG_TCP_MD5SIG
768 if (key) { 769 if (key) {
769 int offset = (ts) ? 3 : 0; 770 int offset = (tsecr) ? 3 : 0;
770 771
771 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | 772 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
772 (TCPOPT_NOP << 16) | 773 (TCPOPT_NOP << 16) |
@@ -801,6 +802,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
801 802
802 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 803 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
803 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 804 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
805 tcp_time_stamp + tcptw->tw_ts_offset,
804 tcptw->tw_ts_recent, 806 tcptw->tw_ts_recent,
805 tw->tw_bound_dev_if, 807 tw->tw_bound_dev_if,
806 tcp_twsk_md5_key(tcptw), 808 tcp_twsk_md5_key(tcptw),
@@ -820,6 +822,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
820 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? 822 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
821 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, 823 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
822 tcp_rsk(req)->rcv_nxt, req->rcv_wnd, 824 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
825 tcp_time_stamp,
823 req->ts_recent, 826 req->ts_recent,
824 0, 827 0,
825 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 828 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -1501,8 +1504,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1501 * clogging syn queue with openreqs with exponentially increasing 1504 * clogging syn queue with openreqs with exponentially increasing
1502 * timeout. 1505 * timeout.
1503 */ 1506 */
1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1507 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1508 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1505 goto drop; 1509 goto drop;
1510 }
1506 1511
1507 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1512 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1508 if (!req) 1513 if (!req)
@@ -1568,7 +1573,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1568 goto drop_and_free; 1573 goto drop_and_free;
1569 1574
1570 if (!want_cookie || tmp_opt.tstamp_ok) 1575 if (!want_cookie || tmp_opt.tstamp_ok)
1571 TCP_ECN_create_request(req, skb); 1576 TCP_ECN_create_request(req, skb, sock_net(sk));
1572 1577
1573 if (want_cookie) { 1578 if (want_cookie) {
1574 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1579 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1667,6 +1672,7 @@ drop_and_release:
1667drop_and_free: 1672drop_and_free:
1668 reqsk_free(req); 1673 reqsk_free(req);
1669drop: 1674drop:
1675 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1670 return 0; 1676 return 0;
1671} 1677}
1672EXPORT_SYMBOL(tcp_v4_conn_request); 1678EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1767,10 +1773,8 @@ exit:
1767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1773 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1768 return NULL; 1774 return NULL;
1769put_and_exit: 1775put_and_exit:
1770 tcp_clear_xmit_timers(newsk); 1776 inet_csk_prepare_forced_close(newsk);
1771 tcp_cleanup_congestion_control(newsk); 1777 tcp_done(newsk);
1772 bh_unlock_sock(newsk);
1773 sock_put(newsk);
1774 goto exit; 1778 goto exit;
1775} 1779}
1776EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1780EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
@@ -2076,6 +2080,7 @@ do_time_wait:
2076 case TCP_TW_SYN: { 2080 case TCP_TW_SYN: {
2077 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), 2081 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2078 &tcp_hashinfo, 2082 &tcp_hashinfo,
2083 iph->saddr, th->source,
2079 iph->daddr, th->dest, 2084 iph->daddr, th->dest,
2080 inet_iif(skb)); 2085 inet_iif(skb));
2081 if (sk2) { 2086 if (sk2) {
@@ -2611,7 +2616,7 @@ EXPORT_SYMBOL(tcp_proc_register);
2611 2616
2612void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) 2617void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2613{ 2618{
2614 proc_net_remove(net, afinfo->name); 2619 remove_proc_entry(afinfo->name, net->proc_net);
2615} 2620}
2616EXPORT_SYMBOL(tcp_proc_unregister); 2621EXPORT_SYMBOL(tcp_proc_unregister);
2617 2622
@@ -2890,6 +2895,7 @@ EXPORT_SYMBOL(tcp_prot);
2890 2895
2891static int __net_init tcp_sk_init(struct net *net) 2896static int __net_init tcp_sk_init(struct net *net)
2892{ 2897{
2898 net->ipv4.sysctl_tcp_ecn = 2;
2893 return 0; 2899 return 0;
2894} 2900}
2895 2901
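
The new LINUX_MIB_LISTENOVERFLOWS / LINUX_MIB_LISTENDROPS bumps in tcp_v4_conn_request() make listen-queue drops observable from userspace: they surface as the ListenOverflows and ListenDrops fields of the TcpExt lines in /proc/net/netstat (the nstat tool from iproute2 reads the same file). A quick reader, for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f)
		return 1;
	/* TcpExt appears twice: a header line of field names followed
	 * by a line of values; print both. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "TcpExt:", 7))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
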
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f35f2dfb6401..b83a49cc3816 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -102,6 +102,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
102 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 102 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
103 103
104 if (tmp_opt.saw_tstamp) { 104 if (tmp_opt.saw_tstamp) {
105 tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
105 tmp_opt.ts_recent = tcptw->tw_ts_recent; 106 tmp_opt.ts_recent = tcptw->tw_ts_recent;
106 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; 107 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
107 paws_reject = tcp_paws_reject(&tmp_opt, th->rst); 108 paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
@@ -288,6 +289,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
288 tcptw->tw_rcv_wnd = tcp_receive_window(tp); 289 tcptw->tw_rcv_wnd = tcp_receive_window(tp);
289 tcptw->tw_ts_recent = tp->rx_opt.ts_recent; 290 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
290 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 291 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
292 tcptw->tw_ts_offset = tp->tsoffset;
291 293
292#if IS_ENABLED(CONFIG_IPV6) 294#if IS_ENABLED(CONFIG_IPV6)
293 if (tw->tw_family == PF_INET6) { 295 if (tw->tw_family == PF_INET6) {
@@ -446,7 +448,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
446 */ 448 */
447 newtp->snd_cwnd = TCP_INIT_CWND; 449 newtp->snd_cwnd = TCP_INIT_CWND;
448 newtp->snd_cwnd_cnt = 0; 450 newtp->snd_cwnd_cnt = 0;
449 newtp->bytes_acked = 0;
450 451
451 newtp->frto_counter = 0; 452 newtp->frto_counter = 0;
452 newtp->frto_highmark = 0; 453 newtp->frto_highmark = 0;
@@ -500,6 +501,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
500 newtp->rx_opt.ts_recent_stamp = 0; 501 newtp->rx_opt.ts_recent_stamp = 0;
501 newtp->tcp_header_len = sizeof(struct tcphdr); 502 newtp->tcp_header_len = sizeof(struct tcphdr);
502 } 503 }
504 newtp->tsoffset = 0;
503#ifdef CONFIG_TCP_MD5SIG 505#ifdef CONFIG_TCP_MD5SIG
504 newtp->md5sig_info = NULL; /*XXX*/ 506 newtp->md5sig_info = NULL; /*XXX*/
505 if (newtp->af_specific->md5_lookup(sk, newsk)) 507 if (newtp->af_specific->md5_lookup(sk, newsk))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5d451593ef16..fd0cea114b5d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -314,7 +314,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
314 struct tcp_sock *tp = tcp_sk(sk); 314 struct tcp_sock *tp = tcp_sk(sk);
315 315
316 tp->ecn_flags = 0; 316 tp->ecn_flags = 0;
317 if (sysctl_tcp_ecn == 1) { 317 if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
318 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; 318 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
319 tp->ecn_flags = TCP_ECN_OK; 319 tp->ecn_flags = TCP_ECN_OK;
320 } 320 }
@@ -622,7 +622,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
622 622
623 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 623 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
624 opts->options |= OPTION_TS; 624 opts->options |= OPTION_TS;
625 opts->tsval = TCP_SKB_CB(skb)->when; 625 opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
626 opts->tsecr = tp->rx_opt.ts_recent; 626 opts->tsecr = tp->rx_opt.ts_recent;
627 remaining -= TCPOLEN_TSTAMP_ALIGNED; 627 remaining -= TCPOLEN_TSTAMP_ALIGNED;
628 } 628 }
@@ -806,7 +806,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
806 806
807 if (likely(tp->rx_opt.tstamp_ok)) { 807 if (likely(tp->rx_opt.tstamp_ok)) {
808 opts->options |= OPTION_TS; 808 opts->options |= OPTION_TS;
809 opts->tsval = tcb ? tcb->when : 0; 809 opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
810 opts->tsecr = tp->rx_opt.ts_recent; 810 opts->tsecr = tp->rx_opt.ts_recent;
811 size += TCPOLEN_TSTAMP_ALIGNED; 811 size += TCPOLEN_TSTAMP_ALIGNED;
812 } 812 }
@@ -1331,7 +1331,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1331/* Remove acked data from a packet in the transmit queue. */ 1331/* Remove acked data from a packet in the transmit queue. */
1332int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 1332int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1333{ 1333{
1334 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1334 if (skb_unclone(skb, GFP_ATOMIC))
1335 return -ENOMEM; 1335 return -ENOMEM;
1336 1336
1337 __pskb_trim_head(skb, len); 1337 __pskb_trim_head(skb, len);
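
Several hunks in this series (tcp_output.c above, xfrm4_input.c and ah6.c below) convert the open-coded "skb_cloned() && pskb_expand_head()" test to skb_unclone(). The helper reads roughly as follows (a sketch of the 3.9-era include/linux/skbuff.h definition; consult the header for the authoritative version):

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	/* Take a private copy of the header only when the skb really
	 * is shared with a clone; otherwise this is free. */
	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
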
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 4526fe68e60e..d4943f67aff2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -234,7 +234,7 @@ static __init int tcpprobe_init(void)
234 if (!tcp_probe.log) 234 if (!tcp_probe.log)
235 goto err0; 235 goto err0;
236 236
237 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &tcpprobe_fops)) 237 if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
238 goto err0; 238 goto err0;
239 239
240 ret = register_jprobe(&tcp_jprobe); 240 ret = register_jprobe(&tcp_jprobe);
@@ -244,7 +244,7 @@ static __init int tcpprobe_init(void)
244 pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize); 244 pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
245 return 0; 245 return 0;
246 err1: 246 err1:
247 proc_net_remove(&init_net, procname); 247 remove_proc_entry(procname, init_net.proc_net);
248 err0: 248 err0:
249 kfree(tcp_probe.log); 249 kfree(tcp_probe.log);
250 return ret; 250 return ret;
@@ -253,7 +253,7 @@ module_init(tcpprobe_init);
253 253
254static __exit void tcpprobe_exit(void) 254static __exit void tcpprobe_exit(void)
255{ 255{
256 proc_net_remove(&init_net, procname); 256 remove_proc_entry(procname, init_net.proc_net);
257 unregister_jprobe(&tcp_jprobe); 257 unregister_jprobe(&tcp_jprobe);
258 kfree(tcp_probe.log); 258 kfree(tcp_probe.log);
259} 259}
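
The tcp_probe.c conversion is part of a tree-wide procfs cleanup: proc_net_fops_create()/proc_net_remove() give way to the generic proc_create()/remove_proc_entry() pair, with the per-namespace directory (net->proc_net, or init_net.proc_net) passed explicitly. The resulting pattern, sketched with hypothetical "foo" names:

static const struct file_operations foo_fops;	/* placeholder fops */

static int __net_init foo_proc_init(struct net *net)
{
	if (!proc_create("foo", S_IRUGO, net->proc_net, &foo_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit foo_proc_exit(struct net *net)
{
	remove_proc_entry("foo", net->proc_net);
}
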
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 79c8dbe59b54..265c42cf963c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -139,6 +139,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
139{ 139{
140 struct sock *sk2; 140 struct sock *sk2;
141 struct hlist_nulls_node *node; 141 struct hlist_nulls_node *node;
142 kuid_t uid = sock_i_uid(sk);
142 143
143 sk_nulls_for_each(sk2, node, &hslot->head) 144 sk_nulls_for_each(sk2, node, &hslot->head)
144 if (net_eq(sock_net(sk2), net) && 145 if (net_eq(sock_net(sk2), net) &&
@@ -147,6 +148,8 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
147 (!sk2->sk_reuse || !sk->sk_reuse) && 148 (!sk2->sk_reuse || !sk->sk_reuse) &&
148 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || 149 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
149 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 150 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
151 (!sk2->sk_reuseport || !sk->sk_reuseport ||
152 !uid_eq(uid, sock_i_uid(sk2))) &&
150 (*saddr_comp)(sk, sk2)) { 153 (*saddr_comp)(sk, sk2)) {
151 if (bitmap) 154 if (bitmap)
152 __set_bit(udp_sk(sk2)->udp_port_hash >> log, 155 __set_bit(udp_sk(sk2)->udp_port_hash >> log,
@@ -169,6 +172,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
169{ 172{
170 struct sock *sk2; 173 struct sock *sk2;
171 struct hlist_nulls_node *node; 174 struct hlist_nulls_node *node;
175 kuid_t uid = sock_i_uid(sk);
172 int res = 0; 176 int res = 0;
173 177
174 spin_lock(&hslot2->lock); 178 spin_lock(&hslot2->lock);
@@ -179,6 +183,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
179 (!sk2->sk_reuse || !sk->sk_reuse) && 183 (!sk2->sk_reuse || !sk->sk_reuse) &&
180 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || 184 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
181 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 185 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
186 (!sk2->sk_reuseport || !sk->sk_reuseport ||
187 !uid_eq(uid, sock_i_uid(sk2))) &&
182 (*saddr_comp)(sk, sk2)) { 188 (*saddr_comp)(sk, sk2)) {
183 res = 1; 189 res = 1;
184 break; 190 break;
@@ -337,26 +343,26 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
337 !ipv6_only_sock(sk)) { 343 !ipv6_only_sock(sk)) {
338 struct inet_sock *inet = inet_sk(sk); 344 struct inet_sock *inet = inet_sk(sk);
339 345
340 score = (sk->sk_family == PF_INET ? 1 : 0); 346 score = (sk->sk_family == PF_INET ? 2 : 1);
341 if (inet->inet_rcv_saddr) { 347 if (inet->inet_rcv_saddr) {
342 if (inet->inet_rcv_saddr != daddr) 348 if (inet->inet_rcv_saddr != daddr)
343 return -1; 349 return -1;
344 score += 2; 350 score += 4;
345 } 351 }
346 if (inet->inet_daddr) { 352 if (inet->inet_daddr) {
347 if (inet->inet_daddr != saddr) 353 if (inet->inet_daddr != saddr)
348 return -1; 354 return -1;
349 score += 2; 355 score += 4;
350 } 356 }
351 if (inet->inet_dport) { 357 if (inet->inet_dport) {
352 if (inet->inet_dport != sport) 358 if (inet->inet_dport != sport)
353 return -1; 359 return -1;
354 score += 2; 360 score += 4;
355 } 361 }
356 if (sk->sk_bound_dev_if) { 362 if (sk->sk_bound_dev_if) {
357 if (sk->sk_bound_dev_if != dif) 363 if (sk->sk_bound_dev_if != dif)
358 return -1; 364 return -1;
359 score += 2; 365 score += 4;
360 } 366 }
361 } 367 }
362 return score; 368 return score;
@@ -365,7 +371,6 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
365/* 371/*
366 * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num) 372 * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
367 */ 373 */
368#define SCORE2_MAX (1 + 2 + 2 + 2)
369static inline int compute_score2(struct sock *sk, struct net *net, 374static inline int compute_score2(struct sock *sk, struct net *net,
370 __be32 saddr, __be16 sport, 375 __be32 saddr, __be16 sport,
371 __be32 daddr, unsigned int hnum, int dif) 376 __be32 daddr, unsigned int hnum, int dif)
@@ -380,21 +385,21 @@ static inline int compute_score2(struct sock *sk, struct net *net,
380 if (inet->inet_num != hnum) 385 if (inet->inet_num != hnum)
381 return -1; 386 return -1;
382 387
383 score = (sk->sk_family == PF_INET ? 1 : 0); 388 score = (sk->sk_family == PF_INET ? 2 : 1);
384 if (inet->inet_daddr) { 389 if (inet->inet_daddr) {
385 if (inet->inet_daddr != saddr) 390 if (inet->inet_daddr != saddr)
386 return -1; 391 return -1;
387 score += 2; 392 score += 4;
388 } 393 }
389 if (inet->inet_dport) { 394 if (inet->inet_dport) {
390 if (inet->inet_dport != sport) 395 if (inet->inet_dport != sport)
391 return -1; 396 return -1;
392 score += 2; 397 score += 4;
393 } 398 }
394 if (sk->sk_bound_dev_if) { 399 if (sk->sk_bound_dev_if) {
395 if (sk->sk_bound_dev_if != dif) 400 if (sk->sk_bound_dev_if != dif)
396 return -1; 401 return -1;
397 score += 2; 402 score += 4;
398 } 403 }
399 } 404 }
400 return score; 405 return score;
@@ -409,19 +414,29 @@ static struct sock *udp4_lib_lookup2(struct net *net,
409{ 414{
410 struct sock *sk, *result; 415 struct sock *sk, *result;
411 struct hlist_nulls_node *node; 416 struct hlist_nulls_node *node;
412 int score, badness; 417 int score, badness, matches = 0, reuseport = 0;
418 u32 hash = 0;
413 419
414begin: 420begin:
415 result = NULL; 421 result = NULL;
416 badness = -1; 422 badness = 0;
417 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { 423 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
418 score = compute_score2(sk, net, saddr, sport, 424 score = compute_score2(sk, net, saddr, sport,
419 daddr, hnum, dif); 425 daddr, hnum, dif);
420 if (score > badness) { 426 if (score > badness) {
421 result = sk; 427 result = sk;
422 badness = score; 428 badness = score;
423 if (score == SCORE2_MAX) 429 reuseport = sk->sk_reuseport;
424 goto exact_match; 430 if (reuseport) {
431 hash = inet_ehashfn(net, daddr, hnum,
432 saddr, htons(sport));
433 matches = 1;
434 }
435 } else if (score == badness && reuseport) {
436 matches++;
437 if (((u64)hash * matches) >> 32 == 0)
438 result = sk;
439 hash = next_pseudo_random32(hash);
425 } 440 }
426 } 441 }
427 /* 442 /*
@@ -431,9 +446,7 @@ begin:
431 */ 446 */
432 if (get_nulls_value(node) != slot2) 447 if (get_nulls_value(node) != slot2)
433 goto begin; 448 goto begin;
434
435 if (result) { 449 if (result) {
436exact_match:
437 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) 450 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
438 result = NULL; 451 result = NULL;
439 else if (unlikely(compute_score2(result, net, saddr, sport, 452 else if (unlikely(compute_score2(result, net, saddr, sport,
@@ -457,7 +470,8 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
457 unsigned short hnum = ntohs(dport); 470 unsigned short hnum = ntohs(dport);
458 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); 471 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
459 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; 472 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
460 int score, badness; 473 int score, badness, matches = 0, reuseport = 0;
474 u32 hash = 0;
461 475
462 rcu_read_lock(); 476 rcu_read_lock();
463 if (hslot->count > 10) { 477 if (hslot->count > 10) {
@@ -486,13 +500,24 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
486 } 500 }
487begin: 501begin:
488 result = NULL; 502 result = NULL;
489 badness = -1; 503 badness = 0;
490 sk_nulls_for_each_rcu(sk, node, &hslot->head) { 504 sk_nulls_for_each_rcu(sk, node, &hslot->head) {
491 score = compute_score(sk, net, saddr, hnum, sport, 505 score = compute_score(sk, net, saddr, hnum, sport,
492 daddr, dport, dif); 506 daddr, dport, dif);
493 if (score > badness) { 507 if (score > badness) {
494 result = sk; 508 result = sk;
495 badness = score; 509 badness = score;
510 reuseport = sk->sk_reuseport;
511 if (reuseport) {
512 hash = inet_ehashfn(net, daddr, hnum,
513 saddr, htons(sport));
514 matches = 1;
515 }
516 } else if (score == badness && reuseport) {
517 matches++;
518 if (((u64)hash * matches) >> 32 == 0)
519 result = sk;
520 hash = next_pseudo_random32(hash);
496 } 521 }
497 } 522 }
498 /* 523 /*
@@ -971,7 +996,7 @@ back_from_confirm:
971 sizeof(struct udphdr), &ipc, &rt, 996 sizeof(struct udphdr), &ipc, &rt,
972 msg->msg_flags); 997 msg->msg_flags);
973 err = PTR_ERR(skb); 998 err = PTR_ERR(skb);
974 if (skb && !IS_ERR(skb)) 999 if (!IS_ERR_OR_NULL(skb))
975 err = udp_send_skb(skb, fl4); 1000 err = udp_send_skb(skb, fl4);
976 goto out; 1001 goto out;
977 } 1002 }
@@ -1952,6 +1977,7 @@ struct proto udp_prot = {
1952 .recvmsg = udp_recvmsg, 1977 .recvmsg = udp_recvmsg,
1953 .sendpage = udp_sendpage, 1978 .sendpage = udp_sendpage,
1954 .backlog_rcv = __udp_queue_rcv_skb, 1979 .backlog_rcv = __udp_queue_rcv_skb,
1980 .release_cb = ip4_datagram_release_cb,
1955 .hash = udp_lib_hash, 1981 .hash = udp_lib_hash,
1956 .unhash = udp_lib_unhash, 1982 .unhash = udp_lib_unhash,
1957 .rehash = udp_v4_rehash, 1983 .rehash = udp_v4_rehash,
@@ -2096,7 +2122,7 @@ EXPORT_SYMBOL(udp_proc_register);
2096 2122
2097void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) 2123void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
2098{ 2124{
2099 proc_net_remove(net, afinfo->name); 2125 remove_proc_entry(afinfo->name, net->proc_net);
2100} 2126}
2101EXPORT_SYMBOL(udp_proc_unregister); 2127EXPORT_SYMBOL(udp_proc_unregister);
2102 2128
@@ -2279,7 +2305,8 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2279 /* Packet is from an untrusted source, reset gso_segs. */ 2305 /* Packet is from an untrusted source, reset gso_segs. */
2280 int type = skb_shinfo(skb)->gso_type; 2306 int type = skb_shinfo(skb)->gso_type;
2281 2307
2282 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || 2308 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
2309 SKB_GSO_GRE) ||
2283 !(type & (SKB_GSO_UDP)))) 2310 !(type & (SKB_GSO_UDP))))
2284 goto out; 2311 goto out;
2285 2312
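
The udp.c scoring changes implement SO_REUSEPORT: equally-scored sockets owned by the same user may share a port, and the test "((u64)hash * matches) >> 32 == 0" holds when hash < 2^32/matches, i.e. with probability roughly 1/matches over a uniform 32-bit hash, so each of the matching sockets ends up selected with about equal likelihood per flow. From userspace it looks like this (a minimal sketch; assumes a kernel with this series, with a fallback define since SO_REUSEPORT is 15 on most architectures but may be missing from older libc headers):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_REUSEPORT
#define SO_REUSEPORT 15
#endif

static int bound_udp_socket(unsigned short port)
{
	struct sockaddr_in sa;
	int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = htonl(INADDR_ANY);
	sa.sin_port = htons(5353);
	return bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ? -1 : fd;
}

int main(void)
{
	/* Both binds succeed; without SO_REUSEPORT on both sockets the
	 * second bind would fail with EADDRINUSE. */
	printf("fd1=%d fd2=%d\n", bound_udp_socket(), bound_udp_socket());
	return 0;
}
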
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 06814b6216dc..1f12c8b45864 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -132,7 +132,7 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
132 * header and optional ESP marker bytes) and then modify the 132 * header and optional ESP marker bytes) and then modify the
133 * protocol to ESP, and then call into the transform receiver. 133 * protocol to ESP, and then call into the transform receiver.
134 */ 134 */
135 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 135 if (skb_unclone(skb, GFP_ATOMIC))
136 goto drop; 136 goto drop;
137 137
138 /* Now we can update and verify the packet length... */ 138 /* Now we can update and verify the packet length... */
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index ddee0a099a2c..fe5189e2e114 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -142,8 +142,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
142 for_each_input_rcu(rcv_notify_handlers, handler) 142 for_each_input_rcu(rcv_notify_handlers, handler)
143 handler->handler(skb); 143 handler->handler(skb);
144 144
145 if (skb_cloned(skb) && 145 err = skb_unclone(skb, GFP_ATOMIC);
146 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 146 if (err)
147 goto out; 147 goto out;
148 148
149 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 149 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 3be0ac2c1920..9a459be24af7 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -262,21 +262,56 @@ static struct ctl_table xfrm4_policy_table[] = {
262 { } 262 { }
263}; 263};
264 264
265static struct ctl_table_header *sysctl_hdr; 265static int __net_init xfrm4_net_init(struct net *net)
266#endif
267
268static void __init xfrm4_policy_init(void)
269{ 266{
270 xfrm_policy_register_afinfo(&xfrm4_policy_afinfo); 267 struct ctl_table *table;
268 struct ctl_table_header *hdr;
269
270 table = xfrm4_policy_table;
271 if (!net_eq(net, &init_net)) {
272 table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
273 if (!table)
274 goto err_alloc;
275
276 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
277 }
278
279 hdr = register_net_sysctl(net, "net/ipv4", table);
280 if (!hdr)
281 goto err_reg;
282
283 net->ipv4.xfrm4_hdr = hdr;
284 return 0;
285
286err_reg:
287 if (!net_eq(net, &init_net))
288 kfree(table);
289err_alloc:
290 return -ENOMEM;
271} 291}
272 292
273static void __exit xfrm4_policy_fini(void) 293static void __net_exit xfrm4_net_exit(struct net *net)
274{ 294{
275#ifdef CONFIG_SYSCTL 295 struct ctl_table *table;
276 if (sysctl_hdr) 296
277 unregister_net_sysctl_table(sysctl_hdr); 297 if (net->ipv4.xfrm4_hdr == NULL)
298 return;
299
300 table = net->ipv4.xfrm4_hdr->ctl_table_arg;
301 unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
302 if (!net_eq(net, &init_net))
303 kfree(table);
304}
305
306static struct pernet_operations __net_initdata xfrm4_net_ops = {
307 .init = xfrm4_net_init,
308 .exit = xfrm4_net_exit,
309};
278#endif 310#endif
279 xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); 311
312static void __init xfrm4_policy_init(void)
313{
314 xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
280} 315}
281 316
282void __init xfrm4_init(void) 317void __init xfrm4_init(void)
@@ -286,8 +321,7 @@ void __init xfrm4_init(void)
286 xfrm4_state_init(); 321 xfrm4_state_init();
287 xfrm4_policy_init(); 322 xfrm4_policy_init();
288#ifdef CONFIG_SYSCTL 323#ifdef CONFIG_SYSCTL
289 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4", 324 register_pernet_subsys(&xfrm4_net_ops);
290 xfrm4_policy_table);
291#endif 325#endif
292} 326}
293 327
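
The xfrm4_policy.c rewrite follows the standard recipe for making a sysctl table per-namespace: clone the table for non-init namespaces, repoint .data at per-net storage, and drive registration from pernet_operations so every namespace gets its own copy on creation. The registration boilerplate, sketched with hypothetical "foo" names:

#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	/* allocate and register per-namespace state here */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	/* .init runs for init_net immediately and for every namespace
	 * created afterwards; .exit runs as each one is torn down. */
	return register_pernet_subsys(&foo_net_ops);
}
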
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 4f7fe7270e37..ed0b9e2e797a 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -11,7 +11,7 @@ menuconfig IPV6
11 You will still be able to do traditional IPv4 networking as well. 11 You will still be able to do traditional IPv4 networking as well.
12 12
13 For general information about IPv6, see 13 For general information about IPv6, see
14 <http://playground.sun.com/pub/ipng/html/ipng-main.html>. 14 <https://en.wikipedia.org/wiki/IPv6>.
15 For Linux IPv6 development information, see <http://www.linux-ipv6.org>. 15 For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
16 For specific information about IPv6 under Linux, read the HOWTO at 16 For specific information about IPv6 under Linux, read the HOWTO at
17 <http://www.bieringer.de/linux/IPv6/>. 17 <http://www.bieringer.de/linux/IPv6/>.
@@ -50,16 +50,15 @@ config IPV6_ROUTER_PREF
50 If unsure, say N. 50 If unsure, say N.
51 51
52config IPV6_ROUTE_INFO 52config IPV6_ROUTE_INFO
53 bool "IPv6: Route Information (RFC 4191) support (EXPERIMENTAL)" 53 bool "IPv6: Route Information (RFC 4191) support"
54 depends on IPV6_ROUTER_PREF && EXPERIMENTAL 54 depends on IPV6_ROUTER_PREF
55 ---help--- 55 ---help---
56 This is experimental support of Route Information. 56 This is experimental support of Route Information.
57 57
58 If unsure, say N. 58 If unsure, say N.
59 59
60config IPV6_OPTIMISTIC_DAD 60config IPV6_OPTIMISTIC_DAD
61 bool "IPv6: Enable RFC 4429 Optimistic DAD (EXPERIMENTAL)" 61 bool "IPv6: Enable RFC 4429 Optimistic DAD"
62 depends on EXPERIMENTAL
63 ---help--- 62 ---help---
64 This is experimental support for optimistic Duplicate 63 This is experimental support for optimistic Duplicate
65 Address Detection. It allows for autoconfigured addresses 64 Address Detection. It allows for autoconfigured addresses
@@ -105,8 +104,7 @@ config INET6_IPCOMP
105 If unsure, say Y. 104 If unsure, say Y.
106 105
107config IPV6_MIP6 106config IPV6_MIP6
108 tristate "IPv6: Mobility (EXPERIMENTAL)" 107 tristate "IPv6: Mobility"
109 depends on EXPERIMENTAL
110 select XFRM 108 select XFRM
111 ---help--- 109 ---help---
112 Support for IPv6 Mobility described in RFC 3775. 110 Support for IPv6 Mobility described in RFC 3775.
@@ -150,8 +148,7 @@ config INET6_XFRM_MODE_BEET
150 If unsure, say Y. 148 If unsure, say Y.
151 149
152config INET6_XFRM_MODE_ROUTEOPTIMIZATION 150config INET6_XFRM_MODE_ROUTEOPTIMIZATION
153 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)" 151 tristate "IPv6: MIPv6 route optimization mode"
154 depends on EXPERIMENTAL
155 select XFRM 152 select XFRM
156 ---help--- 153 ---help---
157 Support for MIPv6 route optimization mode. 154 Support for MIPv6 route optimization mode.
@@ -171,8 +168,8 @@ config IPV6_SIT
171 Saying M here will produce a module called sit. If unsure, say Y. 168 Saying M here will produce a module called sit. If unsure, say Y.
172 169
173config IPV6_SIT_6RD 170config IPV6_SIT_6RD
174 bool "IPv6: IPv6 Rapid Deployment (6RD) (EXPERIMENTAL)" 171 bool "IPv6: IPv6 Rapid Deployment (6RD)"
175 depends on IPV6_SIT && EXPERIMENTAL 172 depends on IPV6_SIT
176 default n 173 default n
177 ---help--- 174 ---help---
178 IPv6 Rapid Deployment (6rd; draft-ietf-softwire-ipv6-6rd) builds upon 175 IPv6 Rapid Deployment (6rd; draft-ietf-softwire-ipv6-6rd) builds upon
@@ -219,7 +216,6 @@ config IPV6_GRE
219 216
220config IPV6_MULTIPLE_TABLES 217config IPV6_MULTIPLE_TABLES
221 bool "IPv6: Multiple Routing Tables" 218 bool "IPv6: Multiple Routing Tables"
222 depends on EXPERIMENTAL
223 select FIB_RULES 219 select FIB_RULES
224 ---help--- 220 ---help---
225 Support multiple routing tables. 221 Support multiple routing tables.
@@ -239,8 +235,8 @@ config IPV6_SUBTREES
239 If unsure, say N. 235 If unsure, say N.
240 236
241config IPV6_MROUTE 237config IPV6_MROUTE
242 bool "IPv6: multicast routing (EXPERIMENTAL)" 238 bool "IPv6: multicast routing"
243 depends on IPV6 && EXPERIMENTAL 239 depends on IPV6
244 ---help--- 240 ---help---
245 Experimental support for IPv6 multicast forwarding. 241 Experimental support for IPv6 multicast forwarding.
246 If unsure, say N. 242 If unsure, say N.
@@ -260,7 +256,7 @@ config IPV6_MROUTE_MULTIPLE_TABLES
260 If unsure, say N. 256 If unsure, say N.
261 257
262config IPV6_PIMSM_V2 258config IPV6_PIMSM_V2
263 bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" 259 bool "IPv6: PIM-SM version 2 support"
264 depends on IPV6_MROUTE 260 depends on IPV6_MROUTE
265 ---help--- 261 ---help---
266 Support for IPv6 PIM multicast routing protocol PIM-SMv2. 262 Support for IPv6 PIM multicast routing protocol PIM-SMv2.
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2068ac4fbdad..309af19a0a0a 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_IPV6_SIT) += sit.o
40obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 40obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
41obj-$(CONFIG_IPV6_GRE) += ip6_gre.o 41obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
42 42
43obj-y += addrconf_core.o exthdrs_core.o 43obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o
44obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6_offload) 44obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
45 45
46obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o 46obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6fca01f136ad..4dc0d44a5d31 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -110,10 +110,6 @@ static inline u32 cstamp_delta(unsigned long cstamp)
110 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; 110 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
111} 111}
112 112
113#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
114#define ADDRCONF_TIMER_FUZZ (HZ / 4)
115#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
116
117#ifdef CONFIG_SYSCTL 113#ifdef CONFIG_SYSCTL
118static void addrconf_sysctl_register(struct inet6_dev *idev); 114static void addrconf_sysctl_register(struct inet6_dev *idev);
119static void addrconf_sysctl_unregister(struct inet6_dev *idev); 115static void addrconf_sysctl_unregister(struct inet6_dev *idev);
@@ -154,6 +150,11 @@ static void addrconf_type_change(struct net_device *dev,
154 unsigned long event); 150 unsigned long event);
155static int addrconf_ifdown(struct net_device *dev, int how); 151static int addrconf_ifdown(struct net_device *dev, int how);
156 152
153static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
154 int plen,
155 const struct net_device *dev,
156 u32 flags, u32 noflags);
157
157static void addrconf_dad_start(struct inet6_ifaddr *ifp); 158static void addrconf_dad_start(struct inet6_ifaddr *ifp);
158static void addrconf_dad_timer(unsigned long data); 159static void addrconf_dad_timer(unsigned long data);
159static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 160static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
@@ -243,6 +244,9 @@ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
243const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; 244const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
244const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT; 245const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
245const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; 246const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
247const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
248const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
249const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
246 250
247/* Check if a valid qdisc is available */ 251/* Check if a valid qdisc is available */
248static inline bool addrconf_qdisc_ok(const struct net_device *dev) 252static inline bool addrconf_qdisc_ok(const struct net_device *dev)
@@ -250,12 +254,6 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
250 return !qdisc_tx_is_noop(dev); 254 return !qdisc_tx_is_noop(dev);
251} 255}
252 256
253/* Check if a route is valid prefix route */
254static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
255{
256 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
257}
258
259static void addrconf_del_timer(struct inet6_ifaddr *ifp) 257static void addrconf_del_timer(struct inet6_ifaddr *ifp)
260{ 258{
261 if (del_timer(&ifp->timer)) 259 if (del_timer(&ifp->timer))
@@ -433,6 +431,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
433 /* protected by rtnl_lock */ 431 /* protected by rtnl_lock */
434 rcu_assign_pointer(dev->ip6_ptr, ndev); 432 rcu_assign_pointer(dev->ip6_ptr, ndev);
435 433
434 /* Join interface-local all-node multicast group */
435 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
436
436 /* Join all-node multicast group */ 437 /* Join all-node multicast group */
437 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 438 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
438 439
@@ -534,8 +535,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
534 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC); 535 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC);
535 return; 536 return;
536errout: 537errout:
537 if (err < 0) 538 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
538 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
539} 539}
540 540
541static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { 541static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
@@ -617,10 +617,15 @@ static void dev_forward_change(struct inet6_dev *idev)
617 if (idev->cnf.forwarding) 617 if (idev->cnf.forwarding)
618 dev_disable_lro(dev); 618 dev_disable_lro(dev);
619 if (dev->flags & IFF_MULTICAST) { 619 if (dev->flags & IFF_MULTICAST) {
620 if (idev->cnf.forwarding) 620 if (idev->cnf.forwarding) {
621 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 621 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
622 else 622 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
623 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
624 } else {
623 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); 625 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
626 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
627 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
628 }
624 } 629 }
625 630
626 list_for_each_entry(ifa, &idev->addr_list, if_list) { 631 list_for_each_entry(ifa, &idev->addr_list, if_list) {
@@ -942,17 +947,15 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
942 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { 947 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
943 struct in6_addr prefix; 948 struct in6_addr prefix;
944 struct rt6_info *rt; 949 struct rt6_info *rt;
945 struct net *net = dev_net(ifp->idev->dev);
946 struct flowi6 fl6 = {};
947 950
948 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 951 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
949 fl6.flowi6_oif = ifp->idev->dev->ifindex;
950 fl6.daddr = prefix;
951 rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
952 RT6_LOOKUP_F_IFACE);
953 952
954 if (rt != net->ipv6.ip6_null_entry && 953 rt = addrconf_get_prefix_route(&prefix,
955 addrconf_is_prefix_route(rt)) { 954 ifp->prefix_len,
955 ifp->idev->dev,
956 0, RTF_GATEWAY | RTF_DEFAULT);
957
958 if (rt) {
956 if (onlink == 0) { 959 if (onlink == 0) {
957 ip6_del_rt(rt); 960 ip6_del_rt(rt);
958 rt = NULL; 961 rt = NULL;
@@ -1055,7 +1058,7 @@ retry:
1055 ipv6_add_addr(idev, &addr, tmp_plen, 1058 ipv6_add_addr(idev, &addr, tmp_plen,
1056 ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, 1059 ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
1057 addr_flags) : NULL; 1060 addr_flags) : NULL;
1058 if (!ift || IS_ERR(ift)) { 1061 if (IS_ERR_OR_NULL(ift)) {
1059 in6_ifa_put(ifp); 1062 in6_ifa_put(ifp);
1060 in6_dev_put(idev); 1063 in6_dev_put(idev);
1061 pr_info("%s: retry temporary address regeneration\n", __func__); 1064 pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1664,6 +1667,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1664 if (dev->addr_len != IEEE802154_ADDR_LEN) 1667 if (dev->addr_len != IEEE802154_ADDR_LEN)
1665 return -1; 1668 return -1;
1666 memcpy(eui, dev->dev_addr, 8); 1669 memcpy(eui, dev->dev_addr, 8);
1670 eui[0] ^= 2;
1667 return 0; 1671 return 0;
1668} 1672}
1669 1673
@@ -1878,7 +1882,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1878 continue; 1882 continue;
1879 if ((rt->rt6i_flags & flags) != flags) 1883 if ((rt->rt6i_flags & flags) != flags)
1880 continue; 1884 continue;
1881 if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0)) 1885 if ((rt->rt6i_flags & noflags) != 0)
1882 continue; 1886 continue;
1883 dst_hold(&rt->dst); 1887 dst_hold(&rt->dst);
1884 break; 1888 break;
@@ -2083,7 +2087,7 @@ ok:
2083 addr_type&IPV6_ADDR_SCOPE_MASK, 2087 addr_type&IPV6_ADDR_SCOPE_MASK,
2084 addr_flags); 2088 addr_flags);
2085 2089
2086 if (!ifp || IS_ERR(ifp)) { 2090 if (IS_ERR_OR_NULL(ifp)) {
2087 in6_dev_put(in6_dev); 2091 in6_dev_put(in6_dev);
2088 return; 2092 return;
2089 } 2093 }
@@ -3321,14 +3325,14 @@ static const struct file_operations if6_fops = {
3321 3325
3322static int __net_init if6_proc_net_init(struct net *net) 3326static int __net_init if6_proc_net_init(struct net *net)
3323{ 3327{
3324 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) 3328 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
3325 return -ENOMEM; 3329 return -ENOMEM;
3326 return 0; 3330 return 0;
3327} 3331}
3328 3332
3329static void __net_exit if6_proc_net_exit(struct net *net) 3333static void __net_exit if6_proc_net_exit(struct net *net)
3330{ 3334{
3331 proc_net_remove(net, "if_inet6"); 3335 remove_proc_entry("if_inet6", net->proc_net);
3332} 3336}
3333 3337
3334static struct pernet_operations if6_proc_net_ops = { 3338static struct pernet_operations if6_proc_net_ops = {
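
The one-line addrconf_ifid_eui64() fix above flips the universal/local bit, which RFC 4291 Appendix A requires when turning an EUI-64 into a Modified EUI-64 interface identifier; the IEEE 802.15.4 path had been skipping the inversion. A self-contained illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void eui64_to_ifid(uint8_t ifid[8], const uint8_t eui[8])
{
	memcpy(ifid, eui, 8);
	ifid[0] ^= 0x02;	/* invert the universal/local ("u") bit */
}

int main(void)
{
	const uint8_t eui[8] = { 0x00, 0x11, 0x22, 0xff,
				 0xfe, 0x33, 0x44, 0x55 };
	uint8_t ifid[8];

	eui64_to_ifid(ifid, eui);
	printf("ifid[0]: %02x -> %02x\n", eui[0], ifid[0]);	/* 00 -> 02 */
	return 0;
}
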
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b043c60429bd..6b793bfc0e10 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -811,11 +811,10 @@ static struct pernet_operations inet6_net_ops = {
811 811
812static int __init inet6_init(void) 812static int __init inet6_init(void)
813{ 813{
814 struct sk_buff *dummy_skb;
815 struct list_head *r; 814 struct list_head *r;
816 int err = 0; 815 int err = 0;
817 816
818 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); 817 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
819 818
820 /* Register the socket-side information for inet6_create. */ 819 /* Register the socket-side information for inet6_create. */
821 for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) 820 for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
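
The inet6_init() change above replaces a dummy sk_buff pointer with FIELD_SIZEOF(), which measures a struct member without needing an instance. The macro is simply (a sketch of the linux/kernel.h definition of that era):

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

so the whole check stays a pure compile-time constant expression:

BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
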
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ecc35b93314b..bb02e176cb70 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
472 skb->network_header += ah_hlen; 472 skb->network_header += ah_hlen;
473 memcpy(skb_network_header(skb), work_iph, hdr_len); 473 memcpy(skb_network_header(skb), work_iph, hdr_len);
474 __skb_pull(skb, ah_hlen + hdr_len); 474 __skb_pull(skb, ah_hlen + hdr_len);
475 skb_set_transport_header(skb, -hdr_len); 475 if (x->props.mode == XFRM_MODE_TUNNEL)
476 skb_reset_transport_header(skb);
477 else
478 skb_set_transport_header(skb, -hdr_len);
476out: 479out:
477 kfree(AH_SKB_CB(skb)->tmp); 480 kfree(AH_SKB_CB(skb)->tmp);
478 xfrm_input_resume(skb, err); 481 xfrm_input_resume(skb, err);
@@ -518,8 +521,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
518 521
519 /* We are going to _remove_ AH header to keep sockets happy, 522 /* We are going to _remove_ AH header to keep sockets happy,
520 * so... Later this can change. */ 523 * so... Later this can change. */
521 if (skb_cloned(skb) && 524 if (skb_unclone(skb, GFP_ATOMIC))
522 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
523 goto out; 525 goto out;
524 526
525 skb->ip_summed = CHECKSUM_NONE; 527 skb->ip_summed = CHECKSUM_NONE;
@@ -593,9 +595,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
593 595
594 skb->network_header += ah_hlen; 596 skb->network_header += ah_hlen;
595 memcpy(skb_network_header(skb), work_iph, hdr_len); 597 memcpy(skb_network_header(skb), work_iph, hdr_len);
596 skb->transport_header = skb->network_header;
597 __skb_pull(skb, ah_hlen + hdr_len); 598 __skb_pull(skb, ah_hlen + hdr_len);
598 599
600 if (x->props.mode == XFRM_MODE_TUNNEL)
601 skb_reset_transport_header(skb);
602 else
603 skb_set_transport_header(skb, -hdr_len);
604
599 err = nexthdr; 605 err = nexthdr;
600 606
601out_free: 607out_free:
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 757a810d8f15..5a80f15a9de2 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -47,7 +47,7 @@
47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr); 47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
48 48
49/* Big ac list lock for all the sockets */ 49/* Big ac list lock for all the sockets */
50static DEFINE_RWLOCK(ipv6_sk_ac_lock); 50static DEFINE_SPINLOCK(ipv6_sk_ac_lock);
51 51
52 52
53/* 53/*
@@ -128,10 +128,10 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
128 128
129 err = ipv6_dev_ac_inc(dev, addr); 129 err = ipv6_dev_ac_inc(dev, addr);
130 if (!err) { 130 if (!err) {
131 write_lock_bh(&ipv6_sk_ac_lock); 131 spin_lock_bh(&ipv6_sk_ac_lock);
132 pac->acl_next = np->ipv6_ac_list; 132 pac->acl_next = np->ipv6_ac_list;
133 np->ipv6_ac_list = pac; 133 np->ipv6_ac_list = pac;
134 write_unlock_bh(&ipv6_sk_ac_lock); 134 spin_unlock_bh(&ipv6_sk_ac_lock);
135 pac = NULL; 135 pac = NULL;
136 } 136 }
137 137
@@ -152,7 +152,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
152 struct ipv6_ac_socklist *pac, *prev_pac; 152 struct ipv6_ac_socklist *pac, *prev_pac;
153 struct net *net = sock_net(sk); 153 struct net *net = sock_net(sk);
154 154
155 write_lock_bh(&ipv6_sk_ac_lock); 155 spin_lock_bh(&ipv6_sk_ac_lock);
156 prev_pac = NULL; 156 prev_pac = NULL;
157 for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) { 157 for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
158 if ((ifindex == 0 || pac->acl_ifindex == ifindex) && 158 if ((ifindex == 0 || pac->acl_ifindex == ifindex) &&
@@ -161,7 +161,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
161 prev_pac = pac; 161 prev_pac = pac;
162 } 162 }
163 if (!pac) { 163 if (!pac) {
164 write_unlock_bh(&ipv6_sk_ac_lock); 164 spin_unlock_bh(&ipv6_sk_ac_lock);
165 return -ENOENT; 165 return -ENOENT;
166 } 166 }
167 if (prev_pac) 167 if (prev_pac)
@@ -169,7 +169,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
169 else 169 else
170 np->ipv6_ac_list = pac->acl_next; 170 np->ipv6_ac_list = pac->acl_next;
171 171
172 write_unlock_bh(&ipv6_sk_ac_lock); 172 spin_unlock_bh(&ipv6_sk_ac_lock);
173 173
174 rcu_read_lock(); 174 rcu_read_lock();
175 dev = dev_get_by_index_rcu(net, pac->acl_ifindex); 175 dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
@@ -192,10 +192,10 @@ void ipv6_sock_ac_close(struct sock *sk)
192 if (!np->ipv6_ac_list) 192 if (!np->ipv6_ac_list)
193 return; 193 return;
194 194
195 write_lock_bh(&ipv6_sk_ac_lock); 195 spin_lock_bh(&ipv6_sk_ac_lock);
196 pac = np->ipv6_ac_list; 196 pac = np->ipv6_ac_list;
197 np->ipv6_ac_list = NULL; 197 np->ipv6_ac_list = NULL;
198 write_unlock_bh(&ipv6_sk_ac_lock); 198 spin_unlock_bh(&ipv6_sk_ac_lock);
199 199
200 prev_index = 0; 200 prev_index = 0;
201 rcu_read_lock(); 201 rcu_read_lock();
@@ -509,7 +509,7 @@ static const struct file_operations ac6_seq_fops = {
509 509
510int __net_init ac6_proc_init(struct net *net) 510int __net_init ac6_proc_init(struct net *net)
511{ 511{
512 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) 512 if (!proc_create("anycast6", S_IRUGO, net->proc_net, &ac6_seq_fops))
513 return -ENOMEM; 513 return -ENOMEM;
514 514
515 return 0; 515 return 0;
@@ -517,7 +517,7 @@ int __net_init ac6_proc_init(struct net *net)
517 517
518void ac6_proc_exit(struct net *net) 518void ac6_proc_exit(struct net *net)
519{ 519{
520 proc_net_remove(net, "anycast6"); 520 remove_proc_entry("anycast6", net->proc_net);
521} 521}
522#endif 522#endif
523 523
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8edf2601065a..f5a54782a340 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -30,6 +30,7 @@
30#include <net/transp_v6.h> 30#include <net/transp_v6.h>
31#include <net/ip6_route.h> 31#include <net/ip6_route.h>
32#include <net/tcp_states.h> 32#include <net/tcp_states.h>
33#include <net/dsfield.h>
33 34
34#include <linux/errqueue.h> 35#include <linux/errqueue.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
@@ -356,12 +357,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
356 sin->sin6_port = serr->port; 357 sin->sin6_port = serr->port;
357 sin->sin6_scope_id = 0; 358 sin->sin6_scope_id = 0;
358 if (skb->protocol == htons(ETH_P_IPV6)) { 359 if (skb->protocol == htons(ETH_P_IPV6)) {
359 sin->sin6_addr = 360 const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
360 *(struct in6_addr *)(nh + serr->addr_offset); 361 struct ipv6hdr, daddr);
362 sin->sin6_addr = ip6h->daddr;
361 if (np->sndflow) 363 if (np->sndflow)
362 sin->sin6_flowinfo = 364 sin->sin6_flowinfo = ip6_flowinfo(ip6h);
363 (*(__be32 *)(nh + serr->addr_offset - 24) &
364 IPV6_FLOWINFO_MASK);
365 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 365 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
366 sin->sin6_scope_id = IP6CB(skb)->iif; 366 sin->sin6_scope_id = IP6CB(skb)->iif;
367 } else { 367 } else {
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
380 if (skb->protocol == htons(ETH_P_IPV6)) { 380 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 381 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 382 if (np->rxopt.all)
383 datagram_recv_ctl(sk, msg, skb); 383 ip6_datagram_recv_ctl(sk, msg, skb);
384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
385 sin->sin6_scope_id = IP6CB(skb)->iif; 385 sin->sin6_scope_id = IP6CB(skb)->iif;
386 } else { 386 } else {
@@ -468,7 +468,8 @@ out:
468} 468}
469 469
470 470
471int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
472 struct sk_buff *skb)
472{ 473{
473 struct ipv6_pinfo *np = inet6_sk(sk); 474 struct ipv6_pinfo *np = inet6_sk(sk);
474 struct inet6_skb_parm *opt = IP6CB(skb); 475 struct inet6_skb_parm *opt = IP6CB(skb);
@@ -488,13 +489,14 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
488 } 489 }
489 490
490 if (np->rxopt.bits.rxtclass) { 491 if (np->rxopt.bits.rxtclass) {
491 int tclass = ipv6_tclass(ipv6_hdr(skb)); 492 int tclass = ipv6_get_dsfield(ipv6_hdr(skb));
492 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); 493 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
493 } 494 }
494 495
495 if (np->rxopt.bits.rxflow && (*(__be32 *)nh & IPV6_FLOWINFO_MASK)) { 496 if (np->rxopt.bits.rxflow) {
496 __be32 flowinfo = *(__be32 *)nh & IPV6_FLOWINFO_MASK; 497 __be32 flowinfo = ip6_flowinfo((struct ipv6hdr *)nh);
497 put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); 498 if (flowinfo)
499 put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
498 } 500 }
499 501
500 /* HbH is allowed only once */ 502 /* HbH is allowed only once */
@@ -597,11 +599,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
597 } 599 }
598 return 0; 600 return 0;
599} 601}
602EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
600 603
601int datagram_send_ctl(struct net *net, struct sock *sk, 604int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
602 struct msghdr *msg, struct flowi6 *fl6, 605 struct msghdr *msg, struct flowi6 *fl6,
603 struct ipv6_txoptions *opt, 606 struct ipv6_txoptions *opt,
604 int *hlimit, int *tclass, int *dontfrag) 607 int *hlimit, int *tclass, int *dontfrag)
605{ 608{
606 struct in6_pktinfo *src_info; 609 struct in6_pktinfo *src_info;
607 struct cmsghdr *cmsg; 610 struct cmsghdr *cmsg;
@@ -871,4 +874,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
871exit_f: 874exit_f:
872 return err; 875 return err;
873} 876}
874EXPORT_SYMBOL_GPL(datagram_send_ctl); 877EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
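
Both flowinfo reads now go through ip6_flowinfo(), replacing the brittle "addr_offset - 24" pointer arithmetic with a field access on a real struct ipv6hdr recovered via container_of(); the helper is essentially:

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
        /* traffic class + flow label: everything below the version nibble */
        return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

Deriving the header from the stored daddr pointer means the offset can never again drift out of sync with the header layout.
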
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 282f3723ee19..40ffd72243a4 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
300 300
301 pskb_trim(skb, skb->len - alen - padlen - 2); 301 pskb_trim(skb, skb->len - alen - padlen - 2);
302 __skb_pull(skb, hlen); 302 __skb_pull(skb, hlen);
303 skb_set_transport_header(skb, -hdr_len); 303 if (x->props.mode == XFRM_MODE_TUNNEL)
304 skb_reset_transport_header(skb);
305 else
306 skb_set_transport_header(skb, -hdr_len);
304 307
305 err = nexthdr[1]; 308 err = nexthdr[1];
306 309
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 473f628f9f20..07a7d65a7cb6 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -553,7 +553,8 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
553 const unsigned char *nh = skb_network_header(skb); 553 const unsigned char *nh = skb_network_header(skb);
554 554
555 if (nh[optoff + 1] == 2) { 555 if (nh[optoff + 1] == 2) {
556 IP6CB(skb)->ra = optoff; 556 IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
557 memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
557 return true; 558 return true;
558 } 559 }
559 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", 560 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
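
Rather than remembering where the Router Alert option sat (an offset every consumer had to re-parse), the control block now records that the option was seen (IP6SKB_ROUTERALERT) together with its 16-bit value. A consumer check collapses to a flag test plus one comparison, sketched here to match the ip6_input.c hunk further down:

struct inet6_skb_parm *opt = IP6CB(skb);

if ((opt->flags & IP6SKB_ROUTERALERT) &&
    opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD))
        ;       /* hop-by-hop Router Alert with value 0: an MLD message */
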
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b4a9fd51dae7..fff5bdd8b680 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
81 return net->ipv6.icmp_sk[smp_processor_id()]; 81 return net->ipv6.icmp_sk[smp_processor_id()];
82} 82}
83 83
84static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
85 u8 type, u8 code, int offset, __be32 info)
86{
87 struct net *net = dev_net(skb->dev);
88
89 if (type == ICMPV6_PKT_TOOBIG)
90 ip6_update_pmtu(skb, net, info, 0, 0);
91 else if (type == NDISC_REDIRECT)
92 ip6_redirect(skb, net, 0, 0);
93}
94
84static int icmpv6_rcv(struct sk_buff *skb); 95static int icmpv6_rcv(struct sk_buff *skb);
85 96
86static const struct inet6_protocol icmpv6_protocol = { 97static const struct inet6_protocol icmpv6_protocol = {
87 .handler = icmpv6_rcv, 98 .handler = icmpv6_rcv,
99 .err_handler = icmpv6_err,
88 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 100 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
89}; 101};
90 102
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 30647857a375..b386a2ce4c6f 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -32,6 +32,9 @@ int inet6_csk_bind_conflict(const struct sock *sk,
32{ 32{
33 const struct sock *sk2; 33 const struct sock *sk2;
34 const struct hlist_node *node; 34 const struct hlist_node *node;
35 int reuse = sk->sk_reuse;
36 int reuseport = sk->sk_reuseport;
37 kuid_t uid = sock_i_uid((struct sock *)sk);
35 38
36 /* We must walk the whole port owner list in this case. -DaveM */ 39 /* We must walk the whole port owner list in this case. -DaveM */
37 /* 40 /*
@@ -42,11 +45,17 @@ int inet6_csk_bind_conflict(const struct sock *sk,
42 if (sk != sk2 && 45 if (sk != sk2 &&
43 (!sk->sk_bound_dev_if || 46 (!sk->sk_bound_dev_if ||
44 !sk2->sk_bound_dev_if || 47 !sk2->sk_bound_dev_if ||
45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 48 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
46 (!sk->sk_reuse || !sk2->sk_reuse || 49 if ((!reuse || !sk2->sk_reuse ||
47 sk2->sk_state == TCP_LISTEN) && 50 sk2->sk_state == TCP_LISTEN) &&
48 ipv6_rcv_saddr_equal(sk, sk2)) 51 (!reuseport || !sk2->sk_reuseport ||
49 break; 52 (sk2->sk_state != TCP_TIME_WAIT &&
53 !uid_eq(uid,
54 sock_i_uid((struct sock *)sk2))))) {
55 if (ipv6_rcv_saddr_equal(sk, sk2))
56 break;
57 }
58 }
50 } 59 }
51 60
52 return node != NULL; 61 return node != NULL;
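
The restructured conflict test reads best after De Morgan: sk2 only conflicts when neither sharing mechanism sanctions the overlap. A sketch with hypothetical intermediate names:

/* equivalent form of the new predicate */
bool addr_share_ok = reuse && sk2->sk_reuse &&
                     sk2->sk_state != TCP_LISTEN;       /* SO_REUSEADDR */
bool port_share_ok = reuseport && sk2->sk_reuseport &&
                     (sk2->sk_state == TCP_TIME_WAIT ||
                      uid_eq(uid, sock_i_uid((struct sock *)sk2))); /* SO_REUSEPORT */
bool conflict = !addr_share_ok && !port_share_ok &&
                ipv6_rcv_saddr_equal(sk, sk2);

The uid_eq() clause is the security boundary: one user cannot steal another user's port merely by setting SO_REUSEPORT.
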
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index dea17fd28e50..32b4a1675d82 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -158,25 +158,38 @@ static inline int compute_score(struct sock *sk, struct net *net,
158} 158}
159 159
160struct sock *inet6_lookup_listener(struct net *net, 160struct sock *inet6_lookup_listener(struct net *net,
161 struct inet_hashinfo *hashinfo, const struct in6_addr *daddr, 161 struct inet_hashinfo *hashinfo, const struct in6_addr *saddr,
162 const __be16 sport, const struct in6_addr *daddr,
162 const unsigned short hnum, const int dif) 163 const unsigned short hnum, const int dif)
163{ 164{
164 struct sock *sk; 165 struct sock *sk;
165 const struct hlist_nulls_node *node; 166 const struct hlist_nulls_node *node;
166 struct sock *result; 167 struct sock *result;
167 int score, hiscore; 168 int score, hiscore, matches = 0, reuseport = 0;
169 u32 phash = 0;
168 unsigned int hash = inet_lhashfn(net, hnum); 170 unsigned int hash = inet_lhashfn(net, hnum);
169 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; 171 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
170 172
171 rcu_read_lock(); 173 rcu_read_lock();
172begin: 174begin:
173 result = NULL; 175 result = NULL;
174 hiscore = -1; 176 hiscore = 0;
175 sk_nulls_for_each(sk, node, &ilb->head) { 177 sk_nulls_for_each(sk, node, &ilb->head) {
176 score = compute_score(sk, net, hnum, daddr, dif); 178 score = compute_score(sk, net, hnum, daddr, dif);
177 if (score > hiscore) { 179 if (score > hiscore) {
178 hiscore = score; 180 hiscore = score;
179 result = sk; 181 result = sk;
182 reuseport = sk->sk_reuseport;
183 if (reuseport) {
184 phash = inet6_ehashfn(net, daddr, hnum,
185 saddr, sport);
186 matches = 1;
187 }
188 } else if (score == hiscore && reuseport) {
189 matches++;
190 if (((u64)phash * matches) >> 32 == 0)
191 result = sk;
192 phash = next_pseudo_random32(phash);
180 } 193 }
181 } 194 }
182 /* 195 /*
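
The reuseport selection is reservoir sampling over equal-score listeners: phash is pseudo-uniform over [0, 2^32), so ((u64)phash * matches) >> 32 maps it uniformly onto [0, matches) and the "== 0" test fires with probability 1/matches. Keeping the current candidate with probability 1/k at the k-th match leaves each of the N tied sockets chosen with probability 1/N overall. The trick in isolation, as a sketch:

static inline bool keep_candidate(u32 phash, u32 matches)
{
        /* top 32 bits of a 32x32-bit product: uniform in [0, matches) */
        return ((u64)phash * matches) >> 32 == 0;
}

Since phash is seeded from the 4-tuple via inet6_ehashfn(), a given flow keeps hashing to the same listener for a stable socket set.
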
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
new file mode 100644
index 000000000000..72d198b8e4d2
--- /dev/null
+++ b/net/ipv6/ip6_checksum.c
@@ -0,0 +1,97 @@
1#include <net/ip.h>
2#include <net/udp.h>
3#include <net/udplite.h>
4#include <asm/checksum.h>
5
6#ifndef _HAVE_ARCH_IPV6_CSUM
7__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
8 const struct in6_addr *daddr,
9 __u32 len, unsigned short proto,
10 __wsum csum)
11{
12
13 int carry;
14 __u32 ulen;
15 __u32 uproto;
16 __u32 sum = (__force u32)csum;
17
18 sum += (__force u32)saddr->s6_addr32[0];
19 carry = (sum < (__force u32)saddr->s6_addr32[0]);
20 sum += carry;
21
22 sum += (__force u32)saddr->s6_addr32[1];
23 carry = (sum < (__force u32)saddr->s6_addr32[1]);
24 sum += carry;
25
26 sum += (__force u32)saddr->s6_addr32[2];
27 carry = (sum < (__force u32)saddr->s6_addr32[2]);
28 sum += carry;
29
30 sum += (__force u32)saddr->s6_addr32[3];
31 carry = (sum < (__force u32)saddr->s6_addr32[3]);
32 sum += carry;
33
34 sum += (__force u32)daddr->s6_addr32[0];
35 carry = (sum < (__force u32)daddr->s6_addr32[0]);
36 sum += carry;
37
38 sum += (__force u32)daddr->s6_addr32[1];
39 carry = (sum < (__force u32)daddr->s6_addr32[1]);
40 sum += carry;
41
42 sum += (__force u32)daddr->s6_addr32[2];
43 carry = (sum < (__force u32)daddr->s6_addr32[2]);
44 sum += carry;
45
46 sum += (__force u32)daddr->s6_addr32[3];
47 carry = (sum < (__force u32)daddr->s6_addr32[3]);
48 sum += carry;
49
50 ulen = (__force u32)htonl((__u32) len);
51 sum += ulen;
52 carry = (sum < ulen);
53 sum += carry;
54
55 uproto = (__force u32)htonl(proto);
56 sum += uproto;
57 carry = (sum < uproto);
58 sum += carry;
59
60 return csum_fold((__force __wsum)sum);
61}
62EXPORT_SYMBOL(csum_ipv6_magic);
63#endif
64
65int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
66{
67 int err;
68
69 UDP_SKB_CB(skb)->partial_cov = 0;
70 UDP_SKB_CB(skb)->cscov = skb->len;
71
72 if (proto == IPPROTO_UDPLITE) {
73 err = udplite_checksum_init(skb, uh);
74 if (err)
75 return err;
76 }
77
78 if (uh->check == 0) {
79 /* RFC 2460 section 8.1 says that we SHOULD log
80 this error. Well, it is reasonable.
81 */
82 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
83 return 1;
84 }
85 if (skb->ip_summed == CHECKSUM_COMPLETE &&
86 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
87 skb->len, proto, skb->csum))
88 skb->ip_summed = CHECKSUM_UNNECESSARY;
89
90 if (!skb_csum_unnecessary(skb))
91 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
92 &ipv6_hdr(skb)->daddr,
93 skb->len, proto, 0));
94
95 return 0;
96}
97EXPORT_SYMBOL(udp6_csum_init);
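
The new file gives every architecture lacking hand-written assembler a common csum_ipv6_magic(): a 32-bit ones'-complement sum (each addition folds its carry back in) over both addresses, the length and the protocol, folded to 16 bits at the end. udp6_csum_init() wraps it for the UDP/UDPLite receive path; a caller looks roughly like:

/* sketch of use in __udp6_lib_rcv()-style code */
struct udphdr *uh = udp_hdr(skb);

if (udp6_csum_init(skb, uh, IPPROTO_UDP))
        goto discard;   /* zero checksum (illegal over IPv6) or bad coverage */

Note the asymmetry with IPv4: RFC 2460 forbids a zero UDP checksum, hence the early "return 1" path above.
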
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 29124b7a04c8..b973ed3d06cf 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -51,25 +51,38 @@
51#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) 51#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
52 52
53static atomic_t fl_size = ATOMIC_INIT(0); 53static atomic_t fl_size = ATOMIC_INIT(0);
54static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1]; 54static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
55 55
56static void ip6_fl_gc(unsigned long dummy); 56static void ip6_fl_gc(unsigned long dummy);
57static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0); 57static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
58 58
59/* FL hash table lock: it protects only the GC */ 59
60 60
61static DEFINE_RWLOCK(ip6_fl_lock); 61static DEFINE_SPINLOCK(ip6_fl_lock);
62 62
63/* Big socket sock */ 63/* Big socket sock */
64 64
65static DEFINE_RWLOCK(ip6_sk_fl_lock); 65static DEFINE_SPINLOCK(ip6_sk_fl_lock);
66 66
67#define for_each_fl_rcu(hash, fl) \
68 for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
69 fl != NULL; \
70 fl = rcu_dereference_bh(fl->next))
71#define for_each_fl_continue_rcu(fl) \
72 for (fl = rcu_dereference_bh(fl->next); \
73 fl != NULL; \
74 fl = rcu_dereference_bh(fl->next))
75
76#define for_each_sk_fl_rcu(np, sfl) \
77 for (sfl = rcu_dereference_bh(np->ipv6_fl_list); \
78 sfl != NULL; \
79 sfl = rcu_dereference_bh(sfl->next))
67 80
68static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label) 81static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
69{ 82{
70 struct ip6_flowlabel *fl; 83 struct ip6_flowlabel *fl;
71 84
72 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) { 85 for_each_fl_rcu(FL_HASH(label), fl) {
73 if (fl->label == label && net_eq(fl->fl_net, net)) 86 if (fl->label == label && net_eq(fl->fl_net, net))
74 return fl; 87 return fl;
75 } 88 }
@@ -80,11 +93,11 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
80{ 93{
81 struct ip6_flowlabel *fl; 94 struct ip6_flowlabel *fl;
82 95
83 read_lock_bh(&ip6_fl_lock); 96 rcu_read_lock_bh();
84 fl = __fl_lookup(net, label); 97 fl = __fl_lookup(net, label);
85 if (fl) 98 if (fl && !atomic_inc_not_zero(&fl->users))
86 atomic_inc(&fl->users); 99 fl = NULL;
87 read_unlock_bh(&ip6_fl_lock); 100 rcu_read_unlock_bh();
88 return fl; 101 return fl;
89} 102}
90 103
@@ -96,13 +109,13 @@ static void fl_free(struct ip6_flowlabel *fl)
96 put_pid(fl->owner.pid); 109 put_pid(fl->owner.pid);
97 release_net(fl->fl_net); 110 release_net(fl->fl_net);
98 kfree(fl->opt); 111 kfree(fl->opt);
112 kfree_rcu(fl, rcu);
99 } 113 }
100 kfree(fl);
101} 114}
102 115
103static void fl_release(struct ip6_flowlabel *fl) 116static void fl_release(struct ip6_flowlabel *fl)
104{ 117{
105 write_lock_bh(&ip6_fl_lock); 118 spin_lock_bh(&ip6_fl_lock);
106 119
107 fl->lastuse = jiffies; 120 fl->lastuse = jiffies;
108 if (atomic_dec_and_test(&fl->users)) { 121 if (atomic_dec_and_test(&fl->users)) {
@@ -119,7 +132,7 @@ static void fl_release(struct ip6_flowlabel *fl)
119 time_after(ip6_fl_gc_timer.expires, ttd)) 132 time_after(ip6_fl_gc_timer.expires, ttd))
120 mod_timer(&ip6_fl_gc_timer, ttd); 133 mod_timer(&ip6_fl_gc_timer, ttd);
121 } 134 }
122 write_unlock_bh(&ip6_fl_lock); 135 spin_unlock_bh(&ip6_fl_lock);
123} 136}
124 137
125static void ip6_fl_gc(unsigned long dummy) 138static void ip6_fl_gc(unsigned long dummy)
@@ -128,12 +141,13 @@ static void ip6_fl_gc(unsigned long dummy)
128 unsigned long now = jiffies; 141 unsigned long now = jiffies;
129 unsigned long sched = 0; 142 unsigned long sched = 0;
130 143
131 write_lock(&ip6_fl_lock); 144 spin_lock(&ip6_fl_lock);
132 145
133 for (i=0; i<=FL_HASH_MASK; i++) { 146 for (i=0; i<=FL_HASH_MASK; i++) {
134 struct ip6_flowlabel *fl, **flp; 147 struct ip6_flowlabel *fl, **flp;
135 flp = &fl_ht[i]; 148 flp = &fl_ht[i];
136 while ((fl=*flp) != NULL) { 149 while ((fl = rcu_dereference_protected(*flp,
150 lockdep_is_held(&ip6_fl_lock))) != NULL) {
137 if (atomic_read(&fl->users) == 0) { 151 if (atomic_read(&fl->users) == 0) {
138 unsigned long ttd = fl->lastuse + fl->linger; 152 unsigned long ttd = fl->lastuse + fl->linger;
139 if (time_after(ttd, fl->expires)) 153 if (time_after(ttd, fl->expires))
@@ -156,18 +170,19 @@ static void ip6_fl_gc(unsigned long dummy)
156 if (sched) { 170 if (sched) {
157 mod_timer(&ip6_fl_gc_timer, sched); 171 mod_timer(&ip6_fl_gc_timer, sched);
158 } 172 }
159 write_unlock(&ip6_fl_lock); 173 spin_unlock(&ip6_fl_lock);
160} 174}
161 175
162static void __net_exit ip6_fl_purge(struct net *net) 176static void __net_exit ip6_fl_purge(struct net *net)
163{ 177{
164 int i; 178 int i;
165 179
166 write_lock(&ip6_fl_lock); 180 spin_lock(&ip6_fl_lock);
167 for (i = 0; i <= FL_HASH_MASK; i++) { 181 for (i = 0; i <= FL_HASH_MASK; i++) {
168 struct ip6_flowlabel *fl, **flp; 182 struct ip6_flowlabel *fl, **flp;
169 flp = &fl_ht[i]; 183 flp = &fl_ht[i];
170 while ((fl = *flp) != NULL) { 184 while ((fl = rcu_dereference_protected(*flp,
185 lockdep_is_held(&ip6_fl_lock))) != NULL) {
171 if (net_eq(fl->fl_net, net) && 186 if (net_eq(fl->fl_net, net) &&
172 atomic_read(&fl->users) == 0) { 187 atomic_read(&fl->users) == 0) {
173 *flp = fl->next; 188 *flp = fl->next;
@@ -178,7 +193,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
178 flp = &fl->next; 193 flp = &fl->next;
179 } 194 }
180 } 195 }
181 write_unlock(&ip6_fl_lock); 196 spin_unlock(&ip6_fl_lock);
182} 197}
183 198
184static struct ip6_flowlabel *fl_intern(struct net *net, 199static struct ip6_flowlabel *fl_intern(struct net *net,
@@ -188,7 +203,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
188 203
189 fl->label = label & IPV6_FLOWLABEL_MASK; 204 fl->label = label & IPV6_FLOWLABEL_MASK;
190 205
191 write_lock_bh(&ip6_fl_lock); 206 spin_lock_bh(&ip6_fl_lock);
192 if (label == 0) { 207 if (label == 0) {
193 for (;;) { 208 for (;;) {
194 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK; 209 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
@@ -210,16 +225,16 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
210 lfl = __fl_lookup(net, fl->label); 225 lfl = __fl_lookup(net, fl->label);
211 if (lfl != NULL) { 226 if (lfl != NULL) {
212 atomic_inc(&lfl->users); 227 atomic_inc(&lfl->users);
213 write_unlock_bh(&ip6_fl_lock); 228 spin_unlock_bh(&ip6_fl_lock);
214 return lfl; 229 return lfl;
215 } 230 }
216 } 231 }
217 232
218 fl->lastuse = jiffies; 233 fl->lastuse = jiffies;
219 fl->next = fl_ht[FL_HASH(fl->label)]; 234 fl->next = fl_ht[FL_HASH(fl->label)];
220 fl_ht[FL_HASH(fl->label)] = fl; 235 rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
221 atomic_inc(&fl_size); 236 atomic_inc(&fl_size);
222 write_unlock_bh(&ip6_fl_lock); 237 spin_unlock_bh(&ip6_fl_lock);
223 return NULL; 238 return NULL;
224} 239}
225 240
@@ -234,17 +249,17 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
234 249
235 label &= IPV6_FLOWLABEL_MASK; 250 label &= IPV6_FLOWLABEL_MASK;
236 251
237 read_lock_bh(&ip6_sk_fl_lock); 252 rcu_read_lock_bh();
238 for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) { 253 for_each_sk_fl_rcu(np, sfl) {
239 struct ip6_flowlabel *fl = sfl->fl; 254 struct ip6_flowlabel *fl = sfl->fl;
240 if (fl->label == label) { 255 if (fl->label == label) {
241 fl->lastuse = jiffies; 256 fl->lastuse = jiffies;
242 atomic_inc(&fl->users); 257 atomic_inc(&fl->users);
243 read_unlock_bh(&ip6_sk_fl_lock); 258 rcu_read_unlock_bh();
244 return fl; 259 return fl;
245 } 260 }
246 } 261 }
247 read_unlock_bh(&ip6_sk_fl_lock); 262 rcu_read_unlock_bh();
248 return NULL; 263 return NULL;
249} 264}
250 265
@@ -255,11 +270,21 @@ void fl6_free_socklist(struct sock *sk)
255 struct ipv6_pinfo *np = inet6_sk(sk); 270 struct ipv6_pinfo *np = inet6_sk(sk);
256 struct ipv6_fl_socklist *sfl; 271 struct ipv6_fl_socklist *sfl;
257 272
258 while ((sfl = np->ipv6_fl_list) != NULL) { 273 if (!rcu_access_pointer(np->ipv6_fl_list))
274 return;
275
276 spin_lock_bh(&ip6_sk_fl_lock);
277 while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
278 lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
259 np->ipv6_fl_list = sfl->next; 279 np->ipv6_fl_list = sfl->next;
280 spin_unlock_bh(&ip6_sk_fl_lock);
281
260 fl_release(sfl->fl); 282 fl_release(sfl->fl);
261 kfree(sfl); 283 kfree_rcu(sfl, rcu);
284
285 spin_lock_bh(&ip6_sk_fl_lock);
262 } 286 }
287 spin_unlock_bh(&ip6_sk_fl_lock);
263} 288}
264 289
265/* Service routines */ 290/* Service routines */
@@ -365,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
365 msg.msg_control = (void*)(fl->opt+1); 390 msg.msg_control = (void*)(fl->opt+1);
366 memset(&flowi6, 0, sizeof(flowi6)); 391 memset(&flowi6, 0, sizeof(flowi6));
367 392
368 err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 393 err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
369 &junk, &junk); 394 &junk, &junk, &junk);
370 if (err) 395 if (err)
371 goto done; 396 goto done;
372 err = -EINVAL; 397 err = -EINVAL;
@@ -424,7 +449,7 @@ static int mem_check(struct sock *sk)
424 if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK) 449 if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
425 return 0; 450 return 0;
426 451
427 for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) 452 for_each_sk_fl_rcu(np, sfl)
428 count++; 453 count++;
429 454
430 if (room <= 0 || 455 if (room <= 0 ||
@@ -467,11 +492,11 @@ static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
467static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, 492static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
468 struct ip6_flowlabel *fl) 493 struct ip6_flowlabel *fl)
469{ 494{
470 write_lock_bh(&ip6_sk_fl_lock); 495 spin_lock_bh(&ip6_sk_fl_lock);
471 sfl->fl = fl; 496 sfl->fl = fl;
472 sfl->next = np->ipv6_fl_list; 497 sfl->next = np->ipv6_fl_list;
473 np->ipv6_fl_list = sfl; 498 rcu_assign_pointer(np->ipv6_fl_list, sfl);
474 write_unlock_bh(&ip6_sk_fl_lock); 499 spin_unlock_bh(&ip6_sk_fl_lock);
475} 500}
476 501
477int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) 502int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
@@ -493,31 +518,33 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
493 518
494 switch (freq.flr_action) { 519 switch (freq.flr_action) {
495 case IPV6_FL_A_PUT: 520 case IPV6_FL_A_PUT:
496 write_lock_bh(&ip6_sk_fl_lock); 521 spin_lock_bh(&ip6_sk_fl_lock);
497 for (sflp = &np->ipv6_fl_list; (sfl=*sflp)!=NULL; sflp = &sfl->next) { 522 for (sflp = &np->ipv6_fl_list;
523 (sfl = rcu_dereference(*sflp))!=NULL;
524 sflp = &sfl->next) {
498 if (sfl->fl->label == freq.flr_label) { 525 if (sfl->fl->label == freq.flr_label) {
499 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) 526 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
500 np->flow_label &= ~IPV6_FLOWLABEL_MASK; 527 np->flow_label &= ~IPV6_FLOWLABEL_MASK;
501 *sflp = sfl->next; 528 *sflp = rcu_dereference(sfl->next);
502 write_unlock_bh(&ip6_sk_fl_lock); 529 spin_unlock_bh(&ip6_sk_fl_lock);
503 fl_release(sfl->fl); 530 fl_release(sfl->fl);
504 kfree(sfl); 531 kfree_rcu(sfl, rcu);
505 return 0; 532 return 0;
506 } 533 }
507 } 534 }
508 write_unlock_bh(&ip6_sk_fl_lock); 535 spin_unlock_bh(&ip6_sk_fl_lock);
509 return -ESRCH; 536 return -ESRCH;
510 537
511 case IPV6_FL_A_RENEW: 538 case IPV6_FL_A_RENEW:
512 read_lock_bh(&ip6_sk_fl_lock); 539 rcu_read_lock_bh();
513 for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) { 540 for_each_sk_fl_rcu(np, sfl) {
514 if (sfl->fl->label == freq.flr_label) { 541 if (sfl->fl->label == freq.flr_label) {
515 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires); 542 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
516 read_unlock_bh(&ip6_sk_fl_lock); 543 rcu_read_unlock_bh();
517 return err; 544 return err;
518 } 545 }
519 } 546 }
520 read_unlock_bh(&ip6_sk_fl_lock); 547 rcu_read_unlock_bh();
521 548
522 if (freq.flr_share == IPV6_FL_S_NONE && 549 if (freq.flr_share == IPV6_FL_S_NONE &&
523 ns_capable(net->user_ns, CAP_NET_ADMIN)) { 550 ns_capable(net->user_ns, CAP_NET_ADMIN)) {
@@ -541,11 +568,11 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
541 568
542 if (freq.flr_label) { 569 if (freq.flr_label) {
543 err = -EEXIST; 570 err = -EEXIST;
544 read_lock_bh(&ip6_sk_fl_lock); 571 rcu_read_lock_bh();
545 for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) { 572 for_each_sk_fl_rcu(np, sfl) {
546 if (sfl->fl->label == freq.flr_label) { 573 if (sfl->fl->label == freq.flr_label) {
547 if (freq.flr_flags&IPV6_FL_F_EXCL) { 574 if (freq.flr_flags&IPV6_FL_F_EXCL) {
548 read_unlock_bh(&ip6_sk_fl_lock); 575 rcu_read_unlock_bh();
549 goto done; 576 goto done;
550 } 577 }
551 fl1 = sfl->fl; 578 fl1 = sfl->fl;
@@ -553,7 +580,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
553 break; 580 break;
554 } 581 }
555 } 582 }
556 read_unlock_bh(&ip6_sk_fl_lock); 583 rcu_read_unlock_bh();
557 584
558 if (fl1 == NULL) 585 if (fl1 == NULL)
559 fl1 = fl_lookup(net, freq.flr_label); 586 fl1 = fl_lookup(net, freq.flr_label);
@@ -641,13 +668,13 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
641 struct net *net = seq_file_net(seq); 668 struct net *net = seq_file_net(seq);
642 669
643 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { 670 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
644 fl = fl_ht[state->bucket]; 671 for_each_fl_rcu(state->bucket, fl) {
645 672 if (net_eq(fl->fl_net, net))
646 while (fl && !net_eq(fl->fl_net, net)) 673 goto out;
647 fl = fl->next; 674 }
648 if (fl)
649 break;
650 } 675 }
676 fl = NULL;
677out:
651 return fl; 678 return fl;
652} 679}
653 680
@@ -656,18 +683,22 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
656 struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 683 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
657 struct net *net = seq_file_net(seq); 684 struct net *net = seq_file_net(seq);
658 685
659 fl = fl->next; 686 for_each_fl_continue_rcu(fl) {
687 if (net_eq(fl->fl_net, net))
688 goto out;
689 }
690
660try_again: 691try_again:
661 while (fl && !net_eq(fl->fl_net, net)) 692 if (++state->bucket <= FL_HASH_MASK) {
662 fl = fl->next; 693 for_each_fl_rcu(state->bucket, fl) {
663 694 if (net_eq(fl->fl_net, net))
664 while (!fl) { 695 goto out;
665 if (++state->bucket <= FL_HASH_MASK) { 696 }
666 fl = fl_ht[state->bucket]; 697 goto try_again;
667 goto try_again;
668 } else
669 break;
670 } 698 }
699 fl = NULL;
700
701out:
671 return fl; 702 return fl;
672} 703}
673 704
@@ -681,9 +712,9 @@ static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
681} 712}
682 713
683static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos) 714static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
684 __acquires(ip6_fl_lock) 715 __acquires(RCU)
685{ 716{
686 read_lock_bh(&ip6_fl_lock); 717 rcu_read_lock_bh();
687 return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 718 return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
688} 719}
689 720
@@ -700,9 +731,9 @@ static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
700} 731}
701 732
702static void ip6fl_seq_stop(struct seq_file *seq, void *v) 733static void ip6fl_seq_stop(struct seq_file *seq, void *v)
703 __releases(ip6_fl_lock) 734 __releases(RCU)
704{ 735{
705 read_unlock_bh(&ip6_fl_lock); 736 rcu_read_unlock_bh();
706} 737}
707 738
708static int ip6fl_seq_show(struct seq_file *seq, void *v) 739static int ip6fl_seq_show(struct seq_file *seq, void *v)
@@ -775,15 +806,15 @@ static const struct file_operations ip6fl_seq_fops = {
775 806
776static int __net_init ip6_flowlabel_proc_init(struct net *net) 807static int __net_init ip6_flowlabel_proc_init(struct net *net)
777{ 808{
778 if (!proc_net_fops_create(net, "ip6_flowlabel", 809 if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
779 S_IRUGO, &ip6fl_seq_fops)) 810 &ip6fl_seq_fops))
780 return -ENOMEM; 811 return -ENOMEM;
781 return 0; 812 return 0;
782} 813}
783 814
784static void __net_exit ip6_flowlabel_proc_fini(struct net *net) 815static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
785{ 816{
786 proc_net_remove(net, "ip6_flowlabel"); 817 remove_proc_entry("ip6_flowlabel", net->proc_net);
787} 818}
788#else 819#else
789static inline int ip6_flowlabel_proc_init(struct net *net) 820static inline int ip6_flowlabel_proc_init(struct net *net)
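
The conversion follows the standard RCU recipe: readers walk the hash chains locklessly, writers serialize on a plain spinlock, and freeing defers to kfree_rcu(). The one subtlety is reference acquisition: a reader may observe an entry whose refcount already hit zero and whose memory merely awaits the grace period, so taking a reference must be conditional. The idiom, annotated:

rcu_read_lock_bh();
fl = __fl_lookup(net, label);           /* lockless chain walk */
if (fl && !atomic_inc_not_zero(&fl->users))
        fl = NULL;                      /* entry is dying (users == 0): treat
                                         * as a miss; plain atomic_inc() would
                                         * resurrect freed-pending memory */
rcu_read_unlock_bh();

This is precisely why fl_lookup() trades atomic_inc() for atomic_inc_not_zero() in the hunk above.
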
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 867466c96aac..e4efffe2522e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -758,8 +758,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
758 skb_dst_set_noref(skb, dst); 758 skb_dst_set_noref(skb, dst);
759 } 759 }
760 760
761 skb->transport_header = skb->network_header;
762
763 proto = NEXTHDR_GRE; 761 proto = NEXTHDR_GRE;
764 if (encap_limit >= 0) { 762 if (encap_limit >= 0) {
765 init_tel_txopt(&opt, encap_limit); 763 init_tel_txopt(&opt, encap_limit);
@@ -768,14 +766,13 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
768 766
769 skb_push(skb, gre_hlen); 767 skb_push(skb, gre_hlen);
770 skb_reset_network_header(skb); 768 skb_reset_network_header(skb);
769 skb_set_transport_header(skb, sizeof(*ipv6h));
771 770
772 /* 771 /*
773 * Push down and install the IP header. 772 * Push down and install the IP header.
774 */ 773 */
775 ipv6h = ipv6_hdr(skb); 774 ipv6h = ipv6_hdr(skb);
776 *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000); 775 ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
777 dsfield = INET_ECN_encapsulate(0, dsfield);
778 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
779 ipv6h->hop_limit = tunnel->parms.hop_limit; 776 ipv6h->hop_limit = tunnel->parms.hop_limit;
780 ipv6h->nexthdr = proto; 777 ipv6h->nexthdr = proto;
781 ipv6h->saddr = fl6->saddr; 778 ipv6h->saddr = fl6->saddr;
@@ -961,7 +958,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
961 int ret; 958 int ret;
962 959
963 if (!ip6_tnl_xmit_ctl(t)) 960 if (!ip6_tnl_xmit_ctl(t))
964 return -1; 961 goto tx_err;
965 962
966 switch (skb->protocol) { 963 switch (skb->protocol) {
967 case htons(ETH_P_IP): 964 case htons(ETH_P_IP):
@@ -1241,7 +1238,7 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
1241 struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen); 1238 struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
1242 __be16 *p = (__be16 *)(ipv6h+1); 1239 __be16 *p = (__be16 *)(ipv6h+1);
1243 1240
1244 *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000); 1241 ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
1245 ipv6h->hop_limit = t->parms.hop_limit; 1242 ipv6h->hop_limit = t->parms.hop_limit;
1246 ipv6h->nexthdr = NEXTHDR_GRE; 1243 ipv6h->nexthdr = NEXTHDR_GRE;
1247 ipv6h->saddr = t->parms.laddr; 1244 ipv6h->saddr = t->parms.laddr;
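
ip6_flow_hdr() centralizes the open-coded 0x60000000 initialisations seen here and in the tunnel diffs that follow; the helper is essentially:

static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        /* version = 6, traffic class and flow label in one 32-bit store */
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

Passing INET_ECN_encapsulate(0, dsfield) as the traffic class also folds the old ipv6_change_dsfield() read-modify-write into that single store.
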
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a52d864d562b..5b10414e619e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -118,6 +118,15 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
118 ipv6_addr_loopback(&hdr->daddr)) 118 ipv6_addr_loopback(&hdr->daddr))
119 goto err; 119 goto err;
120 120
121 /* RFC4291 2.7
122 * Nodes must not originate a packet to a multicast address whose scope
123 * field contains the reserved value 0; if such a packet is received, it
124 * must be silently dropped.
125 */
126 if (ipv6_addr_is_multicast(&hdr->daddr) &&
127 IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
128 goto err;
129
121 /* 130 /*
122 * RFC4291 2.7 131 * RFC4291 2.7
123 * Multicast addresses must not be used as source addresses in IPv6 132 * Multicast addresses must not be used as source addresses in IPv6
@@ -212,7 +221,7 @@ resubmit:
212 if (ipv6_addr_is_multicast(&hdr->daddr) && 221 if (ipv6_addr_is_multicast(&hdr->daddr) &&
213 !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, 222 !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
214 &hdr->saddr) && 223 &hdr->saddr) &&
215 !ipv6_is_mld(skb, nexthdr)) 224 !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
216 goto discard; 225 goto discard;
217 } 226 }
218 if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && 227 if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
@@ -280,10 +289,8 @@ int ip6_mc_input(struct sk_buff *skb)
280 struct inet6_skb_parm *opt = IP6CB(skb); 289 struct inet6_skb_parm *opt = IP6CB(skb);
281 290
282 /* Check for MLD */ 291 /* Check for MLD */
283 if (unlikely(opt->ra)) { 292 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
284 /* Check if this is a mld message */ 293 /* Check if this is a mld message */
285 u8 *ptr = skb_network_header(skb) + opt->ra;
286 struct icmp6hdr *icmp6;
287 u8 nexthdr = hdr->nexthdr; 294 u8 nexthdr = hdr->nexthdr;
288 __be16 frag_off; 295 __be16 frag_off;
289 int offset; 296 int offset;
@@ -291,7 +298,7 @@ int ip6_mc_input(struct sk_buff *skb)
291 /* Check if the value of Router Alert 298 /* Check if the value of Router Alert
292 * is for MLD (0x0000). 299 * is for MLD (0x0000).
293 */ 300 */
294 if ((ptr[2] | ptr[3]) == 0) { 301 if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
295 deliver = false; 302 deliver = false;
296 303
297 if (!ipv6_ext_hdr(nexthdr)) { 304 if (!ipv6_ext_hdr(nexthdr)) {
@@ -303,24 +310,10 @@ int ip6_mc_input(struct sk_buff *skb)
303 if (offset < 0) 310 if (offset < 0)
304 goto out; 311 goto out;
305 312
306 if (nexthdr != IPPROTO_ICMPV6) 313 if (!ipv6_is_mld(skb, nexthdr, offset))
307 goto out; 314 goto out;
308 315
309 if (!pskb_may_pull(skb, (skb_network_header(skb) + 316 deliver = true;
310 offset + 1 - skb->data)))
311 goto out;
312
313 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
314
315 switch (icmp6->icmp6_type) {
316 case ICMPV6_MGM_QUERY:
317 case ICMPV6_MGM_REPORT:
318 case ICMPV6_MGM_REDUCTION:
319 case ICMPV6_MLD2_REPORT:
320 deliver = true;
321 break;
322 }
323 goto out;
324 } 317 }
325 /* unknown RA - process it normally */ 318 /* unknown RA - process it normally */
326 } 319 }
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index f26f0da7f095..8234c1dcdf72 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,6 +99,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
99 ~(SKB_GSO_UDP | 99 ~(SKB_GSO_UDP |
100 SKB_GSO_DODGY | 100 SKB_GSO_DODGY |
101 SKB_GSO_TCP_ECN | 101 SKB_GSO_TCP_ECN |
102 SKB_GSO_GRE |
102 SKB_GSO_TCPV6 | 103 SKB_GSO_TCPV6 |
103 0))) 104 0)))
104 goto out; 105 goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5552d13ae92f..155eccfa7760 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,8 +56,6 @@
56#include <net/checksum.h> 56#include <net/checksum.h>
57#include <linux/mroute6.h> 57#include <linux/mroute6.h>
58 58
59int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
60
61int __ip6_local_out(struct sk_buff *skb) 59int __ip6_local_out(struct sk_buff *skb)
62{ 60{
63 int len; 61 int len;
@@ -88,7 +86,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
88 struct dst_entry *dst = skb_dst(skb); 86 struct dst_entry *dst = skb_dst(skb);
89 struct net_device *dev = dst->dev; 87 struct net_device *dev = dst->dev;
90 struct neighbour *neigh; 88 struct neighbour *neigh;
91 struct rt6_info *rt; 89 struct in6_addr *nexthop;
90 int ret;
92 91
93 skb->protocol = htons(ETH_P_IPV6); 92 skb->protocol = htons(ETH_P_IPV6);
94 skb->dev = dev; 93 skb->dev = dev;
@@ -121,12 +120,26 @@ static int ip6_finish_output2(struct sk_buff *skb)
121 120
122 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, 121 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
123 skb->len); 122 skb->len);
123
124 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
125 IPV6_ADDR_SCOPE_NODELOCAL &&
126 !(dev->flags & IFF_LOOPBACK)) {
127 kfree_skb(skb);
128 return 0;
129 }
124 } 130 }
125 131
126 rt = (struct rt6_info *) dst; 132 rcu_read_lock_bh();
127 neigh = rt->n; 133 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
128 if (neigh) 134 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
129 return dst_neigh_output(dst, neigh, skb); 135 if (unlikely(!neigh))
136 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
137 if (!IS_ERR(neigh)) {
138 ret = dst_neigh_output(dst, neigh, skb);
139 rcu_read_unlock_bh();
140 return ret;
141 }
142 rcu_read_unlock_bh();
130 143
131 IP6_INC_STATS_BH(dev_net(dst->dev), 144 IP6_INC_STATS_BH(dev_net(dst->dev),
132 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 145 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -216,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
216 if (hlimit < 0) 229 if (hlimit < 0)
217 hlimit = ip6_dst_hoplimit(dst); 230 hlimit = ip6_dst_hoplimit(dst);
218 231
219 *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel; 232 ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
220 233
221 hdr->payload_len = htons(seg_len); 234 hdr->payload_len = htons(seg_len);
222 hdr->nexthdr = proto; 235 hdr->nexthdr = proto;
@@ -236,9 +249,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
236 dst->dev, dst_output); 249 dst->dev, dst_output);
237 } 250 }
238 251
239 net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
240 skb->dev = dst->dev; 252 skb->dev = dst->dev;
241 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 253 ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
242 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 254 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
243 kfree_skb(skb); 255 kfree_skb(skb);
244 return -EMSGSIZE; 256 return -EMSGSIZE;
@@ -246,39 +258,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
246 258
247EXPORT_SYMBOL(ip6_xmit); 259EXPORT_SYMBOL(ip6_xmit);
248 260
249/*
250 * To avoid extra problems ND packets are send through this
251 * routine. It's code duplication but I really want to avoid
252 * extra checks since ipv6_build_header is used by TCP (which
253 * is for us performance critical)
254 */
255
256int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
257 const struct in6_addr *saddr, const struct in6_addr *daddr,
258 int proto, int len)
259{
260 struct ipv6_pinfo *np = inet6_sk(sk);
261 struct ipv6hdr *hdr;
262
263 skb->protocol = htons(ETH_P_IPV6);
264 skb->dev = dev;
265
266 skb_reset_network_header(skb);
267 skb_put(skb, sizeof(struct ipv6hdr));
268 hdr = ipv6_hdr(skb);
269
270 *(__be32*)hdr = htonl(0x60000000);
271
272 hdr->payload_len = htons(len);
273 hdr->nexthdr = proto;
274 hdr->hop_limit = np->hop_limit;
275
276 hdr->saddr = *saddr;
277 hdr->daddr = *daddr;
278
279 return 0;
280}
281
282static int ip6_call_ra_chain(struct sk_buff *skb, int sel) 261static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
283{ 262{
284 struct ip6_ra_chain *ra; 263 struct ip6_ra_chain *ra;
@@ -913,8 +892,12 @@ static int ip6_dst_lookup_tail(struct sock *sk,
913 * dst entry of the nexthop router 892 * dst entry of the nexthop router
914 */ 893 */
915 rt = (struct rt6_info *) *dst; 894 rt = (struct rt6_info *) *dst;
916 n = rt->n; 895 rcu_read_lock_bh();
917 if (n && !(n->nud_state & NUD_VALID)) { 896 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
897 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
898 rcu_read_unlock_bh();
899
900 if (err) {
918 struct inet6_ifaddr *ifp; 901 struct inet6_ifaddr *ifp;
919 struct flowi6 fl_gw6; 902 struct flowi6 fl_gw6;
920 int redirect; 903 int redirect;
@@ -1213,10 +1196,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1213 if (dst_allfrag(rt->dst.path)) 1196 if (dst_allfrag(rt->dst.path))
1214 cork->flags |= IPCORK_ALLFRAG; 1197 cork->flags |= IPCORK_ALLFRAG;
1215 cork->length = 0; 1198 cork->length = 0;
1216 exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; 1199 exthdrlen = (opt ? opt->opt_flen : 0);
1217 length += exthdrlen; 1200 length += exthdrlen;
1218 transhdrlen += exthdrlen; 1201 transhdrlen += exthdrlen;
1219 dst_exthdrlen = rt->dst.header_len; 1202 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1220 } else { 1203 } else {
1221 rt = (struct rt6_info *)cork->dst; 1204 rt = (struct rt6_info *)cork->dst;
1222 fl6 = &inet->cork.fl.u.ip6; 1205 fl6 = &inet->cork.fl.u.ip6;
@@ -1548,9 +1531,7 @@ int ip6_push_pending_frames(struct sock *sk)
1548 skb_reset_network_header(skb); 1531 skb_reset_network_header(skb);
1549 hdr = ipv6_hdr(skb); 1532 hdr = ipv6_hdr(skb);
1550 1533
1551 *(__be32*)hdr = fl6->flowlabel | 1534 ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
1552 htonl(0x60000000 | ((int)np->cork.tclass << 20));
1553
1554 hdr->hop_limit = np->cork.hop_limit; 1535 hdr->hop_limit = np->cork.hop_limit;
1555 hdr->nexthdr = proto; 1536 hdr->nexthdr = proto;
1556 hdr->saddr = fl6->saddr; 1537 hdr->saddr = fl6->saddr;
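
With rt->n gone, the output path resolves its neighbour on demand under RCU. rt6_nexthop() picks the gateway for routed destinations and the packet's own daddr for on-link ones, roughly:

static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
                                           struct in6_addr *daddr)
{
        if (rt->rt6i_flags & RTF_GATEWAY)
                return &rt->rt6i_gateway;       /* via a router */
        return daddr;                           /* directly reachable */
}

__ipv6_neigh_lookup_noref() then finds, or __neigh_create() instantiates, the cache entry for that address, all inside rcu_read_lock_bh() so the entry cannot be freed mid-transmit.
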
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a14f28b280f5..fff83cbc197f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1030,9 +1030,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1030 skb_push(skb, sizeof(struct ipv6hdr)); 1030 skb_push(skb, sizeof(struct ipv6hdr));
1031 skb_reset_network_header(skb); 1031 skb_reset_network_header(skb);
1032 ipv6h = ipv6_hdr(skb); 1032 ipv6h = ipv6_hdr(skb);
1033 *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000); 1033 ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
1034 dsfield = INET_ECN_encapsulate(0, dsfield);
1035 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
1036 ipv6h->hop_limit = t->parms.hop_limit; 1034 ipv6h->hop_limit = t->parms.hop_limit;
1037 ipv6h->nexthdr = proto; 1035 ipv6h->nexthdr = proto;
1038 ipv6h->saddr = fl6->saddr; 1036 ipv6h->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 26dcdec9e3a5..96bfb4e4b820 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1017,6 +1017,50 @@ static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1017 return NULL; 1017 return NULL;
1018} 1018}
1019 1019
1020/* Look for a (*,*,oif) entry */
1021static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1022 mifi_t mifi)
1023{
1024 int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1025 struct mfc6_cache *c;
1026
1027 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1028 if (ipv6_addr_any(&c->mf6c_origin) &&
1029 ipv6_addr_any(&c->mf6c_mcastgrp) &&
1030 (c->mfc_un.res.ttls[mifi] < 255))
1031 return c;
1032
1033 return NULL;
1034}
1035
1036/* Look for a (*,G) entry */
1037static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1038 struct in6_addr *mcastgrp,
1039 mifi_t mifi)
1040{
1041 int line = MFC6_HASH(mcastgrp, &in6addr_any);
1042 struct mfc6_cache *c, *proxy;
1043
1044 if (ipv6_addr_any(mcastgrp))
1045 goto skip;
1046
1047 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1048 if (ipv6_addr_any(&c->mf6c_origin) &&
1049 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1050 if (c->mfc_un.res.ttls[mifi] < 255)
1051 return c;
1052
1053 /* It's ok if the mifi is part of the static tree */
1054 proxy = ip6mr_cache_find_any_parent(mrt,
1055 c->mf6c_parent);
1056 if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1057 return c;
1058 }
1059
1060skip:
1061 return ip6mr_cache_find_any_parent(mrt, mifi);
1062}
1063
1020/* 1064/*
1021 * Allocate a multicast cache entry 1065 * Allocate a multicast cache entry
1022 */ 1066 */
@@ -1247,7 +1291,8 @@ ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1247 * MFC6 cache manipulation by user space 1291 * MFC6 cache manipulation by user space
1248 */ 1292 */
1249 1293
1250static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc) 1294static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1295 int parent)
1251{ 1296{
1252 int line; 1297 int line;
1253 struct mfc6_cache *c, *next; 1298 struct mfc6_cache *c, *next;
@@ -1256,7 +1301,9 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
1256 1301
1257 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) { 1302 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1258 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && 1303 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1259 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { 1304 ipv6_addr_equal(&c->mf6c_mcastgrp,
1305 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1306 (parent == -1 || parent == c->mf6c_parent)) {
1260 write_lock_bh(&mrt_lock); 1307 write_lock_bh(&mrt_lock);
1261 list_del(&c->list); 1308 list_del(&c->list);
1262 write_unlock_bh(&mrt_lock); 1309 write_unlock_bh(&mrt_lock);
@@ -1312,9 +1359,9 @@ static int __net_init ip6mr_net_init(struct net *net)
1312 1359
1313#ifdef CONFIG_PROC_FS 1360#ifdef CONFIG_PROC_FS
1314 err = -ENOMEM; 1361 err = -ENOMEM;
1315 if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops)) 1362 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1316 goto proc_vif_fail; 1363 goto proc_vif_fail;
1317 if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) 1364 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1318 goto proc_cache_fail; 1365 goto proc_cache_fail;
1319#endif 1366#endif
1320 1367
@@ -1322,7 +1369,7 @@ static int __net_init ip6mr_net_init(struct net *net)
1322 1369
1323#ifdef CONFIG_PROC_FS 1370#ifdef CONFIG_PROC_FS
1324proc_cache_fail: 1371proc_cache_fail:
1325 proc_net_remove(net, "ip6_mr_vif"); 1372 remove_proc_entry("ip6_mr_vif", net->proc_net);
1326proc_vif_fail: 1373proc_vif_fail:
1327 ip6mr_rules_exit(net); 1374 ip6mr_rules_exit(net);
1328#endif 1375#endif
@@ -1333,8 +1380,8 @@ fail:
1333static void __net_exit ip6mr_net_exit(struct net *net) 1380static void __net_exit ip6mr_net_exit(struct net *net)
1334{ 1381{
1335#ifdef CONFIG_PROC_FS 1382#ifdef CONFIG_PROC_FS
1336 proc_net_remove(net, "ip6_mr_cache"); 1383 remove_proc_entry("ip6_mr_cache", net->proc_net);
1337 proc_net_remove(net, "ip6_mr_vif"); 1384 remove_proc_entry("ip6_mr_vif", net->proc_net);
1338#endif 1385#endif
1339 ip6mr_rules_exit(net); 1386 ip6mr_rules_exit(net);
1340} 1387}
@@ -1391,7 +1438,7 @@ void ip6_mr_cleanup(void)
1391} 1438}
1392 1439
1393static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, 1440static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1394 struct mf6cctl *mfc, int mrtsock) 1441 struct mf6cctl *mfc, int mrtsock, int parent)
1395{ 1442{
1396 bool found = false; 1443 bool found = false;
1397 int line; 1444 int line;
@@ -1413,7 +1460,9 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1413 1460
1414 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { 1461 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1415 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && 1462 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1416 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { 1463 ipv6_addr_equal(&c->mf6c_mcastgrp,
1464 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1465 (parent == -1 || parent == mfc->mf6cc_parent)) {
1417 found = true; 1466 found = true;
1418 break; 1467 break;
1419 } 1468 }
@@ -1430,7 +1479,8 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1430 return 0; 1479 return 0;
1431 } 1480 }
1432 1481
1433 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) 1482 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1483 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1434 return -EINVAL; 1484 return -EINVAL;
1435 1485
1436 c = ip6mr_cache_alloc(); 1486 c = ip6mr_cache_alloc();
@@ -1596,7 +1646,7 @@ struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1596 1646
1597int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1647int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1598{ 1648{
1599 int ret; 1649 int ret, parent = 0;
1600 struct mif6ctl vif; 1650 struct mif6ctl vif;
1601 struct mf6cctl mfc; 1651 struct mf6cctl mfc;
1602 mifi_t mifi; 1652 mifi_t mifi;
@@ -1653,15 +1703,21 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1653 */ 1703 */
1654 case MRT6_ADD_MFC: 1704 case MRT6_ADD_MFC:
1655 case MRT6_DEL_MFC: 1705 case MRT6_DEL_MFC:
1706 parent = -1;
1707 case MRT6_ADD_MFC_PROXY:
1708 case MRT6_DEL_MFC_PROXY:
1656 if (optlen < sizeof(mfc)) 1709 if (optlen < sizeof(mfc))
1657 return -EINVAL; 1710 return -EINVAL;
1658 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1711 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1659 return -EFAULT; 1712 return -EFAULT;
1713 if (parent == 0)
1714 parent = mfc.mf6cc_parent;
1660 rtnl_lock(); 1715 rtnl_lock();
1661 if (optname == MRT6_DEL_MFC) 1716 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1662 ret = ip6mr_mfc_delete(mrt, &mfc); 1717 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1663 else 1718 else
1664 ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk); 1719 ret = ip6mr_mfc_add(net, mrt, &mfc,
1720 sk == mrt->mroute6_sk, parent);
1665 rtnl_unlock(); 1721 rtnl_unlock();
1666 return ret; 1722 return ret;
1667 1723
@@ -1710,6 +1766,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1710 return -EINVAL; 1766 return -EINVAL;
1711 if (get_user(v, (u32 __user *)optval)) 1767 if (get_user(v, (u32 __user *)optval))
1712 return -EFAULT; 1768 return -EFAULT;
1769 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1770 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1771 return -EINVAL;
1713 if (sk == mrt->mroute6_sk) 1772 if (sk == mrt->mroute6_sk)
1714 return -EBUSY; 1773 return -EBUSY;
1715 1774
@@ -2015,19 +2074,29 @@ static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2015{ 2074{
2016 int psend = -1; 2075 int psend = -1;
2017 int vif, ct; 2076 int vif, ct;
2077 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2018 2078
2019 vif = cache->mf6c_parent; 2079 vif = cache->mf6c_parent;
2020 cache->mfc_un.res.pkt++; 2080 cache->mfc_un.res.pkt++;
2021 cache->mfc_un.res.bytes += skb->len; 2081 cache->mfc_un.res.bytes += skb->len;
2022 2082
2083 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2084 struct mfc6_cache *cache_proxy;
2085
2086 /* For an (*,G) entry, we only check that the incoming
2087 * interface is part of the static tree.
2088 */
2089 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2090 if (cache_proxy &&
2091 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2092 goto forward;
2093 }
2094
2023 /* 2095 /*
2024 * Wrong interface: drop packet and (maybe) send PIM assert. 2096 * Wrong interface: drop packet and (maybe) send PIM assert.
2025 */ 2097 */
2026 if (mrt->vif6_table[vif].dev != skb->dev) { 2098 if (mrt->vif6_table[vif].dev != skb->dev) {
2027 int true_vifi;
2028
2029 cache->mfc_un.res.wrong_if++; 2099 cache->mfc_un.res.wrong_if++;
2030 true_vifi = ip6mr_find_vif(mrt, skb->dev);
2031 2100
2032 if (true_vifi >= 0 && mrt->mroute_do_assert && 2101 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2033 /* pimsm uses asserts, when switching from RPT to SPT, 2102 /* pimsm uses asserts, when switching from RPT to SPT,
@@ -2045,14 +2114,32 @@ static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2045 goto dont_forward; 2114 goto dont_forward;
2046 } 2115 }
2047 2116
2117forward:
2048 mrt->vif6_table[vif].pkt_in++; 2118 mrt->vif6_table[vif].pkt_in++;
2049 mrt->vif6_table[vif].bytes_in += skb->len; 2119 mrt->vif6_table[vif].bytes_in += skb->len;
2050 2120
2051 /* 2121 /*
2052 * Forward the frame 2122 * Forward the frame
2053 */ 2123 */
2124 if (ipv6_addr_any(&cache->mf6c_origin) &&
2125 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2126 if (true_vifi >= 0 &&
2127 true_vifi != cache->mf6c_parent &&
2128 ipv6_hdr(skb)->hop_limit >
2129 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2130 /* It's an (*,*) entry and the packet is not coming from
2131 * the upstream: forward the packet to the upstream
2132 * only.
2133 */
2134 psend = cache->mf6c_parent;
2135 goto last_forward;
2136 }
2137 goto dont_forward;
2138 }
2054 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) { 2139 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2055 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) { 2140 /* For (*,G) entry, don't forward to the incoming interface */
2141 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2142 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2056 if (psend != -1) { 2143 if (psend != -1) {
2057 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 2144 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2058 if (skb2) 2145 if (skb2)
@@ -2061,6 +2148,7 @@ static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2061 psend = ct; 2148 psend = ct;
2062 } 2149 }
2063 } 2150 }
2151last_forward:
2064 if (psend != -1) { 2152 if (psend != -1) {
2065 ip6mr_forward2(net, mrt, skb, cache, psend); 2153 ip6mr_forward2(net, mrt, skb, cache, psend);
2066 return 0; 2154 return 0;
@@ -2096,6 +2184,14 @@ int ip6_mr_input(struct sk_buff *skb)
2096 read_lock(&mrt_lock); 2184 read_lock(&mrt_lock);
2097 cache = ip6mr_cache_find(mrt, 2185 cache = ip6mr_cache_find(mrt,
2098 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); 2186 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2187 if (cache == NULL) {
2188 int vif = ip6mr_find_vif(mrt, skb->dev);
2189
2190 if (vif >= 0)
2191 cache = ip6mr_cache_find_any(mrt,
2192 &ipv6_hdr(skb)->daddr,
2193 vif);
2194 }
2099 2195
2100 /* 2196 /*
2101 * No usable cache entry 2197 * No usable cache entry
@@ -2183,6 +2279,13 @@ int ip6mr_get_route(struct net *net,
2183 2279
2184 read_lock(&mrt_lock); 2280 read_lock(&mrt_lock);
2185 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); 2281 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2282 if (!cache && skb->dev) {
2283 int vif = ip6mr_find_vif(mrt, skb->dev);
2284
2285 if (vif >= 0)
2286 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2287 vif);
2288 }
2186 2289
2187 if (!cache) { 2290 if (!cache) {
2188 struct sk_buff *skb2; 2291 struct sk_buff *skb2;
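
The hunks above add (*,G) and (*,*) wildcard support to the IPv6 multicast forwarding cache: the exact (S,G) lookup is tried first, and only on a miss does the code fall back to a wildcard entry valid for the packet's incoming vif. The following is a minimal userspace sketch of that search order only; the struct and helper below are simplified stand-ins for the kernel's mfc6_cache and ip6mr_cache_find_any(), not the real implementation.

    /* Stand-ins for mfc6_cache and the find helpers; only the
     * (S,G) -> (*,G) -> (*,*) search order is the point here. */
    #include <stdbool.h>
    #include <string.h>
    #include <netinet/in.h>

    struct mfc_sketch {
        struct in6_addr origin;   /* all-zeros for (*,G) and (*,*) */
        struct in6_addr mcastgrp; /* all-zeros for (*,*) */
    };

    static bool addr_any(const struct in6_addr *a)
    {
        static const struct in6_addr zero;
        return memcmp(a, &zero, sizeof(zero)) == 0;
    }

    static struct mfc_sketch *cache_lookup(struct mfc_sketch *tbl, int n,
                                           const struct in6_addr *src,
                                           const struct in6_addr *grp)
    {
        struct mfc_sketch *any_g = NULL, *any_any = NULL;
        int i;

        for (i = 0; i < n; i++) {
            struct mfc_sketch *e = &tbl[i];

            if (!memcmp(&e->origin, src, 16) &&
                !memcmp(&e->mcastgrp, grp, 16))
                return e;               /* exact (S,G) wins */
            if (addr_any(&e->origin) && !memcmp(&e->mcastgrp, grp, 16))
                any_g = e;              /* (*,G) fallback */
            if (addr_any(&e->origin) && addr_any(&e->mcastgrp))
                any_any = e;            /* (*,*) last resort */
        }
        return any_g ? any_g : any_any;
    }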
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31c9d4d..d1e2e8ef29c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
476 msg.msg_controllen = optlen; 476 msg.msg_controllen = optlen;
477 msg.msg_control = (void*)(opt+1); 477 msg.msg_control = (void*)(opt+1);
478 478
479 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
480 &junk); 480 &junk, &junk);
481 if (retv) 481 if (retv)
482 goto done; 482 goto done;
483update: 483update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1002 release_sock(sk); 1002 release_sock(sk);
1003 1003
1004 if (skb) { 1004 if (skb) {
1005 int err = datagram_recv_ctl(sk, &msg, skb); 1005 int err = ip6_datagram_recv_ctl(sk, &msg, skb);
1006 kfree_skb(skb); 1006 kfree_skb(skb);
1007 if (err) 1007 if (err)
1008 return err; 1008 return err;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 28dfa5f3801f..bfa6cc36ef2a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -376,8 +376,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
376 goto done; /* err = -EADDRNOTAVAIL */ 376 goto done; /* err = -EADDRNOTAVAIL */
377 rv = !0; 377 rv = !0;
378 for (i=0; i<psl->sl_count; i++) { 378 for (i=0; i<psl->sl_count; i++) {
379 rv = memcmp(&psl->sl_addr[i], source, 379 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
380 sizeof(struct in6_addr));
381 if (rv == 0) 380 if (rv == 0)
382 break; 381 break;
383 } 382 }
@@ -427,12 +426,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
427 } 426 }
428 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 427 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
429 for (i=0; i<psl->sl_count; i++) { 428 for (i=0; i<psl->sl_count; i++) {
430 rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr)); 429 rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
431 if (rv == 0) 430 if (rv == 0) /* address already there: error */
432 break; 431 goto done;
433 } 432 }
434 if (rv == 0) /* address already there is an error */
435 goto done;
436 for (j=psl->sl_count-1; j>=i; j--) 433 for (j=psl->sl_count-1; j>=i; j--)
437 psl->sl_addr[j+1] = psl->sl_addr[j]; 434 psl->sl_addr[j+1] = psl->sl_addr[j];
438 psl->sl_addr[i] = *source; 435 psl->sl_addr[i] = *source;
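
The memcmp()-to-ipv6_addr_equal() conversion above is about using the dedicated comparison helper rather than byte-wise compare. As a rough userspace approximation (the kernel version compares two 64-bit words on 64-bit builds), ipv6_addr_equal() reduces to:

    #include <stdbool.h>
    #include <stdint.h>
    #include <netinet/in.h>

    static inline bool ipv6_addr_equal_sketch(const struct in6_addr *a,
                                              const struct in6_addr *b)
    {
        const uint32_t *x = (const void *)a, *y = (const void *)b;

        return ((x[0] ^ y[0]) | (x[1] ^ y[1]) |
                (x[2] ^ y[2]) | (x[3] ^ y[3])) == 0;
    }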
@@ -664,6 +661,10 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
664 struct net_device *dev = mc->idev->dev; 661 struct net_device *dev = mc->idev->dev;
665 char buf[MAX_ADDR_LEN]; 662 char buf[MAX_ADDR_LEN];
666 663
664 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
665 IPV6_ADDR_SCOPE_LINKLOCAL)
666 return;
667
667 spin_lock_bh(&mc->mca_lock); 668 spin_lock_bh(&mc->mca_lock);
668 if (!(mc->mca_flags&MAF_LOADED)) { 669 if (!(mc->mca_flags&MAF_LOADED)) {
669 mc->mca_flags |= MAF_LOADED; 670 mc->mca_flags |= MAF_LOADED;
@@ -690,6 +691,10 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
690 struct net_device *dev = mc->idev->dev; 691 struct net_device *dev = mc->idev->dev;
691 char buf[MAX_ADDR_LEN]; 692 char buf[MAX_ADDR_LEN];
692 693
694 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
695 IPV6_ADDR_SCOPE_LINKLOCAL)
696 return;
697
693 spin_lock_bh(&mc->mca_lock); 698 spin_lock_bh(&mc->mca_lock);
694 if (mc->mca_flags&MAF_LOADED) { 699 if (mc->mca_flags&MAF_LOADED) {
695 mc->mca_flags &= ~MAF_LOADED; 700 mc->mca_flags &= ~MAF_LOADED;
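
The two guards added above make MLD join/leave a no-op for groups scoped below link-local, since interface-local multicast never appears on the wire. IPV6_ADDR_MC_SCOPE() is simply the low nibble of the second byte of the group address (RFC 4291); a standalone sketch of the test:

    #include <netinet/in.h>

    #define MC_SCOPE(a)        ((a)->s6_addr[1] & 0x0f)
    #define SCOPE_LINKLOCAL    0x02    /* 0x01 is interface-local */

    static int mld_report_needed(const struct in6_addr *grp)
    {
        return MC_SCOPE(grp) >= SCOPE_LINKLOCAL;
    }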
@@ -935,33 +940,6 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
935} 940}
936 941
937/* 942/*
938 * identify MLD packets for MLD filter exceptions
939 */
940bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
941{
942 struct icmp6hdr *pic;
943
944 if (nexthdr != IPPROTO_ICMPV6)
945 return false;
946
947 if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
948 return false;
949
950 pic = icmp6_hdr(skb);
951
952 switch (pic->icmp6_type) {
953 case ICMPV6_MGM_QUERY:
954 case ICMPV6_MGM_REPORT:
955 case ICMPV6_MGM_REDUCTION:
956 case ICMPV6_MLD2_REPORT:
957 return true;
958 default:
959 break;
960 }
961 return false;
962}
963
964/*
965 * check if the interface/address pair is valid 943 * check if the interface/address pair is valid
966 */ 944 */
967bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, 945bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
@@ -1340,6 +1318,31 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1340 return scount; 1318 return scount;
1341} 1319}
1342 1320
1321static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1322 struct net_device *dev,
1323 const struct in6_addr *saddr,
1324 const struct in6_addr *daddr,
1325 int proto, int len)
1326{
1327 struct ipv6hdr *hdr;
1328
1329 skb->protocol = htons(ETH_P_IPV6);
1330 skb->dev = dev;
1331
1332 skb_reset_network_header(skb);
1333 skb_put(skb, sizeof(struct ipv6hdr));
1334 hdr = ipv6_hdr(skb);
1335
1336 ip6_flow_hdr(hdr, 0, 0);
1337
1338 hdr->payload_len = htons(len);
1339 hdr->nexthdr = proto;
1340 hdr->hop_limit = inet6_sk(sk)->hop_limit;
1341
1342 hdr->saddr = *saddr;
1343 hdr->daddr = *daddr;
1344}
1345
1343static struct sk_buff *mld_newpack(struct net_device *dev, int size) 1346static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1344{ 1347{
1345 struct net *net = dev_net(dev); 1348 struct net *net = dev_net(dev);
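
The new ip6_mc_hdr() gives mcast.c its own IPv6 header builder instead of borrowing ip6_nd_hdr() from neighbour discovery. The layout it fills is the fixed 40-byte IPv6 header; below is a self-contained sketch of the same field writes, where the first word packs what ip6_flow_hdr() packs (version, traffic class, flow label):

    #include <stdint.h>
    #include <arpa/inet.h>

    struct ip6hdr_sketch {
        uint32_t vtf;           /* 4-bit version | 8-bit tclass | 20-bit flow */
        uint16_t payload_len;
        uint8_t  nexthdr;
        uint8_t  hop_limit;
        struct in6_addr saddr, daddr;
    };

    static void fill_ip6_hdr(struct ip6hdr_sketch *h,
                             const struct in6_addr *src,
                             const struct in6_addr *dst,
                             uint8_t proto, uint16_t len, uint8_t hlim)
    {
        h->vtf = htonl(6u << 28);    /* version 6, tclass 0, flow 0 */
        h->payload_len = htons(len);
        h->nexthdr = proto;
        h->hop_limit = hlim;
        h->saddr = *src;
        h->daddr = *dst;
    }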
@@ -1375,7 +1378,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1375 } else 1378 } else
1376 saddr = &addr_buf; 1379 saddr = &addr_buf;
1377 1380
1378 ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0); 1381 ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1379 1382
1380 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1383 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1381 1384
@@ -1418,7 +1421,7 @@ static void mld_sendpack(struct sk_buff *skb)
1418 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT, 1421 icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1419 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1422 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1420 skb->dev->ifindex); 1423 skb->dev->ifindex);
1421 dst = icmp6_dst_alloc(skb->dev, NULL, &fl6); 1424 dst = icmp6_dst_alloc(skb->dev, &fl6);
1422 1425
1423 err = 0; 1426 err = 0;
1424 if (IS_ERR(dst)) { 1427 if (IS_ERR(dst)) {
@@ -1767,7 +1770,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1767 } else 1770 } else
1768 saddr = &addr_buf; 1771 saddr = &addr_buf;
1769 1772
1770 ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len); 1773 ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
1771 1774
1772 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1775 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1773 1776
@@ -1786,7 +1789,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1786 icmpv6_flow_init(sk, &fl6, type, 1789 icmpv6_flow_init(sk, &fl6, type,
1787 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 1790 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1788 skb->dev->ifindex); 1791 skb->dev->ifindex);
1789 dst = icmp6_dst_alloc(skb->dev, NULL, &fl6); 1792 dst = icmp6_dst_alloc(skb->dev, &fl6);
1790 if (IS_ERR(dst)) { 1793 if (IS_ERR(dst)) {
1791 err = PTR_ERR(dst); 1794 err = PTR_ERR(dst);
1792 goto err_out; 1795 goto err_out;
@@ -2596,10 +2599,10 @@ static int __net_init igmp6_proc_init(struct net *net)
2596 int err; 2599 int err;
2597 2600
2598 err = -ENOMEM; 2601 err = -ENOMEM;
2599 if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops)) 2602 if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
2600 goto out; 2603 goto out;
2601 if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO, 2604 if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
2602 &igmp6_mcf_seq_fops)) 2605 &igmp6_mcf_seq_fops))
2603 goto out_proc_net_igmp6; 2606 goto out_proc_net_igmp6;
2604 2607
2605 err = 0; 2608 err = 0;
@@ -2607,14 +2610,14 @@ out:
2607 return err; 2610 return err;
2608 2611
2609out_proc_net_igmp6: 2612out_proc_net_igmp6:
2610 proc_net_remove(net, "igmp6"); 2613 remove_proc_entry("igmp6", net->proc_net);
2611 goto out; 2614 goto out;
2612} 2615}
2613 2616
2614static void __net_exit igmp6_proc_exit(struct net *net) 2617static void __net_exit igmp6_proc_exit(struct net *net)
2615{ 2618{
2616 proc_net_remove(net, "mcfilter6"); 2619 remove_proc_entry("mcfilter6", net->proc_net);
2617 proc_net_remove(net, "igmp6"); 2620 remove_proc_entry("igmp6", net->proc_net);
2618} 2621}
2619#else 2622#else
2620static inline int igmp6_proc_init(struct net *net) 2623static inline int igmp6_proc_init(struct net *net)
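
The proc conversion above is mechanical: proc_net_fops_create()/proc_net_remove() give way to the generic proc_create()/remove_proc_entry() aimed at the per-namespace net->proc_net directory. A hedged skeleton of the resulting pattern, where "example6" and my_fops are placeholders for whatever entry and file_operations the module owns:

    static int __net_init example_proc_init(struct net *net)
    {
        if (!proc_create("example6", S_IRUGO, net->proc_net, &my_fops))
            return -ENOMEM;
        return 0;
    }

    static void __net_exit example_proc_exit(struct net *net)
    {
        remove_proc_entry("example6", net->proc_net);
    }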
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index f2a007b7bde3..76ef4353d518 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -143,16 +143,12 @@ struct neigh_table nd_tbl = {
143 .gc_thresh3 = 1024, 143 .gc_thresh3 = 1024,
144}; 144};
145 145
146static inline int ndisc_opt_addr_space(struct net_device *dev) 146static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data)
147{ 147{
148 return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type)); 148 int pad = ndisc_addr_option_pad(skb->dev->type);
149} 149 int data_len = skb->dev->addr_len;
150 150 int space = ndisc_opt_addr_space(skb->dev);
151static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len, 151 u8 *opt = skb_put(skb, space);
152 unsigned short addr_type)
153{
154 int pad = ndisc_addr_option_pad(addr_type);
155 int space = NDISC_OPT_SPACE(data_len + pad);
156 152
157 opt[0] = type; 153 opt[0] = type;
158 opt[1] = space>>3; 154 opt[1] = space>>3;
@@ -166,7 +162,6 @@ static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len,
166 opt += data_len; 162 opt += data_len;
167 if ((space -= data_len) > 0) 163 if ((space -= data_len) > 0)
168 memset(opt, 0, space); 164 memset(opt, 0, space);
169 return opt + space;
170} 165}
171 166
172static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, 167static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
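
ndisc_fill_addr_option() now derives everything from skb->dev, but the sizing rule is unchanged: an NDISC link-layer address option is a type byte and a length byte (counted in 8-octet units) plus the address, zero-padded up to a multiple of 8. A runnable arithmetic check, where pad is the type-specific ndisc_addr_option_pad() value (0 for Ethernet, 2 for InfiniBand):

    #include <stdio.h>

    static int ndisc_opt_space(int addr_len, int pad)
    {
        return ((addr_len + pad + 2 + 7) / 8) * 8;    /* 2 = type + len */
    }

    int main(void)
    {
        printf("%d\n", ndisc_opt_space(6, 0));     /* Ethernet: 8 bytes */
        printf("%d\n", ndisc_opt_space(20, 2));    /* InfiniBand: 24 bytes */
        return 0;
    }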
@@ -370,91 +365,88 @@ static void pndisc_destructor(struct pneigh_entry *n)
370 ipv6_dev_mc_dec(dev, &maddr); 365 ipv6_dev_mc_dec(dev, &maddr);
371} 366}
372 367
373static struct sk_buff *ndisc_build_skb(struct net_device *dev, 368static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
374 const struct in6_addr *daddr, 369 int len)
375 const struct in6_addr *saddr,
376 struct icmp6hdr *icmp6h,
377 const struct in6_addr *target,
378 int llinfo)
379{ 370{
380 struct net *net = dev_net(dev);
381 struct sock *sk = net->ipv6.ndisc_sk;
382 struct sk_buff *skb;
383 struct icmp6hdr *hdr;
384 int hlen = LL_RESERVED_SPACE(dev); 371 int hlen = LL_RESERVED_SPACE(dev);
385 int tlen = dev->needed_tailroom; 372 int tlen = dev->needed_tailroom;
386 int len; 373 struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
374 struct sk_buff *skb;
387 int err; 375 int err;
388 u8 *opt;
389
390 if (!dev->addr_len)
391 llinfo = 0;
392
393 len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
394 if (llinfo)
395 len += ndisc_opt_addr_space(dev);
396 376
397 skb = sock_alloc_send_skb(sk, 377 skb = sock_alloc_send_skb(sk,
398 (MAX_HEADER + sizeof(struct ipv6hdr) + 378 hlen + sizeof(struct ipv6hdr) + len + tlen,
399 len + hlen + tlen),
400 1, &err); 379 1, &err);
401 if (!skb) { 380 if (!skb) {
402 ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n", 381 ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
403 __func__, err); 382 __func__, err);
404 return NULL; 383 return NULL;
405 } 384 }
406 385
407 skb_reserve(skb, hlen); 386 skb->protocol = htons(ETH_P_IPV6);
408 ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); 387 skb->dev = dev;
409 388
410 skb->transport_header = skb->tail; 389 skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
411 skb_put(skb, len); 390 skb_reset_transport_header(skb);
412 391
413 hdr = (struct icmp6hdr *)skb_transport_header(skb); 392 return skb;
414 memcpy(hdr, icmp6h, sizeof(*hdr)); 393}
415 394
416 opt = skb_transport_header(skb) + sizeof(struct icmp6hdr); 395static void ip6_nd_hdr(struct sk_buff *skb,
417 if (target) { 396 const struct in6_addr *saddr,
418 *(struct in6_addr *)opt = *target; 397 const struct in6_addr *daddr,
419 opt += sizeof(*target); 398 int hop_limit, int len)
420 } 399{
400 struct ipv6hdr *hdr;
421 401
422 if (llinfo) 402 skb_push(skb, sizeof(*hdr));
423 ndisc_fill_addr_option(opt, llinfo, dev->dev_addr, 403 skb_reset_network_header(skb);
424 dev->addr_len, dev->type); 404 hdr = ipv6_hdr(skb);
425 405
426 hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len, 406 ip6_flow_hdr(hdr, 0, 0);
427 IPPROTO_ICMPV6,
428 csum_partial(hdr,
429 len, 0));
430 407
431 return skb; 408 hdr->payload_len = htons(len);
409 hdr->nexthdr = IPPROTO_ICMPV6;
410 hdr->hop_limit = hop_limit;
411
412 hdr->saddr = *saddr;
413 hdr->daddr = *daddr;
432} 414}
433 415
434static void ndisc_send_skb(struct sk_buff *skb, struct net_device *dev, 416static void ndisc_send_skb(struct sk_buff *skb,
435 struct neighbour *neigh,
436 const struct in6_addr *daddr, 417 const struct in6_addr *daddr,
437 const struct in6_addr *saddr, 418 const struct in6_addr *saddr)
438 struct icmp6hdr *icmp6h)
439{ 419{
440 struct flowi6 fl6; 420 struct dst_entry *dst = skb_dst(skb);
441 struct dst_entry *dst; 421 struct net *net = dev_net(skb->dev);
442 struct net *net = dev_net(dev);
443 struct sock *sk = net->ipv6.ndisc_sk; 422 struct sock *sk = net->ipv6.ndisc_sk;
444 struct inet6_dev *idev; 423 struct inet6_dev *idev;
445 int err; 424 int err;
425 struct icmp6hdr *icmp6h = icmp6_hdr(skb);
446 u8 type; 426 u8 type;
447 427
448 type = icmp6h->icmp6_type; 428 type = icmp6h->icmp6_type;
449 429
450 icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex); 430 if (!dst) {
451 dst = icmp6_dst_alloc(dev, neigh, &fl6); 431 struct sock *sk = net->ipv6.ndisc_sk;
452 if (IS_ERR(dst)) { 432 struct flowi6 fl6;
453 kfree_skb(skb); 433
454 return; 434 icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex);
435 dst = icmp6_dst_alloc(skb->dev, &fl6);
436 if (IS_ERR(dst)) {
437 kfree_skb(skb);
438 return;
439 }
440
441 skb_dst_set(skb, dst);
455 } 442 }
456 443
457 skb_dst_set(skb, dst); 444 icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, skb->len,
445 IPPROTO_ICMPV6,
446 csum_partial(icmp6h,
447 skb->len, 0));
448
449 ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);
458 450
459 rcu_read_lock(); 451 rcu_read_lock();
460 idev = __in6_dev_get(dst->dev); 452 idev = __in6_dev_get(dst->dev);
@@ -470,36 +462,17 @@ static void ndisc_send_skb(struct sk_buff *skb, struct net_device *dev,
470 rcu_read_unlock(); 462 rcu_read_unlock();
471} 463}
472 464
473/*
474 * Send a Neighbour Discover packet
475 */
476static void __ndisc_send(struct net_device *dev,
477 struct neighbour *neigh,
478 const struct in6_addr *daddr,
479 const struct in6_addr *saddr,
480 struct icmp6hdr *icmp6h, const struct in6_addr *target,
481 int llinfo)
482{
483 struct sk_buff *skb;
484
485 skb = ndisc_build_skb(dev, daddr, saddr, icmp6h, target, llinfo);
486 if (!skb)
487 return;
488
489 ndisc_send_skb(skb, dev, neigh, daddr, saddr, icmp6h);
490}
491
492static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, 465static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
493 const struct in6_addr *daddr, 466 const struct in6_addr *daddr,
494 const struct in6_addr *solicited_addr, 467 const struct in6_addr *solicited_addr,
495 int router, int solicited, int override, int inc_opt) 468 bool router, bool solicited, bool override, bool inc_opt)
496{ 469{
470 struct sk_buff *skb;
497 struct in6_addr tmpaddr; 471 struct in6_addr tmpaddr;
498 struct inet6_ifaddr *ifp; 472 struct inet6_ifaddr *ifp;
499 const struct in6_addr *src_addr; 473 const struct in6_addr *src_addr;
500 struct icmp6hdr icmp6h = { 474 struct nd_msg *msg;
501 .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT, 475 int optlen = 0;
502 };
503 476
504 /* for anycast or proxy, solicited_addr != src_addr */ 477 /* for anycast or proxy, solicited_addr != src_addr */
505 ifp = ipv6_get_ifaddr(dev_net(dev), solicited_addr, dev, 1); 478 ifp = ipv6_get_ifaddr(dev_net(dev), solicited_addr, dev, 1);
@@ -517,13 +490,32 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
517 src_addr = &tmpaddr; 490 src_addr = &tmpaddr;
518 } 491 }
519 492
520 icmp6h.icmp6_router = router; 493 if (!dev->addr_len)
521 icmp6h.icmp6_solicited = solicited; 494 inc_opt = 0;
522 icmp6h.icmp6_override = override; 495 if (inc_opt)
496 optlen += ndisc_opt_addr_space(dev);
497
498 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
499 if (!skb)
500 return;
501
502 msg = (struct nd_msg *)skb_put(skb, sizeof(*msg));
503 *msg = (struct nd_msg) {
504 .icmph = {
505 .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
506 .icmp6_router = router,
507 .icmp6_solicited = solicited,
508 .icmp6_override = override,
509 },
510 .target = *solicited_addr,
511 };
512
513 if (inc_opt)
514 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR,
515 dev->dev_addr);
523 516
524 __ndisc_send(dev, neigh, daddr, src_addr, 517
525 &icmp6h, solicited_addr, 518 ndisc_send_skb(skb, daddr, src_addr);
526 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
527} 519}
528 520
529static void ndisc_send_unsol_na(struct net_device *dev) 521static void ndisc_send_unsol_na(struct net_device *dev)
@@ -551,10 +543,11 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
551 const struct in6_addr *solicit, 543 const struct in6_addr *solicit,
552 const struct in6_addr *daddr, const struct in6_addr *saddr) 544 const struct in6_addr *daddr, const struct in6_addr *saddr)
553{ 545{
546 struct sk_buff *skb;
554 struct in6_addr addr_buf; 547 struct in6_addr addr_buf;
555 struct icmp6hdr icmp6h = { 548 int inc_opt = dev->addr_len;
556 .icmp6_type = NDISC_NEIGHBOUR_SOLICITATION, 549 int optlen = 0;
557 }; 550 struct nd_msg *msg;
558 551
559 if (saddr == NULL) { 552 if (saddr == NULL) {
560 if (ipv6_get_lladdr(dev, &addr_buf, 553 if (ipv6_get_lladdr(dev, &addr_buf,
@@ -563,18 +556,37 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
563 saddr = &addr_buf; 556 saddr = &addr_buf;
564 } 557 }
565 558
566 __ndisc_send(dev, neigh, daddr, saddr, 559 if (ipv6_addr_any(saddr))
567 &icmp6h, solicit, 560 inc_opt = 0;
568 !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0); 561 if (inc_opt)
562 optlen += ndisc_opt_addr_space(dev);
563
564 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
565 if (!skb)
566 return;
567
568 msg = (struct nd_msg *)skb_put(skb, sizeof(*msg));
569 *msg = (struct nd_msg) {
570 .icmph = {
571 .icmp6_type = NDISC_NEIGHBOUR_SOLICITATION,
572 },
573 .target = *solicit,
574 };
575
576 if (inc_opt)
577 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
578 dev->dev_addr);
579
580 ndisc_send_skb(skb, daddr, saddr);
569} 581}
570 582
571void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, 583void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
572 const struct in6_addr *daddr) 584 const struct in6_addr *daddr)
573{ 585{
574 struct icmp6hdr icmp6h = { 586 struct sk_buff *skb;
575 .icmp6_type = NDISC_ROUTER_SOLICITATION, 587 struct rs_msg *msg;
576 };
577 int send_sllao = dev->addr_len; 588 int send_sllao = dev->addr_len;
589 int optlen = 0;
578 590
579#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 591#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
580 /* 592 /*
@@ -598,9 +610,27 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
598 } 610 }
599 } 611 }
600#endif 612#endif
601 __ndisc_send(dev, NULL, daddr, saddr, 613 if (!dev->addr_len)
602 &icmp6h, NULL, 614 send_sllao = 0;
603 send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0); 615 if (send_sllao)
616 optlen += ndisc_opt_addr_space(dev);
617
618 skb = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
619 if (!skb)
620 return;
621
622 msg = (struct rs_msg *)skb_put(skb, sizeof(*msg));
623 *msg = (struct rs_msg) {
624 .icmph = {
625 .icmp6_type = NDISC_ROUTER_SOLICITATION,
626 },
627 };
628
629 if (send_sllao)
630 ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
631 dev->dev_addr);
632
633 ndisc_send_skb(skb, daddr, saddr);
604} 634}
605 635
606 636
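
After this refactor ndisc_send_na(), ndisc_send_ns() and ndisc_send_rs() all follow one shape: size the optional link-layer address option, allocate, skb_put() the message struct, append the option, then hand off to ndisc_send_skb(), which checksums and prepends the IPv6 header. A hedged composite of that shape, using the helpers defined in this patch (ndisc_send_generic_sketch itself is not a kernel symbol, and the per-type struct fill is elided):

    static void ndisc_send_generic_sketch(struct net_device *dev,
                                          const struct in6_addr *daddr,
                                          const struct in6_addr *saddr,
                                          int msg_size, int opt_type)
    {
        int optlen = dev->addr_len ? ndisc_opt_addr_space(dev) : 0;
        struct sk_buff *skb = ndisc_alloc_skb(dev, msg_size + optlen);

        if (!skb)
            return;
        /* skb_put() the nd_msg/rs_msg here and fill its icmph */
        if (optlen)
            ndisc_fill_addr_option(skb, opt_type, dev->dev_addr);
        ndisc_send_skb(skb, daddr, saddr);    /* csum + IPv6 header */
    }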
@@ -676,6 +706,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
676 bool inc; 706 bool inc;
677 int is_router = -1; 707 int is_router = -1;
678 708
709 if (skb->len < sizeof(struct nd_msg)) {
710 ND_PRINTK(2, warn, "NS: packet too short\n");
711 return;
712 }
713
679 if (ipv6_addr_is_multicast(&msg->target)) { 714 if (ipv6_addr_is_multicast(&msg->target)) {
680 ND_PRINTK(2, warn, "NS: multicast target address\n"); 715 ND_PRINTK(2, warn, "NS: multicast target address\n");
681 return; 716 return;
@@ -685,11 +720,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
685 * RFC2461 7.1.1: 720 * RFC2461 7.1.1:
686 * DAD has to be destined for solicited node multicast address. 721 * DAD has to be destined for solicited node multicast address.
687 */ 722 */
688 if (dad && 723 if (dad && !ipv6_addr_is_solict_mult(daddr)) {
689 !(daddr->s6_addr32[0] == htonl(0xff020000) &&
690 daddr->s6_addr32[1] == htonl(0x00000000) &&
691 daddr->s6_addr32[2] == htonl(0x00000001) &&
692 daddr->s6_addr [12] == 0xff )) {
693 ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n"); 724 ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
694 return; 725 return;
695 } 726 }
@@ -780,11 +811,11 @@ static void ndisc_recv_ns(struct sk_buff *skb)
780 } 811 }
781 812
782 if (is_router < 0) 813 if (is_router < 0)
783 is_router = !!idev->cnf.forwarding; 814 is_router = idev->cnf.forwarding;
784 815
785 if (dad) { 816 if (dad) {
786 ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &msg->target, 817 ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &msg->target,
787 is_router, 0, (ifp != NULL), 1); 818 !!is_router, false, (ifp != NULL), true);
788 goto out; 819 goto out;
789 } 820 }
790 821
@@ -805,8 +836,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
805 NEIGH_UPDATE_F_OVERRIDE); 836 NEIGH_UPDATE_F_OVERRIDE);
806 if (neigh || !dev->header_ops) { 837 if (neigh || !dev->header_ops) {
807 ndisc_send_na(dev, neigh, saddr, &msg->target, 838 ndisc_send_na(dev, neigh, saddr, &msg->target,
808 is_router, 839 !!is_router,
809 1, (ifp != NULL && inc), inc); 840 true, (ifp != NULL && inc), inc);
810 if (neigh) 841 if (neigh)
811 neigh_release(neigh); 842 neigh_release(neigh);
812 } 843 }
@@ -1314,6 +1345,12 @@ out:
1314 1345
1315static void ndisc_redirect_rcv(struct sk_buff *skb) 1346static void ndisc_redirect_rcv(struct sk_buff *skb)
1316{ 1347{
1348 u8 *hdr;
1349 struct ndisc_options ndopts;
1350 struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
1351 u32 ndoptlen = skb->tail - (skb->transport_header +
1352 offsetof(struct rd_msg, opt));
1353
1317#ifdef CONFIG_IPV6_NDISC_NODETYPE 1354#ifdef CONFIG_IPV6_NDISC_NODETYPE
1318 switch (skb->ndisc_nodetype) { 1355 switch (skb->ndisc_nodetype) {
1319 case NDISC_NODETYPE_HOST: 1356 case NDISC_NODETYPE_HOST:
@@ -1330,28 +1367,48 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1330 return; 1367 return;
1331 } 1368 }
1332 1369
1370 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1371 return;
1372
1373 if (!ndopts.nd_opts_rh)
1374 return;
1375
1376 hdr = (u8 *)ndopts.nd_opts_rh;
1377 hdr += 8;
1378 if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
1379 return;
1380
1333 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0); 1381 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
1334} 1382}
1335 1383
1384static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1385 struct sk_buff *orig_skb,
1386 int rd_len)
1387{
1388 u8 *opt = skb_put(skb, rd_len);
1389
1390 memset(opt, 0, 8);
1391 *(opt++) = ND_OPT_REDIRECT_HDR;
1392 *(opt++) = (rd_len >> 3);
1393 opt += 6;
1394
1395 memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
1396}
1397
1336void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1398void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1337{ 1399{
1338 struct net_device *dev = skb->dev; 1400 struct net_device *dev = skb->dev;
1339 struct net *net = dev_net(dev); 1401 struct net *net = dev_net(dev);
1340 struct sock *sk = net->ipv6.ndisc_sk; 1402 struct sock *sk = net->ipv6.ndisc_sk;
1341 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1403 int optlen = 0;
1342 struct inet_peer *peer; 1404 struct inet_peer *peer;
1343 struct sk_buff *buff; 1405 struct sk_buff *buff;
1344 struct icmp6hdr *icmph; 1406 struct rd_msg *msg;
1345 struct in6_addr saddr_buf; 1407 struct in6_addr saddr_buf;
1346 struct in6_addr *addrp;
1347 struct rt6_info *rt; 1408 struct rt6_info *rt;
1348 struct dst_entry *dst; 1409 struct dst_entry *dst;
1349 struct inet6_dev *idev;
1350 struct flowi6 fl6; 1410 struct flowi6 fl6;
1351 u8 *opt;
1352 int hlen, tlen;
1353 int rd_len; 1411 int rd_len;
1354 int err;
1355 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1412 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
1356 bool ret; 1413 bool ret;
1357 1414
@@ -1407,7 +1464,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1407 memcpy(ha_buf, neigh->ha, dev->addr_len); 1464 memcpy(ha_buf, neigh->ha, dev->addr_len);
1408 read_unlock_bh(&neigh->lock); 1465 read_unlock_bh(&neigh->lock);
1409 ha = ha_buf; 1466 ha = ha_buf;
1410 len += ndisc_opt_addr_space(dev); 1467 optlen += ndisc_opt_addr_space(dev);
1411 } else 1468 } else
1412 read_unlock_bh(&neigh->lock); 1469 read_unlock_bh(&neigh->lock);
1413 1470
@@ -1415,80 +1472,40 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1415 } 1472 }
1416 1473
1417 rd_len = min_t(unsigned int, 1474 rd_len = min_t(unsigned int,
1418 IPV6_MIN_MTU-sizeof(struct ipv6hdr)-len, skb->len + 8); 1475 IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(*msg) - optlen,
1476 skb->len + 8);
1419 rd_len &= ~0x7; 1477 rd_len &= ~0x7;
1420 len += rd_len; 1478 optlen += rd_len;
1421
1422 hlen = LL_RESERVED_SPACE(dev);
1423 tlen = dev->needed_tailroom;
1424 buff = sock_alloc_send_skb(sk,
1425 (MAX_HEADER + sizeof(struct ipv6hdr) +
1426 len + hlen + tlen),
1427 1, &err);
1428 if (buff == NULL) {
1429 ND_PRINTK(0, err,
1430 "Redirect: %s failed to allocate an skb, err=%d\n",
1431 __func__, err);
1432 goto release;
1433 }
1434
1435 skb_reserve(buff, hlen);
1436 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
1437 IPPROTO_ICMPV6, len);
1438
1439 skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data);
1440 skb_put(buff, len);
1441 icmph = icmp6_hdr(buff);
1442 1479
1443 memset(icmph, 0, sizeof(struct icmp6hdr)); 1480 buff = ndisc_alloc_skb(dev, sizeof(*msg) + optlen);
1444 icmph->icmp6_type = NDISC_REDIRECT; 1481 if (!buff)
1445 1482 goto release;
1446 /*
1447 * copy target and destination addresses
1448 */
1449
1450 addrp = (struct in6_addr *)(icmph + 1);
1451 *addrp = *target;
1452 addrp++;
1453 *addrp = ipv6_hdr(skb)->daddr;
1454 1483
1455 opt = (u8*) (addrp + 1); 1484 msg = (struct rd_msg *)skb_put(buff, sizeof(*msg));
1485 *msg = (struct rd_msg) {
1486 .icmph = {
1487 .icmp6_type = NDISC_REDIRECT,
1488 },
1489 .target = *target,
1490 .dest = ipv6_hdr(skb)->daddr,
1491 };
1456 1492
1457 /* 1493 /*
1458 * include target_address option 1494 * include target_address option
1459 */ 1495 */
1460 1496
1461 if (ha) 1497 if (ha)
1462 opt = ndisc_fill_addr_option(opt, ND_OPT_TARGET_LL_ADDR, ha, 1498 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha);
1463 dev->addr_len, dev->type);
1464 1499
1465 /* 1500 /*
1466 * build redirect option and copy skb over to the new packet. 1501 * build redirect option and copy skb over to the new packet.
1467 */ 1502 */
1468 1503
1469 memset(opt, 0, 8); 1504 if (rd_len)
1470 *(opt++) = ND_OPT_REDIRECT_HDR; 1505 ndisc_fill_redirect_hdr_option(buff, skb, rd_len);
1471 *(opt++) = (rd_len >> 3);
1472 opt += 6;
1473
1474 memcpy(opt, ipv6_hdr(skb), rd_len - 8);
1475
1476 icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr,
1477 len, IPPROTO_ICMPV6,
1478 csum_partial(icmph, len, 0));
1479 1506
1480 skb_dst_set(buff, dst); 1507 skb_dst_set(buff, dst);
1481 rcu_read_lock(); 1508 ndisc_send_skb(buff, &ipv6_hdr(skb)->saddr, &saddr_buf);
1482 idev = __in6_dev_get(dst->dev);
1483 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1484 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
1485 dst_output);
1486 if (!err) {
1487 ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT);
1488 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1489 }
1490
1491 rcu_read_unlock();
1492 return; 1509 return;
1493 1510
1494release: 1511release:
@@ -1505,7 +1522,7 @@ int ndisc_rcv(struct sk_buff *skb)
1505{ 1522{
1506 struct nd_msg *msg; 1523 struct nd_msg *msg;
1507 1524
1508 if (!pskb_may_pull(skb, skb->len)) 1525 if (skb_linearize(skb))
1509 return 0; 1526 return 0;
1510 1527
1511 msg = (struct nd_msg *)skb_transport_header(skb); 1528 msg = (struct nd_msg *)skb_transport_header(skb);
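
The final hunk swaps pskb_may_pull(skb, skb->len) for skb_linearize(). Both guarantee the whole packet is addressable in the linear area before the nd_msg cast; the tests are inverted because their return conventions differ, as the comparison sketch below notes:

    /* Equivalent guards, opposite polarity (sketch):
     *   if (!pskb_may_pull(skb, skb->len))   old: true on success
     *       return 0;
     *   if (skb_linearize(skb))              new: 0 on success,
     *       return 0;                             -errno on failure
     * After either guard, skb->data covers all skb->len bytes. */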
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 125a90d6a795..341b54ade72c 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1098,7 +1098,7 @@ static int get_info(struct net *net, void __user *user,
1098#endif 1098#endif
1099 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 1099 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1100 "ip6table_%s", name); 1100 "ip6table_%s", name);
1101 if (t && !IS_ERR(t)) { 1101 if (!IS_ERR_OR_NULL(t)) {
1102 struct ip6t_getinfo info; 1102 struct ip6t_getinfo info;
1103 const struct xt_table_info *private = t->private; 1103 const struct xt_table_info *private = t->private;
1104#ifdef CONFIG_COMPAT 1104#ifdef CONFIG_COMPAT
@@ -1157,7 +1157,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1157 } 1157 }
1158 1158
1159 t = xt_find_table_lock(net, AF_INET6, get.name); 1159 t = xt_find_table_lock(net, AF_INET6, get.name);
1160 if (t && !IS_ERR(t)) { 1160 if (!IS_ERR_OR_NULL(t)) {
1161 struct xt_table_info *private = t->private; 1161 struct xt_table_info *private = t->private;
1162 duprintf("t->private->number = %u\n", private->number); 1162 duprintf("t->private->number = %u\n", private->number);
1163 if (get.size == private->size) 1163 if (get.size == private->size)
@@ -1197,7 +1197,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1197 1197
1198 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 1198 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1199 "ip6table_%s", name); 1199 "ip6table_%s", name);
1200 if (!t || IS_ERR(t)) { 1200 if (IS_ERR_OR_NULL(t)) {
1201 ret = t ? PTR_ERR(t) : -ENOENT; 1201 ret = t ? PTR_ERR(t) : -ENOENT;
1202 goto free_newinfo_counters_untrans; 1202 goto free_newinfo_counters_untrans;
1203 } 1203 }
@@ -1355,7 +1355,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1355 } 1355 }
1356 1356
1357 t = xt_find_table_lock(net, AF_INET6, name); 1357 t = xt_find_table_lock(net, AF_INET6, name);
1358 if (!t || IS_ERR(t)) { 1358 if (IS_ERR_OR_NULL(t)) {
1359 ret = t ? PTR_ERR(t) : -ENOENT; 1359 ret = t ? PTR_ERR(t) : -ENOENT;
1360 goto free; 1360 goto free;
1361 } 1361 }
@@ -1939,7 +1939,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1939 1939
1940 xt_compat_lock(AF_INET6); 1940 xt_compat_lock(AF_INET6);
1941 t = xt_find_table_lock(net, AF_INET6, get.name); 1941 t = xt_find_table_lock(net, AF_INET6, get.name);
1942 if (t && !IS_ERR(t)) { 1942 if (!IS_ERR_OR_NULL(t)) {
1943 const struct xt_table_info *private = t->private; 1943 const struct xt_table_info *private = t->private;
1944 struct xt_table_info info; 1944 struct xt_table_info info;
1945 duprintf("t->private->number = %u\n", private->number); 1945 duprintf("t->private->number = %u\n", private->number);
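
IS_ERR_OR_NULL() collapses the old two-part test on the table pointer. The kernel encodes error codes as pointers in the top 4095 addresses, so the combined check stays cheap; a userspace approximation of the macros involved:

    #define MAX_ERRNO        4095
    #define IS_ERR_VALUE(x)  ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline int is_err_or_null(const void *ptr)
    {
        return !ptr || IS_ERR_VALUE((unsigned long)ptr);
    }

    /* old: if (t && !IS_ERR(t)) { ... }
     * new: if (!IS_ERR_OR_NULL(t)) { ... } */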
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index e9486915eff6..83acc1405a18 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -9,47 +9,38 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/ipv6.h>
12#include <linux/netfilter.h> 13#include <linux/netfilter.h>
13#include <linux/netfilter_ipv6.h> 14#include <linux/netfilter_ipv6.h>
14#include <linux/netfilter_ipv6/ip6t_NPT.h> 15#include <linux/netfilter_ipv6/ip6t_NPT.h>
15#include <linux/netfilter/x_tables.h> 16#include <linux/netfilter/x_tables.h>
16 17
17static __sum16 csum16_complement(__sum16 a)
18{
19 return (__force __sum16)(0xffff - (__force u16)a);
20}
21
22static __sum16 csum16_add(__sum16 a, __sum16 b)
23{
24 u16 sum;
25
26 sum = (__force u16)a + (__force u16)b;
27 sum += (__force u16)a < (__force u16)b;
28 return (__force __sum16)sum;
29}
30
31static __sum16 csum16_sub(__sum16 a, __sum16 b)
32{
33 return csum16_add(a, csum16_complement(b));
34}
35
36static int ip6t_npt_checkentry(const struct xt_tgchk_param *par) 18static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
37{ 19{
38 struct ip6t_npt_tginfo *npt = par->targinfo; 20 struct ip6t_npt_tginfo *npt = par->targinfo;
39 __sum16 src_sum = 0, dst_sum = 0; 21 __wsum src_sum = 0, dst_sum = 0;
22 struct in6_addr pfx;
40 unsigned int i; 23 unsigned int i;
41 24
42 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) 25 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
43 return -EINVAL; 26 return -EINVAL;
44 27
28 /* Ensure that LSB of prefix is zero */
29 ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
30 if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
31 return -EINVAL;
32 ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
33 if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
34 return -EINVAL;
35
45 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { 36 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
46 src_sum = csum16_add(src_sum, 37 src_sum = csum_add(src_sum,
47 (__force __sum16)npt->src_pfx.in6.s6_addr16[i]); 38 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
48 dst_sum = csum16_add(dst_sum, 39 dst_sum = csum_add(dst_sum,
49 (__force __sum16)npt->dst_pfx.in6.s6_addr16[i]); 40 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
50 } 41 }
51 42
52 npt->adjustment = csum16_sub(src_sum, dst_sum); 43 npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
53 return 0; 44 return 0;
54} 45}
55 46
@@ -70,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
70 61
71 idx = i / 32; 62 idx = i / 32;
72 addr->s6_addr32[idx] &= mask; 63 addr->s6_addr32[idx] &= mask;
73 addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx]; 64 addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
74 } 65 }
75 66
76 if (pfx_len <= 48) 67 if (pfx_len <= 48)
@@ -85,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
85 return false; 76 return false;
86 } 77 }
87 78
88 sum = csum16_add((__force __sum16)addr->s6_addr16[idx], 79 sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
89 npt->adjustment); 80 csum_unfold(npt->adjustment)));
90 if (sum == CSUM_MANGLED_0) 81 if (sum == CSUM_MANGLED_0)
91 sum = 0; 82 sum = 0;
92 *(__force __sum16 *)&addr->s6_addr16[idx] = sum; 83 *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
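
Dropping the local csum16_* helpers in favor of csum_add()/csum_sub()/csum_fold()/csum_unfold() also fixes the arithmetic: sums are now accumulated as 32-bit one's-complement values (__wsum) and folded to 16 bits only at the end, which is exactly what the generic helpers implement. A standalone sketch of that fold with end-around carry:

    #include <stdint.h>

    static uint32_t csum_add32(uint32_t a, uint32_t b)
    {
        uint32_t s = a + b;

        return s + (s < a);              /* end-around carry */
    }

    static uint16_t csum_fold32(uint32_t s)
    {
        s = (s & 0xffff) + (s >> 16);    /* fold twice: the first fold */
        s = (s & 0xffff) + (s >> 16);    /* can itself produce a carry */
        return (uint16_t)~s;
    }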
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index fd4fb34c51c7..ed3b427b2841 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -126,12 +126,13 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
126 skb_put(nskb, sizeof(struct ipv6hdr)); 126 skb_put(nskb, sizeof(struct ipv6hdr));
127 skb_reset_network_header(nskb); 127 skb_reset_network_header(nskb);
128 ip6h = ipv6_hdr(nskb); 128 ip6h = ipv6_hdr(nskb);
129 *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); 129 ip6_flow_hdr(ip6h, tclass, 0);
130 ip6h->hop_limit = ip6_dst_hoplimit(dst); 130 ip6h->hop_limit = ip6_dst_hoplimit(dst);
131 ip6h->nexthdr = IPPROTO_TCP; 131 ip6h->nexthdr = IPPROTO_TCP;
132 ip6h->saddr = oip6h->daddr; 132 ip6h->saddr = oip6h->daddr;
133 ip6h->daddr = oip6h->saddr; 133 ip6h->daddr = oip6h->saddr;
134 134
135 skb_reset_transport_header(nskb);
135 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 136 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
136 /* Truncate to length (no data) */ 137 /* Truncate to length (no data) */
137 tcph->doff = sizeof(struct tcphdr)/4; 138 tcph->doff = sizeof(struct tcphdr)/4;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 7431121b87de..6134a1ebfb1b 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/netfilter_ipv6/ip6_tables.h> 12#include <linux/netfilter_ipv6/ip6_tables.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <net/ipv6.h>
14 15
15MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
16MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 17MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -60,8 +61,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
60 dev_net(out)->ipv6.ip6table_mangle); 61 dev_net(out)->ipv6.ip6table_mangle);
61 62
62 if (ret != NF_DROP && ret != NF_STOLEN && 63 if (ret != NF_DROP && ret != NF_STOLEN &&
63 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 64 (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
64 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 65 !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
65 skb->mark != mark || 66 skb->mark != mark ||
66 ipv6_hdr(skb)->hop_limit != hop_limit || 67 ipv6_hdr(skb)->hop_limit != hop_limit ||
67 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) 68 flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 6c8ae24b85eb..e0e788d25b14 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -127,23 +127,28 @@ nf_nat_ipv6_fn(unsigned int hooknum,
127 ret = nf_nat_rule_find(skb, hooknum, in, out, ct); 127 ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
128 if (ret != NF_ACCEPT) 128 if (ret != NF_ACCEPT)
129 return ret; 129 return ret;
130 } else 130 } else {
131 pr_debug("Already setup manip %s for ct %p\n", 131 pr_debug("Already setup manip %s for ct %p\n",
132 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 132 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
133 ct); 133 ct);
134 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
135 goto oif_changed;
136 }
134 break; 137 break;
135 138
136 default: 139 default:
137 /* ESTABLISHED */ 140 /* ESTABLISHED */
138 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 141 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
139 ctinfo == IP_CT_ESTABLISHED_REPLY); 142 ctinfo == IP_CT_ESTABLISHED_REPLY);
140 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out)) { 143 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
141 nf_ct_kill_acct(ct, ctinfo, skb); 144 goto oif_changed;
142 return NF_DROP;
143 }
144 } 145 }
145 146
146 return nf_nat_packet(ct, ctinfo, hooknum, skb); 147 return nf_nat_packet(ct, ctinfo, hooknum, skb);
148
149oif_changed:
150 nf_ct_kill_acct(ct, ctinfo, skb);
151 return NF_DROP;
147} 152}
148 153
149static unsigned int 154static unsigned int
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 00ee17c3e893..2b6c226f5198 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -81,8 +81,8 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
81 } 81 }
82 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off); 82 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
83 /* 83 /*
 84 * (protoff == skb->len) mean that the packet doesn't have no data 84 * (protoff == skb->len) means the packet has no data, just
 85 * except of IPv6 & ext headers. but it's tracked anyway. - YK 85 * IPv6 and possibly extension headers, but it is tracked anyway
86 */ 86 */
87 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 87 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
88 pr_debug("ip6_conntrack_core: can't find proto in pkt\n"); 88 pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
@@ -104,7 +104,6 @@ static unsigned int ipv6_helper(unsigned int hooknum,
104 const struct nf_conn_help *help; 104 const struct nf_conn_help *help;
105 const struct nf_conntrack_helper *helper; 105 const struct nf_conntrack_helper *helper;
106 enum ip_conntrack_info ctinfo; 106 enum ip_conntrack_info ctinfo;
107 unsigned int ret;
108 __be16 frag_off; 107 __be16 frag_off;
109 int protoff; 108 int protoff;
110 u8 nexthdr; 109 u8 nexthdr;
@@ -130,12 +129,7 @@ static unsigned int ipv6_helper(unsigned int hooknum,
130 return NF_ACCEPT; 129 return NF_ACCEPT;
131 } 130 }
132 131
133 ret = helper->help(skb, protoff, ct, ctinfo); 132 return helper->help(skb, protoff, ct, ctinfo);
134 if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
135 nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
136 "nf_ct_%s: dropping packet", helper->name);
137 }
138 return ret;
139} 133}
140 134
141static unsigned int ipv6_confirm(unsigned int hooknum, 135static unsigned int ipv6_confirm(unsigned int hooknum,
@@ -421,54 +415,43 @@ static int ipv6_net_init(struct net *net)
421{ 415{
422 int ret = 0; 416 int ret = 0;
423 417
424 ret = nf_conntrack_l4proto_register(net, 418 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_tcp6);
425 &nf_conntrack_l4proto_tcp6);
426 if (ret < 0) { 419 if (ret < 0) {
427 printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n"); 420 pr_err("nf_conntrack_tcp6: pernet registration failed\n");
428 goto out; 421 goto out;
429 } 422 }
430 ret = nf_conntrack_l4proto_register(net, 423 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udp6);
431 &nf_conntrack_l4proto_udp6);
432 if (ret < 0) { 424 if (ret < 0) {
433 printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n"); 425 pr_err("nf_conntrack_udp6: pernet registration failed\n");
434 goto cleanup_tcp6; 426 goto cleanup_tcp6;
435 } 427 }
436 ret = nf_conntrack_l4proto_register(net, 428 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_icmpv6);
437 &nf_conntrack_l4proto_icmpv6);
438 if (ret < 0) { 429 if (ret < 0) {
439 printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n"); 430 pr_err("nf_conntrack_icmp6: pernet registration failed\n");
440 goto cleanup_udp6; 431 goto cleanup_udp6;
441 } 432 }
442 ret = nf_conntrack_l3proto_register(net, 433 ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv6);
443 &nf_conntrack_l3proto_ipv6);
444 if (ret < 0) { 434 if (ret < 0) {
445 printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n"); 435 pr_err("nf_conntrack_ipv6: pernet registration failed.\n");
446 goto cleanup_icmpv6; 436 goto cleanup_icmpv6;
447 } 437 }
448 return 0; 438 return 0;
449 cleanup_icmpv6: 439 cleanup_icmpv6:
450 nf_conntrack_l4proto_unregister(net, 440 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmpv6);
451 &nf_conntrack_l4proto_icmpv6);
452 cleanup_udp6: 441 cleanup_udp6:
453 nf_conntrack_l4proto_unregister(net, 442 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp6);
454 &nf_conntrack_l4proto_udp6);
455 cleanup_tcp6: 443 cleanup_tcp6:
456 nf_conntrack_l4proto_unregister(net, 444 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp6);
457 &nf_conntrack_l4proto_tcp6);
458 out: 445 out:
459 return ret; 446 return ret;
460} 447}
461 448
462static void ipv6_net_exit(struct net *net) 449static void ipv6_net_exit(struct net *net)
463{ 450{
464 nf_conntrack_l3proto_unregister(net, 451 nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv6);
465 &nf_conntrack_l3proto_ipv6); 452 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_icmpv6);
466 nf_conntrack_l4proto_unregister(net, 453 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udp6);
467 &nf_conntrack_l4proto_icmpv6); 454 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_tcp6);
468 nf_conntrack_l4proto_unregister(net,
469 &nf_conntrack_l4proto_udp6);
470 nf_conntrack_l4proto_unregister(net,
471 &nf_conntrack_l4proto_tcp6);
472} 455}
473 456
474static struct pernet_operations ipv6_net_ops = { 457static struct pernet_operations ipv6_net_ops = {
@@ -491,19 +474,52 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
491 474
492 ret = register_pernet_subsys(&ipv6_net_ops); 475 ret = register_pernet_subsys(&ipv6_net_ops);
493 if (ret < 0) 476 if (ret < 0)
494 goto cleanup_pernet; 477 goto cleanup_sockopt;
478
495 ret = nf_register_hooks(ipv6_conntrack_ops, 479 ret = nf_register_hooks(ipv6_conntrack_ops,
496 ARRAY_SIZE(ipv6_conntrack_ops)); 480 ARRAY_SIZE(ipv6_conntrack_ops));
497 if (ret < 0) { 481 if (ret < 0) {
498 pr_err("nf_conntrack_ipv6: can't register pre-routing defrag " 482 pr_err("nf_conntrack_ipv6: can't register pre-routing defrag "
499 "hook.\n"); 483 "hook.\n");
500 goto cleanup_ipv6; 484 goto cleanup_pernet;
485 }
486
487 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_tcp6);
488 if (ret < 0) {
489 pr_err("nf_conntrack_ipv6: can't register tcp6 proto.\n");
490 goto cleanup_hooks;
491 }
492
493 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udp6);
494 if (ret < 0) {
495 pr_err("nf_conntrack_ipv6: can't register udp6 proto.\n");
496 goto cleanup_tcp6;
497 }
498
499 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_icmpv6);
500 if (ret < 0) {
501 pr_err("nf_conntrack_ipv6: can't register icmpv6 proto.\n");
502 goto cleanup_udp6;
503 }
504
505 ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv6);
506 if (ret < 0) {
507 pr_err("nf_conntrack_ipv6: can't register ipv6 proto.\n");
508 goto cleanup_icmpv6;
501 } 509 }
502 return ret; 510 return ret;
503 511
504 cleanup_ipv6: 512 cleanup_icmpv6:
505 unregister_pernet_subsys(&ipv6_net_ops); 513 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
514 cleanup_udp6:
515 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp6);
516 cleanup_tcp6:
517 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
518 cleanup_hooks:
519 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
506 cleanup_pernet: 520 cleanup_pernet:
521 unregister_pernet_subsys(&ipv6_net_ops);
522 cleanup_sockopt:
507 nf_unregister_sockopt(&so_getorigdst6); 523 nf_unregister_sockopt(&so_getorigdst6);
508 return ret; 524 return ret;
509} 525}
@@ -511,6 +527,10 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
511static void __exit nf_conntrack_l3proto_ipv6_fini(void) 527static void __exit nf_conntrack_l3proto_ipv6_fini(void)
512{ 528{
513 synchronize_net(); 529 synchronize_net();
530 nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
531 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
532 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp6);
533 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
514 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); 534 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
515 unregister_pernet_subsys(&ipv6_net_ops); 535 unregister_pernet_subsys(&ipv6_net_ops);
516 nf_unregister_sockopt(&so_getorigdst6); 536 nf_unregister_sockopt(&so_getorigdst6);
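
Module init above now registers in two tiers, pernet state first and then the global l3/l4 protocol trackers, with an error unwind that releases everything in exact reverse order. A compilable skeleton of that ladder pattern (step_a/step_b/step_c and their undos are placeholders, not kernel symbols):

    static int step_a(void) { return 0; }   /* e.g. register_pernet_subsys() */
    static int step_b(void) { return 0; }   /* e.g. nf_register_hooks() */
    static int step_c(void) { return 0; }   /* e.g. nf_ct_l4proto_register() */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int init_sketch(void)
    {
        int ret;

        ret = step_a();
        if (ret < 0)
            goto out;
        ret = step_b();
        if (ret < 0)
            goto err_a;
        ret = step_c();
        if (ret < 0)
            goto err_b;
        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
    out:
        return ret;
    }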
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 22c8ea951185..54087e96d7b8 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -97,9 +97,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
97 if (table == NULL) 97 if (table == NULL)
98 goto err_alloc; 98 goto err_alloc;
99 99
100 table[0].data = &net->ipv6.frags.high_thresh; 100 table[0].data = &net->nf_frag.frags.timeout;
101 table[1].data = &net->ipv6.frags.low_thresh; 101 table[1].data = &net->nf_frag.frags.low_thresh;
102 table[2].data = &net->ipv6.frags.timeout; 102 table[2].data = &net->nf_frag.frags.high_thresh;
103 } 103 }
104 104
105 hdr = register_net_sysctl(net, "net/netfilter", table); 105 hdr = register_net_sysctl(net, "net/netfilter", table);
@@ -311,12 +311,15 @@ found:
311 else 311 else
312 fq->q.fragments = skb; 312 fq->q.fragments = skb;
313 313
314 skb->dev = NULL; 314 if (skb->dev) {
315 fq->iif = skb->dev->ifindex;
316 skb->dev = NULL;
317 }
315 fq->q.stamp = skb->tstamp; 318 fq->q.stamp = skb->tstamp;
316 fq->q.meat += skb->len; 319 fq->q.meat += skb->len;
317 if (payload_len > fq->q.max_size) 320 if (payload_len > fq->q.max_size)
318 fq->q.max_size = payload_len; 321 fq->q.max_size = payload_len;
319 atomic_add(skb->truesize, &fq->q.net->mem); 322 add_frag_mem_limit(&fq->q, skb->truesize);
320 323
321 /* The first fragment. 324 /* The first fragment.
322 * nhoffset is obtained from the first fragment, of course. 325 * nhoffset is obtained from the first fragment, of course.
@@ -325,9 +328,8 @@ found:
325 fq->nhoffset = nhoff; 328 fq->nhoffset = nhoff;
326 fq->q.last_in |= INET_FRAG_FIRST_IN; 329 fq->q.last_in |= INET_FRAG_FIRST_IN;
327 } 330 }
328 write_lock(&nf_frags.lock); 331
329 list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); 332 inet_frag_lru_move(&fq->q);
330 write_unlock(&nf_frags.lock);
331 return 0; 333 return 0;
332 334
333discard_fq: 335discard_fq:
@@ -366,7 +368,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
366 } 368 }
367 369
368 /* Head of list must not be cloned. */ 370 /* Head of list must not be cloned. */
369 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) { 371 if (skb_unclone(head, GFP_ATOMIC)) {
370 pr_debug("skb is cloned but can't expand head"); 372 pr_debug("skb is cloned but can't expand head");
371 goto out_oom; 373 goto out_oom;
372 } 374 }
@@ -395,7 +397,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
395 clone->ip_summed = head->ip_summed; 397 clone->ip_summed = head->ip_summed;
396 398
397 NFCT_FRAG6_CB(clone)->orig = NULL; 399 NFCT_FRAG6_CB(clone)->orig = NULL;
398 atomic_add(clone->truesize, &fq->q.net->mem); 400 add_frag_mem_limit(&fq->q, clone->truesize);
399 } 401 }
400 402
401 /* We have to remove fragment header from datagram and to relocate 403 /* We have to remove fragment header from datagram and to relocate
@@ -419,7 +421,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
419 head->csum = csum_add(head->csum, fp->csum); 421 head->csum = csum_add(head->csum, fp->csum);
420 head->truesize += fp->truesize; 422 head->truesize += fp->truesize;
421 } 423 }
422 atomic_sub(head->truesize, &fq->q.net->mem); 424 sub_frag_mem_limit(&fq->q, head->truesize);
423 425
424 head->local_df = 1; 426 head->local_df = 1;
425 head->next = NULL; 427 head->next = NULL;
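
add_frag_mem_limit()/sub_frag_mem_limit() replace the open-coded atomic_add()/atomic_sub() on the per-netns fragment memory counter, and inet_frag_lru_move() hides the lock-protected LRU reshuffle that was previously done inline. The accounting they wrap is just a truesize byte counter checked against thresholds; a standalone sketch of the bookkeeping:

    #include <stdatomic.h>

    struct frag_mem_sketch {
        atomic_int mem;       /* bytes pinned by queued fragments */
        int high_thresh;      /* evict oldest queues above this */
    };

    static void add_frag_mem(struct frag_mem_sketch *nf, int truesize)
    {
        atomic_fetch_add(&nf->mem, truesize);
    }

    static void sub_frag_mem(struct frag_mem_sketch *nf, int truesize)
    {
        atomic_fetch_sub(&nf->mem, truesize);
    }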
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 745a32042950..bbbe53a99b57 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -295,11 +295,11 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
295 295
296static int __net_init ipv6_proc_init_net(struct net *net) 296static int __net_init ipv6_proc_init_net(struct net *net)
297{ 297{
298 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 298 if (!proc_create("sockstat6", S_IRUGO, net->proc_net,
299 &sockstat6_seq_fops)) 299 &sockstat6_seq_fops))
300 return -ENOMEM; 300 return -ENOMEM;
301 301
302 if (!proc_net_fops_create(net, "snmp6", S_IRUGO, &snmp6_seq_fops)) 302 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
303 goto proc_snmp6_fail; 303 goto proc_snmp6_fail;
304 304
305 net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net); 305 net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
@@ -308,17 +308,17 @@ static int __net_init ipv6_proc_init_net(struct net *net)
308 return 0; 308 return 0;
309 309
310proc_dev_snmp6_fail: 310proc_dev_snmp6_fail:
311 proc_net_remove(net, "snmp6"); 311 remove_proc_entry("snmp6", net->proc_net);
312proc_snmp6_fail: 312proc_snmp6_fail:
313 proc_net_remove(net, "sockstat6"); 313 remove_proc_entry("sockstat6", net->proc_net);
314 return -ENOMEM; 314 return -ENOMEM;
315} 315}
316 316
317static void __net_exit ipv6_proc_exit_net(struct net *net) 317static void __net_exit ipv6_proc_exit_net(struct net *net)
318{ 318{
319 proc_net_remove(net, "sockstat6"); 319 remove_proc_entry("sockstat6", net->proc_net);
320 proc_net_remove(net, "dev_snmp6"); 320 remove_proc_entry("dev_snmp6", net->proc_net);
321 proc_net_remove(net, "snmp6"); 321 remove_proc_entry("snmp6", net->proc_net);
322} 322}
323 323
324static struct pernet_operations ipv6_proc_ops = { 324static struct pernet_operations ipv6_proc_ops = {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1e8b92..c65907db8c44 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
507 sock_recv_ts_and_drops(msg, sk, skb); 507 sock_recv_ts_and_drops(msg, sk, skb);
508 508
509 if (np->rxopt.all) 509 if (np->rxopt.all)
510 datagram_recv_ctl(sk, msg, skb); 510 ip6_datagram_recv_ctl(sk, msg, skb);
511 511
512 err = copied; 512 err = copied;
513 if (flags & MSG_TRUNC) 513 if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 822 memset(opt, 0, sizeof(struct ipv6_txoptions));
823 opt->tot_len = sizeof(struct ipv6_txoptions); 823 opt->tot_len = sizeof(struct ipv6_txoptions);
824 824
825 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 825 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
826 &hlimit, &tclass, &dontfrag); 826 &hlimit, &tclass, &dontfrag);
827 if (err < 0) { 827 if (err < 0) {
828 fl6_sock_release(flowlabel); 828 fl6_sock_release(flowlabel);
829 return err; 829 return err;
@@ -1292,7 +1292,7 @@ static const struct file_operations raw6_seq_fops = {
1292 1292
1293static int __net_init raw6_init_net(struct net *net) 1293static int __net_init raw6_init_net(struct net *net)
1294{ 1294{
1295 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1295 if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
1296 return -ENOMEM; 1296 return -ENOMEM;
1297 1297
1298 return 0; 1298 return 0;
@@ -1300,7 +1300,7 @@ static int __net_init raw6_init_net(struct net *net)
1300 1300
1301static void __net_exit raw6_exit_net(struct net *net) 1301static void __net_exit raw6_exit_net(struct net *net)
1302{ 1302{
1303 proc_net_remove(net, "raw6"); 1303 remove_proc_entry("raw6", net->proc_net);
1304} 1304}
1305 1305
1306static struct pernet_operations raw6_net_ops = { 1306static struct pernet_operations raw6_net_ops = {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e5253ec9e0fc..3c6a77290c6e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,20 +79,8 @@ unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
79{ 79{
80 u32 c; 80 u32 c;
81 81
82 c = jhash_3words((__force u32)saddr->s6_addr32[0], 82 c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
83 (__force u32)saddr->s6_addr32[1], 83 (__force u32)id, rnd);
84 (__force u32)saddr->s6_addr32[2],
85 rnd);
86
87 c = jhash_3words((__force u32)saddr->s6_addr32[3],
88 (__force u32)daddr->s6_addr32[0],
89 (__force u32)daddr->s6_addr32[1],
90 c);
91
92 c = jhash_3words((__force u32)daddr->s6_addr32[2],
93 (__force u32)daddr->s6_addr32[3],
94 (__force u32)id,
95 c);
96 84
97 return c & (INETFRAGS_HASHSZ - 1); 85 return c & (INETFRAGS_HASHSZ - 1);
98} 86}
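
The hash rewrite relies on ipv6_addr_hash(), which XOR-folds the four 32-bit words of an address into one word, so the three chained jhash_3words() rounds collapse into a single call over (folded saddr, folded daddr, id). Userspace sketch of the fold:

    #include <stdint.h>
    #include <string.h>
    #include <netinet/in.h>

    static uint32_t ipv6_addr_hash_sketch(const struct in6_addr *a)
    {
        uint32_t w[4];

        memcpy(w, a, sizeof(w));    /* avoid strict-aliasing games */
        return w[0] ^ w[1] ^ w[2] ^ w[3];
    }

    /* c = jhash_3words(fold(saddr), fold(daddr), (u32)id, rnd); */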
@@ -327,7 +315,7 @@ found:
327 } 315 }
328 fq->q.stamp = skb->tstamp; 316 fq->q.stamp = skb->tstamp;
329 fq->q.meat += skb->len; 317 fq->q.meat += skb->len;
330 atomic_add(skb->truesize, &fq->q.net->mem); 318 add_frag_mem_limit(&fq->q, skb->truesize);
331 319
332 /* The first fragment. 320 /* The first fragment.
333 * nhoffset is obtained from the first fragment, of course. 321 * nhoffset is obtained from the first fragment, of course.
@@ -341,9 +329,7 @@ found:
341 fq->q.meat == fq->q.len) 329 fq->q.meat == fq->q.len)
342 return ip6_frag_reasm(fq, prev, dev); 330 return ip6_frag_reasm(fq, prev, dev);
343 331
344 write_lock(&ip6_frags.lock); 332 inet_frag_lru_move(&fq->q);
345 list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
346 write_unlock(&ip6_frags.lock);
347 return -1; 333 return -1;
348 334
349discard_fq: 335discard_fq:
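Annotation: direct pokes at fq->q.net->mem and the hand-rolled LRU move under the global ip6_frags.lock give way to helpers from the fragment core, so the accounting and LRU strategy live in one place (this series moves the LRU under its own per-netns lock). One plausible shape of the helpers, labelled as an assumption since only the call sites are visible in this hunk:

    /* Plausible shape of the helpers (assumption; the point is the
     * indirection, not the exact body). */
    static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
    {
            atomic_add(i, &q->net->mem);
    }

    static inline void inet_frag_lru_move(struct inet_frag_queue *q)
    {
            spin_lock(&q->net->lru_lock);   /* per-netns, not global */
            list_move_tail(&q->lru_list, &q->net->lru_list);
            spin_unlock(&q->net->lru_lock);
    }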
@@ -406,7 +392,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
406 goto out_oversize; 392 goto out_oversize;
407 393
408 /* Head of list must not be cloned. */ 394 /* Head of list must not be cloned. */
409 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) 395 if (skb_unclone(head, GFP_ATOMIC))
410 goto out_oom; 396 goto out_oom;
411 397
412 /* If the first fragment is fragmented itself, we split 398 /* If the first fragment is fragmented itself, we split
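Annotation: skb_unclone() names the common "if shared, get a private copy of the header" idiom; reassembly must own the head skb before rewriting it. The helper as defined in include/linux/skbuff.h (modulo cosmetics):

    /* Expand the head only when the skb is actually cloned. */
    static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
    {
            might_sleep_if(pri & __GFP_WAIT);

            if (skb_cloned(skb))
                    return pskb_expand_head(skb, 0, 0, pri);
            return 0;
    }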
@@ -429,7 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
429 head->len -= clone->len; 415 head->len -= clone->len;
430 clone->csum = 0; 416 clone->csum = 0;
431 clone->ip_summed = head->ip_summed; 417 clone->ip_summed = head->ip_summed;
432 atomic_add(clone->truesize, &fq->q.net->mem); 418 add_frag_mem_limit(&fq->q, clone->truesize);
433 } 419 }
434 420
435 /* We have to remove fragment header from datagram and to relocate 421 /* We have to remove fragment header from datagram and to relocate
@@ -467,7 +453,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
467 } 453 }
468 fp = next; 454 fp = next;
469 } 455 }
470 atomic_sub(sum_truesize, &fq->q.net->mem); 456 sub_frag_mem_limit(&fq->q, sum_truesize);
471 457
472 head->next = NULL; 458 head->next = NULL;
473 head->dev = dev; 459 head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e229a3bc345d..928266569689 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -145,25 +145,12 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
145 struct neighbour *n; 145 struct neighbour *n;
146 146
147 daddr = choose_neigh_daddr(rt, skb, daddr); 147 daddr = choose_neigh_daddr(rt, skb, daddr);
148 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 148 n = __ipv6_neigh_lookup(dst->dev, daddr);
149 if (n) 149 if (n)
150 return n; 150 return n;
151 return neigh_create(&nd_tbl, daddr, dst->dev); 151 return neigh_create(&nd_tbl, daddr, dst->dev);
152} 152}
153 153
154static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
155{
156 struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
157 if (!n) {
158 n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
159 if (IS_ERR(n))
160 return PTR_ERR(n);
161 }
162 rt->n = n;
163
164 return 0;
165}
166
167static struct dst_ops ip6_dst_ops_template = { 154static struct dst_ops ip6_dst_ops_template = {
168 .family = AF_INET6, 155 .family = AF_INET6,
169 .protocol = cpu_to_be16(ETH_P_IPV6), 156 .protocol = cpu_to_be16(ETH_P_IPV6),
@@ -300,9 +287,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
300{ 287{
301 struct rt6_info *rt = (struct rt6_info *)dst; 288 struct rt6_info *rt = (struct rt6_info *)dst;
302 struct inet6_dev *idev = rt->rt6i_idev; 289 struct inet6_dev *idev = rt->rt6i_idev;
303 290 struct dst_entry *from = dst->from;
304 if (rt->n)
305 neigh_release(rt->n);
306 291
307 if (!(rt->dst.flags & DST_HOST)) 292 if (!(rt->dst.flags & DST_HOST))
308 dst_destroy_metrics_generic(dst); 293 dst_destroy_metrics_generic(dst);
@@ -312,8 +297,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
312 in6_dev_put(idev); 297 in6_dev_put(idev);
313 } 298 }
314 299
315 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) 300 dst->from = NULL;
316 dst_release(dst->from); 301 dst_release(from);
317 302
318 if (rt6_has_peer(rt)) { 303 if (rt6_has_peer(rt)) {
319 struct inet_peer *peer = rt6_peer_ptr(rt); 304 struct inet_peer *peer = rt6_peer_ptr(rt);
@@ -354,11 +339,6 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
354 in6_dev_put(idev); 339 in6_dev_put(idev);
355 } 340 }
356 } 341 }
357 if (rt->n && rt->n->dev == dev) {
358 rt->n->dev = loopback_dev;
359 dev_hold(loopback_dev);
360 dev_put(dev);
361 }
362 } 342 }
363} 343}
364 344
@@ -388,15 +368,8 @@ static int rt6_info_hash_nhsfn(unsigned int candidate_count,
388{ 368{
389 unsigned int val = fl6->flowi6_proto; 369 unsigned int val = fl6->flowi6_proto;
390 370
391 val ^= (__force u32)fl6->daddr.s6_addr32[0]; 371 val ^= ipv6_addr_hash(&fl6->daddr);
392 val ^= (__force u32)fl6->daddr.s6_addr32[1]; 372 val ^= ipv6_addr_hash(&fl6->saddr);
393 val ^= (__force u32)fl6->daddr.s6_addr32[2];
394 val ^= (__force u32)fl6->daddr.s6_addr32[3];
395
396 val ^= (__force u32)fl6->saddr.s6_addr32[0];
397 val ^= (__force u32)fl6->saddr.s6_addr32[1];
398 val ^= (__force u32)fl6->saddr.s6_addr32[2];
399 val ^= (__force u32)fl6->saddr.s6_addr32[3];
400 373
401 /* Work only if this not encapsulated */ 374 /* Work only if this not encapsulated */
402 switch (fl6->flowi6_proto) { 375 switch (fl6->flowi6_proto) {
@@ -505,24 +478,34 @@ static void rt6_probe(struct rt6_info *rt)
505 * Router Reachability Probe MUST be rate-limited 478 * Router Reachability Probe MUST be rate-limited
506 * to no more than one per minute. 479 * to no more than one per minute.
507 */ 480 */
508 neigh = rt ? rt->n : NULL; 481 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
509 if (!neigh || (neigh->nud_state & NUD_VALID))
510 return; 482 return;
511 read_lock_bh(&neigh->lock); 483 rcu_read_lock_bh();
512 if (!(neigh->nud_state & NUD_VALID) && 484 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
485 if (neigh) {
486 write_lock(&neigh->lock);
487 if (neigh->nud_state & NUD_VALID)
488 goto out;
489 }
490
491 if (!neigh ||
513 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 492 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
514 struct in6_addr mcaddr; 493 struct in6_addr mcaddr;
515 struct in6_addr *target; 494 struct in6_addr *target;
516 495
517 neigh->updated = jiffies; 496 if (neigh) {
518 read_unlock_bh(&neigh->lock); 497 neigh->updated = jiffies;
498 write_unlock(&neigh->lock);
499 }
519 500
520 target = (struct in6_addr *)&neigh->primary_key; 501 target = (struct in6_addr *)&rt->rt6i_gateway;
521 addrconf_addr_solict_mult(target, &mcaddr); 502 addrconf_addr_solict_mult(target, &mcaddr);
522 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); 503 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
523 } else { 504 } else {
524 read_unlock_bh(&neigh->lock); 505out:
506 write_unlock(&neigh->lock);
525 } 507 }
508 rcu_read_unlock_bh();
526} 509}
527#else 510#else
528static inline void rt6_probe(struct rt6_info *rt) 511static inline void rt6_probe(struct rt6_info *rt)
@@ -549,20 +532,24 @@ static inline bool rt6_check_neigh(struct rt6_info *rt)
549 struct neighbour *neigh; 532 struct neighbour *neigh;
550 bool ret = false; 533 bool ret = false;
551 534
552 neigh = rt->n;
553 if (rt->rt6i_flags & RTF_NONEXTHOP || 535 if (rt->rt6i_flags & RTF_NONEXTHOP ||
554 !(rt->rt6i_flags & RTF_GATEWAY)) 536 !(rt->rt6i_flags & RTF_GATEWAY))
555 ret = true; 537 return true;
556 else if (neigh) { 538
557 read_lock_bh(&neigh->lock); 539 rcu_read_lock_bh();
540 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
541 if (neigh) {
542 read_lock(&neigh->lock);
558 if (neigh->nud_state & NUD_VALID) 543 if (neigh->nud_state & NUD_VALID)
559 ret = true; 544 ret = true;
560#ifdef CONFIG_IPV6_ROUTER_PREF 545#ifdef CONFIG_IPV6_ROUTER_PREF
561 else if (!(neigh->nud_state & NUD_FAILED)) 546 else if (!(neigh->nud_state & NUD_FAILED))
562 ret = true; 547 ret = true;
563#endif 548#endif
564 read_unlock_bh(&neigh->lock); 549 read_unlock(&neigh->lock);
565 } 550 }
551 rcu_read_unlock_bh();
552
566 return ret; 553 return ret;
567} 554}
568 555
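Annotation: the common theme of the route.c hunks is removing the neighbour cached in rt->n. Routes no longer pin a struct neighbour; paths that need one (rt6_probe() and rt6_check_neigh() above, icmp6_dst_alloc() and the redirect handler below) resolve it on demand from the neighbour table, and rt6_alloc_cow() below loses its "neighbour table overflow" GC retry dance because nothing binds a neighbour at route-creation time any more. The resulting access pattern, sketched with the names used in these hunks:

    /* On-demand neighbour access replacing the cached rt->n (sketch).
     * rcu_read_lock_bh() keeps the entry alive without a refcount;
     * the neigh lock serializes nud_state access. */
    rcu_read_lock_bh();
    neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
    if (neigh) {
            read_lock(&neigh->lock);
            /* ... inspect neigh->nud_state ... */
            read_unlock(&neigh->lock);
    }
    rcu_read_unlock_bh();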
@@ -838,8 +825,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
838 rt = ip6_rt_copy(ort, daddr); 825 rt = ip6_rt_copy(ort, daddr);
839 826
840 if (rt) { 827 if (rt) {
841 int attempts = !in_softirq();
842
843 if (!(rt->rt6i_flags & RTF_GATEWAY)) { 828 if (!(rt->rt6i_flags & RTF_GATEWAY)) {
844 if (ort->rt6i_dst.plen != 128 && 829 if (ort->rt6i_dst.plen != 128 &&
845 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) 830 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
@@ -855,32 +840,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
855 rt->rt6i_src.plen = 128; 840 rt->rt6i_src.plen = 128;
856 } 841 }
857#endif 842#endif
858
859 retry:
860 if (rt6_bind_neighbour(rt, rt->dst.dev)) {
861 struct net *net = dev_net(rt->dst.dev);
862 int saved_rt_min_interval =
863 net->ipv6.sysctl.ip6_rt_gc_min_interval;
864 int saved_rt_elasticity =
865 net->ipv6.sysctl.ip6_rt_gc_elasticity;
866
867 if (attempts-- > 0) {
868 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
869 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
870
871 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
872
873 net->ipv6.sysctl.ip6_rt_gc_elasticity =
874 saved_rt_elasticity;
875 net->ipv6.sysctl.ip6_rt_gc_min_interval =
876 saved_rt_min_interval;
877 goto retry;
878 }
879
880 net_warn_ratelimited("Neighbour table overflow\n");
881 dst_free(&rt->dst);
882 return NULL;
883 }
884 } 843 }
885 844
886 return rt; 845 return rt;
@@ -891,10 +850,8 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
891{ 850{
892 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 851 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
893 852
894 if (rt) { 853 if (rt)
895 rt->rt6i_flags |= RTF_CACHE; 854 rt->rt6i_flags |= RTF_CACHE;
896 rt->n = neigh_clone(ort->n);
897 }
898 return rt; 855 return rt;
899} 856}
900 857
@@ -928,7 +885,7 @@ restart:
928 dst_hold(&rt->dst); 885 dst_hold(&rt->dst);
929 read_unlock_bh(&table->tb6_lock); 886 read_unlock_bh(&table->tb6_lock);
930 887
931 if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) 888 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
932 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 889 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
933 else if (!(rt->dst.flags & DST_HOST)) 890 else if (!(rt->dst.flags & DST_HOST))
934 nrt = rt6_alloc_clone(rt, &fl6->daddr); 891 nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -994,7 +951,7 @@ void ip6_route_input(struct sk_buff *skb)
994 .flowi6_iif = skb->dev->ifindex, 951 .flowi6_iif = skb->dev->ifindex,
995 .daddr = iph->daddr, 952 .daddr = iph->daddr,
996 .saddr = iph->saddr, 953 .saddr = iph->saddr,
997 .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, 954 .flowlabel = ip6_flowinfo(iph),
998 .flowi6_mark = skb->mark, 955 .flowi6_mark = skb->mark,
999 .flowi6_proto = iph->nexthdr, 956 .flowi6_proto = iph->nexthdr,
1000 }; 957 };
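Annotation: ip6_flowinfo() names the open-coded "first 32 bits of the header minus the version nibble" mask, i.e. traffic class plus flow label. Sketch of the helper assumed by these converted call sites:

    /* Keep the traffic-class + flow-label bits of the first header word. */
    static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
    {
            return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
    }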
@@ -1054,7 +1011,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
1054 1011
1055 rt->rt6i_gateway = ort->rt6i_gateway; 1012 rt->rt6i_gateway = ort->rt6i_gateway;
1056 rt->rt6i_flags = ort->rt6i_flags; 1013 rt->rt6i_flags = ort->rt6i_flags;
1057 rt6_clean_expires(rt);
1058 rt->rt6i_metric = 0; 1014 rt->rt6i_metric = 0;
1059 1015
1060 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 1016 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1159,7 +1115,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1159 fl6.flowi6_flags = 0; 1115 fl6.flowi6_flags = 0;
1160 fl6.daddr = iph->daddr; 1116 fl6.daddr = iph->daddr;
1161 fl6.saddr = iph->saddr; 1117 fl6.saddr = iph->saddr;
1162 fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK; 1118 fl6.flowlabel = ip6_flowinfo(iph);
1163 1119
1164 dst = ip6_route_output(net, NULL, &fl6); 1120 dst = ip6_route_output(net, NULL, &fl6);
1165 if (!dst->error) 1121 if (!dst->error)
@@ -1187,7 +1143,7 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1187 fl6.flowi6_flags = 0; 1143 fl6.flowi6_flags = 0;
1188 fl6.daddr = iph->daddr; 1144 fl6.daddr = iph->daddr;
1189 fl6.saddr = iph->saddr; 1145 fl6.saddr = iph->saddr;
1190 fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK; 1146 fl6.flowlabel = ip6_flowinfo(iph);
1191 1147
1192 dst = ip6_route_output(net, NULL, &fl6); 1148 dst = ip6_route_output(net, NULL, &fl6);
1193 if (!dst->error) 1149 if (!dst->error)
@@ -1247,7 +1203,6 @@ static struct dst_entry *icmp6_dst_gc_list;
1247static DEFINE_SPINLOCK(icmp6_dst_lock); 1203static DEFINE_SPINLOCK(icmp6_dst_lock);
1248 1204
1249struct dst_entry *icmp6_dst_alloc(struct net_device *dev, 1205struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1250 struct neighbour *neigh,
1251 struct flowi6 *fl6) 1206 struct flowi6 *fl6)
1252{ 1207{
1253 struct dst_entry *dst; 1208 struct dst_entry *dst;
@@ -1265,20 +1220,8 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1265 goto out; 1220 goto out;
1266 } 1221 }
1267 1222
1268 if (neigh)
1269 neigh_hold(neigh);
1270 else {
1271 neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr);
1272 if (IS_ERR(neigh)) {
1273 in6_dev_put(idev);
1274 dst_free(&rt->dst);
1275 return ERR_CAST(neigh);
1276 }
1277 }
1278
1279 rt->dst.flags |= DST_HOST; 1223 rt->dst.flags |= DST_HOST;
1280 rt->dst.output = ip6_output; 1224 rt->dst.output = ip6_output;
1281 rt->n = neigh;
1282 atomic_set(&rt->dst.__refcnt, 1); 1225 atomic_set(&rt->dst.__refcnt, 1);
1283 rt->rt6i_dst.addr = fl6->daddr; 1226 rt->rt6i_dst.addr = fl6->daddr;
1284 rt->rt6i_dst.plen = 128; 1227 rt->rt6i_dst.plen = 128;
@@ -1587,12 +1530,6 @@ int ip6_route_add(struct fib6_config *cfg)
1587 } else 1530 } else
1588 rt->rt6i_prefsrc.plen = 0; 1531 rt->rt6i_prefsrc.plen = 0;
1589 1532
1590 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1591 err = rt6_bind_neighbour(rt, dev);
1592 if (err)
1593 goto out;
1594 }
1595
1596 rt->rt6i_flags = cfg->fc_flags; 1533 rt->rt6i_flags = cfg->fc_flags;
1597 1534
1598install_route: 1535install_route:
@@ -1705,37 +1642,32 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
1705 struct net *net = dev_net(skb->dev); 1642 struct net *net = dev_net(skb->dev);
1706 struct netevent_redirect netevent; 1643 struct netevent_redirect netevent;
1707 struct rt6_info *rt, *nrt = NULL; 1644 struct rt6_info *rt, *nrt = NULL;
1708 const struct in6_addr *target;
1709 struct ndisc_options ndopts; 1645 struct ndisc_options ndopts;
1710 const struct in6_addr *dest;
1711 struct neighbour *old_neigh;
1712 struct inet6_dev *in6_dev; 1646 struct inet6_dev *in6_dev;
1713 struct neighbour *neigh; 1647 struct neighbour *neigh;
1714 struct icmp6hdr *icmph; 1648 struct rd_msg *msg;
1715 int optlen, on_link; 1649 int optlen, on_link;
1716 u8 *lladdr; 1650 u8 *lladdr;
1717 1651
1718 optlen = skb->tail - skb->transport_header; 1652 optlen = skb->tail - skb->transport_header;
1719 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1653 optlen -= sizeof(*msg);
1720 1654
1721 if (optlen < 0) { 1655 if (optlen < 0) {
1722 net_dbg_ratelimited("rt6_do_redirect: packet too short\n"); 1656 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1723 return; 1657 return;
1724 } 1658 }
1725 1659
1726 icmph = icmp6_hdr(skb); 1660 msg = (struct rd_msg *)icmp6_hdr(skb);
1727 target = (const struct in6_addr *) (icmph + 1);
1728 dest = target + 1;
1729 1661
1730 if (ipv6_addr_is_multicast(dest)) { 1662 if (ipv6_addr_is_multicast(&msg->dest)) {
1731 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n"); 1663 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1732 return; 1664 return;
1733 } 1665 }
1734 1666
1735 on_link = 0; 1667 on_link = 0;
1736 if (ipv6_addr_equal(dest, target)) { 1668 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1737 on_link = 1; 1669 on_link = 1;
1738 } else if (ipv6_addr_type(target) != 1670 } else if (ipv6_addr_type(&msg->target) !=
1739 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 1671 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1740 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n"); 1672 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1741 return; 1673 return;
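Annotation: instead of pointer arithmetic past the icmp6hdr, the redirect parser now overlays a proper structure on the ICMPv6 redirect message, which is what makes "optlen -= sizeof(*msg)" and "ndisc_parse_options(msg->opt, ...)" read naturally. The layout assumed (per RFC 4861 section 4.5; the struct is added to include/net/ndisc.h in this series):

    struct rd_msg {
            struct icmp6hdr icmph;
            struct in6_addr target;  /* the better first hop */
            struct in6_addr dest;    /* destination being redirected */
            __u8            opt[0];  /* ND options, e.g. target lladdr */
    };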
@@ -1752,7 +1684,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
1752 * first-hop router for the specified ICMP Destination Address. 1684 * first-hop router for the specified ICMP Destination Address.
1753 */ 1685 */
1754 1686
1755 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { 1687 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1756 net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); 1688 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1757 return; 1689 return;
1758 } 1690 }
@@ -1779,15 +1711,10 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
1779 */ 1711 */
1780 dst_confirm(&rt->dst); 1712 dst_confirm(&rt->dst);
1781 1713
1782 neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1); 1714 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1783 if (!neigh) 1715 if (!neigh)
1784 return; 1716 return;
1785 1717
1786 /* Duplicate redirect: silently ignore. */
1787 old_neigh = rt->n;
1788 if (neigh == old_neigh)
1789 goto out;
1790
1791 /* 1718 /*
1792 * We have finally decided to accept it. 1719 * We have finally decided to accept it.
1793 */ 1720 */
@@ -1799,7 +1726,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
1799 NEIGH_UPDATE_F_ISROUTER)) 1726 NEIGH_UPDATE_F_ISROUTER))
1800 ); 1727 );
1801 1728
1802 nrt = ip6_rt_copy(rt, dest); 1729 nrt = ip6_rt_copy(rt, &msg->dest);
1803 if (!nrt) 1730 if (!nrt)
1804 goto out; 1731 goto out;
1805 1732
@@ -1808,16 +1735,14 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
1808 nrt->rt6i_flags &= ~RTF_GATEWAY; 1735 nrt->rt6i_flags &= ~RTF_GATEWAY;
1809 1736
1810 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 1737 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1811 nrt->n = neigh_clone(neigh);
1812 1738
1813 if (ip6_ins_rt(nrt)) 1739 if (ip6_ins_rt(nrt))
1814 goto out; 1740 goto out;
1815 1741
1816 netevent.old = &rt->dst; 1742 netevent.old = &rt->dst;
1817 netevent.old_neigh = old_neigh;
1818 netevent.new = &nrt->dst; 1743 netevent.new = &nrt->dst;
1819 netevent.new_neigh = neigh; 1744 netevent.daddr = &msg->dest;
1820 netevent.daddr = dest; 1745 netevent.neigh = neigh;
1821 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1746 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1822 1747
1823 if (rt->rt6i_flags & RTF_CACHE) { 1748 if (rt->rt6i_flags & RTF_CACHE) {
@@ -1859,8 +1784,6 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1859 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1784 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1860 (RTF_DEFAULT | RTF_ADDRCONF)) 1785 (RTF_DEFAULT | RTF_ADDRCONF))
1861 rt6_set_from(rt, ort); 1786 rt6_set_from(rt, ort);
1862 else
1863 rt6_clean_expires(rt);
1864 rt->rt6i_metric = 0; 1787 rt->rt6i_metric = 0;
1865 1788
1866#ifdef CONFIG_IPV6_SUBTREES 1789#ifdef CONFIG_IPV6_SUBTREES
@@ -2123,7 +2046,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2123{ 2046{
2124 struct net *net = dev_net(idev->dev); 2047 struct net *net = dev_net(idev->dev);
2125 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); 2048 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2126 int err;
2127 2049
2128 if (!rt) { 2050 if (!rt) {
2129 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n"); 2051 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
@@ -2142,11 +2064,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2142 rt->rt6i_flags |= RTF_ANYCAST; 2064 rt->rt6i_flags |= RTF_ANYCAST;
2143 else 2065 else
2144 rt->rt6i_flags |= RTF_LOCAL; 2066 rt->rt6i_flags |= RTF_LOCAL;
2145 err = rt6_bind_neighbour(rt, rt->dst.dev);
2146 if (err) {
2147 dst_free(&rt->dst);
2148 return ERR_PTR(err);
2149 }
2150 2067
2151 rt->rt6i_dst.addr = *addr; 2068 rt->rt6i_dst.addr = *addr;
2152 rt->rt6i_dst.plen = 128; 2069 rt->rt6i_dst.plen = 128;
@@ -2492,7 +2409,6 @@ static int rt6_fill_node(struct net *net,
2492 struct nlmsghdr *nlh; 2409 struct nlmsghdr *nlh;
2493 long expires; 2410 long expires;
2494 u32 table; 2411 u32 table;
2495 struct neighbour *n;
2496 2412
2497 if (prefix) { /* user wants prefix routes only */ 2413 if (prefix) { /* user wants prefix routes only */
2498 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { 2414 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2605,9 +2521,8 @@ static int rt6_fill_node(struct net *net,
2605 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2521 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2606 goto nla_put_failure; 2522 goto nla_put_failure;
2607 2523
2608 n = rt->n; 2524 if (rt->rt6i_flags & RTF_GATEWAY) {
2609 if (n) { 2525 if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2610 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0)
2611 goto nla_put_failure; 2526 goto nla_put_failure;
2612 } 2527 }
2613 2528
@@ -2802,7 +2717,6 @@ struct rt6_proc_arg
2802static int rt6_info_route(struct rt6_info *rt, void *p_arg) 2717static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2803{ 2718{
2804 struct seq_file *m = p_arg; 2719 struct seq_file *m = p_arg;
2805 struct neighbour *n;
2806 2720
2807 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); 2721 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2808 2722
@@ -2811,9 +2725,8 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2811#else 2725#else
2812 seq_puts(m, "00000000000000000000000000000000 00 "); 2726 seq_puts(m, "00000000000000000000000000000000 00 ");
2813#endif 2727#endif
2814 n = rt->n; 2728 if (rt->rt6i_flags & RTF_GATEWAY) {
2815 if (n) { 2729 seq_printf(m, "%pi6", &rt->rt6i_gateway);
2816 seq_printf(m, "%pi6", n->primary_key);
2817 } else { 2730 } else {
2818 seq_puts(m, "00000000000000000000000000000000"); 2731 seq_puts(m, "00000000000000000000000000000000");
2819 } 2732 }
@@ -3080,8 +2993,8 @@ static void __net_exit ip6_route_net_exit(struct net *net)
3080static int __net_init ip6_route_net_init_late(struct net *net) 2993static int __net_init ip6_route_net_init_late(struct net *net)
3081{ 2994{
3082#ifdef CONFIG_PROC_FS 2995#ifdef CONFIG_PROC_FS
3083 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops); 2996 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3084 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); 2997 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3085#endif 2998#endif
3086 return 0; 2999 return 0;
3087} 3000}
@@ -3089,8 +3002,8 @@ static int __net_init ip6_route_net_init_late(struct net *net)
3089static void __net_exit ip6_route_net_exit_late(struct net *net) 3002static void __net_exit ip6_route_net_exit_late(struct net *net)
3090{ 3003{
3091#ifdef CONFIG_PROC_FS 3004#ifdef CONFIG_PROC_FS
3092 proc_net_remove(net, "ipv6_route"); 3005 remove_proc_entry("ipv6_route", net->proc_net);
3093 proc_net_remove(net, "rt6_stats"); 3006 remove_proc_entry("rt6_stats", net->proc_net);
3094#endif 3007#endif
3095} 3008}
3096 3009
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index cfba99b2c2a4..02f96dcbcf02 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -72,6 +72,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
72static int ipip6_tunnel_init(struct net_device *dev); 72static int ipip6_tunnel_init(struct net_device *dev);
73static void ipip6_tunnel_setup(struct net_device *dev); 73static void ipip6_tunnel_setup(struct net_device *dev);
74static void ipip6_dev_free(struct net_device *dev); 74static void ipip6_dev_free(struct net_device *dev);
75static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
76 __be32 *v4dst);
75static struct rtnl_link_ops sit_link_ops __read_mostly; 77static struct rtnl_link_ops sit_link_ops __read_mostly;
76 78
77static int sit_net_id __read_mostly; 79static int sit_net_id __read_mostly;
@@ -590,17 +592,21 @@ out:
590 return err; 592 return err;
591} 593}
592 594
595static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
596 const struct in6_addr *v6addr)
597{
598 __be32 v4embed = 0;
599 if (check_6rd(tunnel, v6addr, &v4embed) && v4addr != v4embed)
600 return true;
601 return false;
602}
603
593static int ipip6_rcv(struct sk_buff *skb) 604static int ipip6_rcv(struct sk_buff *skb)
594{ 605{
595 const struct iphdr *iph; 606 const struct iphdr *iph = ip_hdr(skb);
596 struct ip_tunnel *tunnel; 607 struct ip_tunnel *tunnel;
597 int err; 608 int err;
598 609
599 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
600 goto out;
601
602 iph = ip_hdr(skb);
603
604 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 610 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
605 iph->saddr, iph->daddr); 611 iph->saddr, iph->daddr);
606 if (tunnel != NULL) { 612 if (tunnel != NULL) {
@@ -613,10 +619,19 @@ static int ipip6_rcv(struct sk_buff *skb)
613 skb->protocol = htons(ETH_P_IPV6); 619 skb->protocol = htons(ETH_P_IPV6);
614 skb->pkt_type = PACKET_HOST; 620 skb->pkt_type = PACKET_HOST;
615 621
616 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 622 if (tunnel->dev->priv_flags & IFF_ISATAP) {
617 !isatap_chksrc(skb, iph, tunnel)) { 623 if (!isatap_chksrc(skb, iph, tunnel)) {
618 tunnel->dev->stats.rx_errors++; 624 tunnel->dev->stats.rx_errors++;
619 goto out; 625 goto out;
626 }
627 } else {
628 if (is_spoofed_6rd(tunnel, iph->saddr,
629 &ipv6_hdr(skb)->saddr) ||
630 is_spoofed_6rd(tunnel, iph->daddr,
631 &ipv6_hdr(skb)->daddr)) {
632 tunnel->dev->stats.rx_errors++;
633 goto out;
634 }
620 } 635 }
621 636
622 __skb_tunnel_rx(skb, tunnel->dev); 637 __skb_tunnel_rx(skb, tunnel->dev);
@@ -650,14 +665,12 @@ out:
650} 665}
651 666
652/* 667/*
653 * Returns the embedded IPv4 address if the IPv6 address 668 * If the IPv6 address comes from 6rd / 6to4 (RFC 3056) addr space this function
654 * comes from 6rd / 6to4 (RFC 3056) addr space. 669 * stores the embedded IPv4 address in v4dst and returns true.
655 */ 670 */
656static inline 671static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
657__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel) 672 __be32 *v4dst)
658{ 673{
659 __be32 dst = 0;
660
661#ifdef CONFIG_IPV6_SIT_6RD 674#ifdef CONFIG_IPV6_SIT_6RD
662 if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, 675 if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
663 tunnel->ip6rd.prefixlen)) { 676 tunnel->ip6rd.prefixlen)) {
@@ -676,14 +689,24 @@ __be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
676 d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >> 689 d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
677 (32 - pbi1); 690 (32 - pbi1);
678 691
679 dst = tunnel->ip6rd.relay_prefix | htonl(d); 692 *v4dst = tunnel->ip6rd.relay_prefix | htonl(d);
693 return true;
680 } 694 }
681#else 695#else
682 if (v6dst->s6_addr16[0] == htons(0x2002)) { 696 if (v6dst->s6_addr16[0] == htons(0x2002)) {
683 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ 697 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
684 memcpy(&dst, &v6dst->s6_addr16[1], 4); 698 memcpy(v4dst, &v6dst->s6_addr16[1], 4);
699 return true;
685 } 700 }
686#endif 701#endif
702 return false;
703}
704
705static inline __be32 try_6rd(struct ip_tunnel *tunnel,
706 const struct in6_addr *v6dst)
707{
708 __be32 dst = 0;
709 check_6rd(tunnel, v6dst, &dst);
687 return dst; 710 return dst;
688} 711}
689 712
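Annotation: check_6rd() factors the old try_6rd() logic into a predicate so the receive path can reuse it: is_spoofed_6rd() above rejects tunnel packets whose outer IPv4 address disagrees with the IPv4 address embedded in the corresponding inner IPv6 address, closing an address-spoofing hole for 6rd/6to4 (ISATAP keeps its own isatap_chksrc() check). The 6to4 branch is easy to demo in userspace; a runnable sketch:

    /* Demo of the 6to4 branch of check_6rd(): a 2002::/16 address
     * carries the IPv4 address in bits 16..47 (RFC 3056). Userspace
     * sketch, not kernel code. */
    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct in6_addr v6;
            struct in_addr v4;
            char buf[INET_ADDRSTRLEN];

            inet_pton(AF_INET6, "2002:c000:204::1", &v6);
            if (v6.s6_addr[0] == 0x20 && v6.s6_addr[1] == 0x02) {
                    memcpy(&v4.s_addr, &v6.s6_addr[2], 4);
                    printf("embedded v4: %s\n",
                           inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
            }
            return 0;       /* prints "embedded v4: 192.0.2.4" */
    }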
@@ -744,7 +767,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
744 } 767 }
745 768
746 if (!dst) 769 if (!dst)
747 dst = try_6rd(&iph6->daddr, tunnel); 770 dst = try_6rd(tunnel, &iph6->daddr);
748 771
749 if (!dst) { 772 if (!dst) {
750 struct neighbour *neigh = NULL; 773 struct neighbour *neigh = NULL;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 40161977f7cf..8a0848b60b35 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -179,7 +179,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
179 memset(&tcp_opt, 0, sizeof(tcp_opt)); 179 memset(&tcp_opt, 0, sizeof(tcp_opt));
180 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); 180 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
181 181
182 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) 182 if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
183 goto out; 183 goto out;
184 184
185 ret = NULL; 185 ret = NULL;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6565cf55eb1e..9b6460055df5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
423 } 423 }
424 424
425 inet_csk_reqsk_queue_drop(sk, req, prev); 425 inet_csk_reqsk_queue_drop(sk, req, prev);
426 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
426 goto out; 427 goto out;
427 428
428 case TCP_SYN_SENT: 429 case TCP_SYN_SENT:
@@ -712,7 +713,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
712#endif 713#endif
713 714
714static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 715static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
715 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) 716 u32 tsval, u32 tsecr,
717 struct tcp_md5sig_key *key, int rst, u8 tclass)
716{ 718{
717 const struct tcphdr *th = tcp_hdr(skb); 719 const struct tcphdr *th = tcp_hdr(skb);
718 struct tcphdr *t1; 720 struct tcphdr *t1;
@@ -724,7 +726,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
724 struct dst_entry *dst; 726 struct dst_entry *dst;
725 __be32 *topt; 727 __be32 *topt;
726 728
727 if (ts) 729 if (tsecr)
728 tot_len += TCPOLEN_TSTAMP_ALIGNED; 730 tot_len += TCPOLEN_TSTAMP_ALIGNED;
729#ifdef CONFIG_TCP_MD5SIG 731#ifdef CONFIG_TCP_MD5SIG
730 if (key) 732 if (key)
@@ -754,11 +756,11 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
754 756
755 topt = (__be32 *)(t1 + 1); 757 topt = (__be32 *)(t1 + 1);
756 758
757 if (ts) { 759 if (tsecr) {
758 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 760 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
759 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 761 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
760 *topt++ = htonl(tcp_time_stamp); 762 *topt++ = htonl(tsval);
761 *topt++ = htonl(ts); 763 *topt++ = htonl(tsecr);
762 } 764 }
763 765
764#ifdef CONFIG_TCP_MD5SIG 766#ifdef CONFIG_TCP_MD5SIG
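Annotation: splitting the single "ts" parameter into tsval/tsecr matters because the two fields mean different things: TSval samples the sender's own timestamp clock, TSecr echoes the peer's last TSval (RFC 1323). With one parameter the ACK paths were forced to stamp raw tcp_time_stamp as TSval; the split is what lets the timewait path further down send "tcp_time_stamp + tcptw->tw_ts_offset", a per-connection timestamp offset. The option words as emitted above, annotated for reference:

    /* RFC 1323 timestamp option: NOP, NOP, kind=8, len=10, then the
     * two 32-bit fields (same code as the hunk, comments added). */
    *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
    *topt++ = htonl(tsval);   /* our clock, possibly offset per socket */
    *topt++ = htonl(tsecr);   /* echo of the peer's TSval */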
@@ -834,7 +836,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
834 * no RST generated if md5 hash doesn't match. 836 * no RST generated if md5 hash doesn't match.
835 */ 837 */
836 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), 838 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
837 &tcp_hashinfo, &ipv6h->daddr, 839 &tcp_hashinfo, &ipv6h->saddr,
840 th->source, &ipv6h->daddr,
838 ntohs(th->source), inet6_iif(skb)); 841 ntohs(th->source), inet6_iif(skb));
839 if (!sk1) 842 if (!sk1)
840 return; 843 return;
@@ -858,7 +861,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
858 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - 861 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
859 (th->doff << 2); 862 (th->doff << 2);
860 863
861 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 864 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
862 865
863#ifdef CONFIG_TCP_MD5SIG 866#ifdef CONFIG_TCP_MD5SIG
864release_sk1: 867release_sk1:
@@ -869,10 +872,11 @@ release_sk1:
869#endif 872#endif
870} 873}
871 874
872static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 875static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
876 u32 win, u32 tsval, u32 tsecr,
873 struct tcp_md5sig_key *key, u8 tclass) 877 struct tcp_md5sig_key *key, u8 tclass)
874{ 878{
875 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass); 879 tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
876} 880}
877 881
878static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 882static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -882,6 +886,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
882 886
883 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 887 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
884 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 888 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
889 tcp_time_stamp + tcptw->tw_ts_offset,
885 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw), 890 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
886 tw->tw_tclass); 891 tw->tw_tclass);
887 892
@@ -891,7 +896,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
891static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 896static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
892 struct request_sock *req) 897 struct request_sock *req)
893{ 898{
894 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 899 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
900 req->rcv_wnd, tcp_time_stamp, req->ts_recent,
895 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); 901 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
896} 902}
897 903
@@ -958,8 +964,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
958 goto drop; 964 goto drop;
959 } 965 }
960 966
961 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 967 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
968 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
962 goto drop; 969 goto drop;
970 }
963 971
964 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 972 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
965 if (req == NULL) 973 if (req == NULL)
@@ -1027,7 +1035,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1027 treq->rmt_addr = ipv6_hdr(skb)->saddr; 1035 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1028 treq->loc_addr = ipv6_hdr(skb)->daddr; 1036 treq->loc_addr = ipv6_hdr(skb)->daddr;
1029 if (!want_cookie || tmp_opt.tstamp_ok) 1037 if (!want_cookie || tmp_opt.tstamp_ok)
1030 TCP_ECN_create_request(req, skb); 1038 TCP_ECN_create_request(req, skb, sock_net(sk));
1031 1039
1032 treq->iif = sk->sk_bound_dev_if; 1040 treq->iif = sk->sk_bound_dev_if;
1033 1041
@@ -1108,6 +1116,7 @@ drop_and_release:
1108drop_and_free: 1116drop_and_free:
1109 reqsk_free(req); 1117 reqsk_free(req);
1110drop: 1118drop:
1119 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1111 return 0; /* don't send reset */ 1120 return 0; /* don't send reset */
1112} 1121}
1113 1122
@@ -1163,7 +1172,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1163 newnp->opt = NULL; 1172 newnp->opt = NULL;
1164 newnp->mcast_oif = inet6_iif(skb); 1173 newnp->mcast_oif = inet6_iif(skb);
1165 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1174 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1166 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); 1175 newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
1167 1176
1168 /* 1177 /*
1169 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1178 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1243,7 +1252,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1243 newnp->opt = NULL; 1252 newnp->opt = NULL;
1244 newnp->mcast_oif = inet6_iif(skb); 1253 newnp->mcast_oif = inet6_iif(skb);
1245 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1254 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1246 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); 1255 newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
1247 1256
1248 /* Clone native IPv6 options from listening socket (if any) 1257 /* Clone native IPv6 options from listening socket (if any)
1249 1258
@@ -1288,7 +1297,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1288#endif 1297#endif
1289 1298
1290 if (__inet_inherit_port(sk, newsk) < 0) { 1299 if (__inet_inherit_port(sk, newsk) < 0) {
1291 sock_put(newsk); 1300 inet_csk_prepare_forced_close(newsk);
1301 tcp_done(newsk);
1292 goto out; 1302 goto out;
1293 } 1303 }
1294 __inet6_hash(newsk, NULL); 1304 __inet6_hash(newsk, NULL);
@@ -1455,7 +1465,7 @@ ipv6_pktoptions:
1455 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1465 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1456 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1466 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1457 if (np->rxopt.bits.rxtclass) 1467 if (np->rxopt.bits.rxtclass)
1458 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb)); 1468 np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
1459 if (ipv6_opt_accepted(sk, opt_skb)) { 1469 if (ipv6_opt_accepted(sk, opt_skb)) {
1460 skb_set_owner_r(opt_skb, sk); 1470 skb_set_owner_r(opt_skb, sk);
1461 opt_skb = xchg(&np->pktoptions, opt_skb); 1471 opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1597,6 +1607,7 @@ do_time_wait:
1597 struct sock *sk2; 1607 struct sock *sk2;
1598 1608
1599 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, 1609 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1610 &ipv6_hdr(skb)->saddr, th->source,
1600 &ipv6_hdr(skb)->daddr, 1611 &ipv6_hdr(skb)->daddr,
1601 ntohs(th->dest), inet6_iif(skb)); 1612 ntohs(th->dest), inet6_iif(skb));
1602 if (sk2 != NULL) { 1613 if (sk2 != NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dfaa29b8b293..599e1ba6d1ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -45,6 +45,7 @@
45#include <net/tcp_states.h> 45#include <net/tcp_states.h>
46#include <net/ip6_checksum.h> 46#include <net/ip6_checksum.h>
47#include <net/xfrm.h> 47#include <net/xfrm.h>
48#include <net/inet6_hashtables.h>
48 49
49#include <linux/proc_fs.h> 50#include <linux/proc_fs.h>
50#include <linux/seq_file.h> 51#include <linux/seq_file.h>
@@ -203,7 +204,8 @@ static struct sock *udp6_lib_lookup2(struct net *net,
203{ 204{
204 struct sock *sk, *result; 205 struct sock *sk, *result;
205 struct hlist_nulls_node *node; 206 struct hlist_nulls_node *node;
206 int score, badness; 207 int score, badness, matches = 0, reuseport = 0;
208 u32 hash = 0;
207 209
208begin: 210begin:
209 result = NULL; 211 result = NULL;
@@ -214,8 +216,18 @@ begin:
214 if (score > badness) { 216 if (score > badness) {
215 result = sk; 217 result = sk;
216 badness = score; 218 badness = score;
217 if (score == SCORE2_MAX) 219 reuseport = sk->sk_reuseport;
220 if (reuseport) {
221 hash = inet6_ehashfn(net, daddr, hnum,
222 saddr, sport);
223 matches = 1;
224 } else if (score == SCORE2_MAX)
218 goto exact_match; 225 goto exact_match;
226 } else if (score == badness && reuseport) {
227 matches++;
228 if (((u64)hash * matches) >> 32 == 0)
229 result = sk;
230 hash = next_pseudo_random32(hash);
219 } 231 }
220 } 232 }
221 /* 233 /*
@@ -249,7 +261,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
249 unsigned short hnum = ntohs(dport); 261 unsigned short hnum = ntohs(dport);
250 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); 262 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
251 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; 263 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
252 int score, badness; 264 int score, badness, matches = 0, reuseport = 0;
265 u32 hash = 0;
253 266
254 rcu_read_lock(); 267 rcu_read_lock();
255 if (hslot->count > 10) { 268 if (hslot->count > 10) {
@@ -284,6 +297,17 @@ begin:
284 if (score > badness) { 297 if (score > badness) {
285 result = sk; 298 result = sk;
286 badness = score; 299 badness = score;
300 reuseport = sk->sk_reuseport;
301 if (reuseport) {
302 hash = inet6_ehashfn(net, daddr, hnum,
303 saddr, sport);
304 matches = 1;
305 }
306 } else if (score == badness && reuseport) {
307 matches++;
308 if (((u64)hash * matches) >> 32 == 0)
309 result = sk;
310 hash = next_pseudo_random32(hash);
287 } 311 }
288 } 312 }
289 /* 313 /*
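Annotation: the two lookup loops implement SO_REUSEPORT socket selection as reservoir sampling over equally-scored sockets. The first match seeds "hash" from the flow 4-tuple via inet6_ehashfn(), and each k-th subsequent tie replaces the current pick when ((u64)hash * matches) >> 32 == 0, a fixed-point test that holds with probability roughly 1/matches for a uniform 32-bit hash, so each of N tied sockets wins with probability about 1/N while a given flow always lands on the same socket. (The inet6_lookup_listener() signature change in the tcp_ipv6.c hunks above feeds the same selection for TCP listeners.) A standalone demo of the arithmetic; the LCG stands in for the kernel's next_pseudo_random32(), an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t next_pseudo_random32(uint32_t seed)
    {
            return seed * 1664525 + 1013904223;     /* simple LCG */
    }

    int main(void)
    {
            enum { NSOCKS = 4, TRIALS = 1000000 };
            int wins[NSOCKS] = { 0 };

            for (uint32_t t = 0; t < TRIALS; t++) {
                    /* stand-in for inet6_ehashfn() over a varying flow */
                    uint32_t hash = next_pseudo_random32(t ^ 0x5bd1e995);
                    int result = 0, matches = 1;

                    for (int sk = 1; sk < NSOCKS; sk++) {
                            matches++;
                            if (((uint64_t)hash * matches) >> 32 == 0)
                                    result = sk;    /* ~1/matches chance */
                            hash = next_pseudo_random32(hash);
                    }
                    wins[result]++;
            }
            for (int i = 0; i < NSOCKS; i++)        /* each ~TRIALS/NSOCKS */
                    printf("socket %d: %d\n", i, wins[i]);
            return 0;
    }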
@@ -443,7 +467,7 @@ try_again:
443 ip_cmsg_recv(msg, skb); 467 ip_cmsg_recv(msg, skb);
444 } else { 468 } else {
445 if (np->rxopt.all) 469 if (np->rxopt.all)
446 datagram_recv_ctl(sk, msg, skb); 470 ip6_datagram_recv_ctl(sk, msg, skb);
447 } 471 }
448 472
449 err = copied; 473 err = copied;
@@ -752,40 +776,6 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
752 return 0; 776 return 0;
753} 777}
754 778
755static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
756 int proto)
757{
758 int err;
759
760 UDP_SKB_CB(skb)->partial_cov = 0;
761 UDP_SKB_CB(skb)->cscov = skb->len;
762
763 if (proto == IPPROTO_UDPLITE) {
764 err = udplite_checksum_init(skb, uh);
765 if (err)
766 return err;
767 }
768
769 if (uh->check == 0) {
770 /* RFC 2460 section 8.1 says that we SHOULD log
771 this error. Well, it is reasonable.
772 */
773 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
774 return 1;
775 }
776 if (skb->ip_summed == CHECKSUM_COMPLETE &&
777 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
778 skb->len, proto, skb->csum))
779 skb->ip_summed = CHECKSUM_UNNECESSARY;
780
781 if (!skb_csum_unnecessary(skb))
782 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
783 &ipv6_hdr(skb)->daddr,
784 skb->len, proto, 0));
785
786 return 0;
787}
788
789int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 779int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
790 int proto) 780 int proto)
791{ 781{
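Annotation: only the removal of udp6_csum_init() is visible here, so presumably it was consolidated into a shared checksum location rather than dropped outright. Worth noting while reading it: the "uh->check == 0" rejection is IPv6-specific, because RFC 2460 makes the UDP checksum mandatory over IPv6 while IPv4 lets a zero checksum mean "not computed". A reading aid, sketched rather than real kernel code:

    /* Sketch of the contrast (not kernel code): why a zero UDP
     * checksum is fatal on the v6 path but fine on the v4 path. */
    if (uh->check == 0) {
            if (is_ipv6)
                    return 1;       /* RFC 2460 8.1: checksum mandatory */
            skb->ip_summed = CHECKSUM_UNNECESSARY;  /* v4: "not computed" */
    }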
@@ -1153,8 +1143,8 @@ do_udp_sendmsg:
1153 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1143 memset(opt, 0, sizeof(struct ipv6_txoptions));
1154 opt->tot_len = sizeof(*opt); 1144 opt->tot_len = sizeof(*opt);
1155 1145
1156 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1146 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1157 &hlimit, &tclass, &dontfrag); 1147 &hlimit, &tclass, &dontfrag);
1158 if (err < 0) { 1148 if (err < 0) {
1159 fl6_sock_release(flowlabel); 1149 fl6_sock_release(flowlabel);
1160 return err; 1150 return err;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 0c8934a317c2..cf05cf073c51 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -56,7 +56,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
56 /* Packet is from an untrusted source, reset gso_segs. */ 56 /* Packet is from an untrusted source, reset gso_segs. */
57 int type = skb_shinfo(skb)->gso_type; 57 int type = skb_shinfo(skb)->gso_type;
58 58
59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || 59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
60 SKB_GSO_GRE) ||
60 !(type & (SKB_GSO_UDP)))) 61 !(type & (SKB_GSO_UDP))))
61 goto out; 62 goto out;
62 63
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 9f2095b19ad0..9bf6a74a71d2 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -69,8 +69,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
69 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 69 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
70 goto out; 70 goto out;
71 71
72 if (skb_cloned(skb) && 72 err = skb_unclone(skb, GFP_ATOMIC);
73 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 73 if (err)
74 goto out; 74 goto out;
75 75
76 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 76 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c9844135c9ca..4ef7bdb65440 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -110,7 +110,6 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
110 110
111 /* Sheit... I remember I did this right. Apparently, 111 /* Sheit... I remember I did this right. Apparently,
112 * it was magically lost, so this code needs audit */ 112 * it was magically lost, so this code needs audit */
113 xdst->u.rt6.n = neigh_clone(rt->n);
114 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | 113 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
115 RTF_LOCAL); 114 RTF_LOCAL);
116 xdst->u.rt6.rt6i_metric = rt->rt6i_metric; 115 xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
@@ -321,7 +320,51 @@ static struct ctl_table xfrm6_policy_table[] = {
321 { } 320 { }
322}; 321};
323 322
324static struct ctl_table_header *sysctl_hdr; 323static int __net_init xfrm6_net_init(struct net *net)
324{
325 struct ctl_table *table;
326 struct ctl_table_header *hdr;
327
328 table = xfrm6_policy_table;
329 if (!net_eq(net, &init_net)) {
330 table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
331 if (!table)
332 goto err_alloc;
333
334 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
335 }
336
337 hdr = register_net_sysctl(net, "net/ipv6", table);
338 if (!hdr)
339 goto err_reg;
340
341 net->ipv6.sysctl.xfrm6_hdr = hdr;
342 return 0;
343
344err_reg:
345 if (!net_eq(net, &init_net))
346 kfree(table);
347err_alloc:
348 return -ENOMEM;
349}
350
351static void __net_exit xfrm6_net_exit(struct net *net)
352{
353 struct ctl_table *table;
354
355 if (net->ipv6.sysctl.xfrm6_hdr == NULL)
356 return;
357
358 table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
359 unregister_net_sysctl_table(net->ipv6.sysctl.xfrm6_hdr);
360 if (!net_eq(net, &init_net))
361 kfree(table);
362}
363
364static struct pernet_operations xfrm6_net_ops = {
365 .init = xfrm6_net_init,
366 .exit = xfrm6_net_exit,
367};
325#endif 368#endif
326 369
327int __init xfrm6_init(void) 370int __init xfrm6_init(void)
@@ -340,8 +383,7 @@ int __init xfrm6_init(void)
340 goto out_policy; 383 goto out_policy;
341 384
342#ifdef CONFIG_SYSCTL 385#ifdef CONFIG_SYSCTL
343 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6", 386 register_pernet_subsys(&xfrm6_net_ops);
344 xfrm6_policy_table);
345#endif 387#endif
346out: 388out:
347 return ret; 389 return ret;
@@ -353,8 +395,7 @@ out_policy:
353void xfrm6_fini(void) 395void xfrm6_fini(void)
354{ 396{
355#ifdef CONFIG_SYSCTL 397#ifdef CONFIG_SYSCTL
356 if (sysctl_hdr) 398 unregister_pernet_subsys(&xfrm6_net_ops);
357 unregister_net_sysctl_table(sysctl_hdr);
358#endif 399#endif
359 xfrm6_policy_fini(); 400 xfrm6_policy_fini();
360 xfrm6_state_fini(); 401 xfrm6_state_fini();
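Annotation: the xfrm6 sysctl moves from a single init_net registration to the standard per-namespace pattern: init_net keeps the static table, every other namespace gets a kmemdup()'d copy whose .data pointer is re-aimed at that namespace's gc_thresh, and the returned header is stashed in net->ipv6.sysctl so the exit hook can unregister and free it. Without the repoint, every namespace would read and write init_net's counter through the shared table. The load-bearing lines, annotated:

    /* Same code as the hunk above, comments added. */
    table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table),
                    GFP_KERNEL);            /* private copy per netns */
    table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
                                            /* point at *this* netns */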
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index ee5a7065aacc..6cc48012b730 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -72,7 +72,7 @@ static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *ad
72{ 72{
73 unsigned int h; 73 unsigned int h;
74 74
75 h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); 75 h = ipv6_addr_hash((const struct in6_addr *)addr);
76 h ^= h >> 16; 76 h ^= h >> 16;
77 h ^= h >> 8; 77 h ^= h >> 8;
78 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; 78 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
@@ -94,7 +94,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
94 hlist_for_each_entry_rcu(x6spi, pos, 94 hlist_for_each_entry_rcu(x6spi, pos,
95 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 95 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
96 list_byaddr) { 96 list_byaddr) {
97 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 97 if (xfrm6_addr_equal(&x6spi->addr, saddr))
98 return x6spi; 98 return x6spi;
99 } 99 }
100 100
@@ -211,7 +211,7 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
211 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 211 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
212 list_byaddr) 212 list_byaddr)
213 { 213 {
214 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 214 if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
215 if (atomic_dec_and_test(&x6spi->refcnt)) { 215 if (atomic_dec_and_test(&x6spi->refcnt)) {
216 hlist_del_rcu(&x6spi->list_byaddr); 216 hlist_del_rcu(&x6spi->list_byaddr);
217 hlist_del_rcu(&x6spi->list_byspi); 217 hlist_del_rcu(&x6spi->list_byspi);
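Annotation: both xfrm6_tunnel hunks swap raw operations on xfrm_address_t for typed helpers: ipv6_addr_hash() for the XOR fold (see the reassembly hunk earlier) and xfrm6_addr_equal() for the memcmp. A sketch of the latter, assuming it is the thin cast-and-compare wrapper its name suggests:

    static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
                                        const xfrm_address_t *b)
    {
            return ipv6_addr_equal((const struct in6_addr *)a,
                                   (const struct in6_addr *)b);
    }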
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index b833677d83d6..d07e3a626446 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,8 +2567,7 @@ bed:
2567 err); 2567 err);
2568 2568
2569 /* If watchdog is still activated, kill it! */ 2569 /* If watchdog is still activated, kill it! */
2570 if(timer_pending(&(self->watchdog))) 2570 del_timer(&(self->watchdog));
2571 del_timer(&(self->watchdog));
2572 2571
2573 IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); 2572 IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__);
2574 2573
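Annotation: the timer_pending() guard was redundant, since del_timer() performs the same check internally, and it does so under the timer base lock, so the open-coded version was also racier for no benefit. The shape of del_timer() for reference (kernel/timer.c, abridged):

    int del_timer(struct timer_list *timer)
    {
            int ret = 0;

            if (timer_pending(timer)) {
                    /* ... lock base, detach the pending timer, unlock ... */
                    ret = 1;        /* 1 iff it deactivated the timer */
            }
            return ret;
    }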
diff --git a/net/irda/ircomm/Kconfig b/net/irda/ircomm/Kconfig
index 2d4c6b4a78d6..19492c1707b7 100644
--- a/net/irda/ircomm/Kconfig
+++ b/net/irda/ircomm/Kconfig
@@ -1,6 +1,6 @@
1config IRCOMM 1config IRCOMM
2 tristate "IrCOMM protocol" 2 tristate "IrCOMM protocol"
3 depends on IRDA 3 depends on IRDA && TTY
4 help 4 help
5 Say Y here if you want to build support for the IrCOMM protocol. 5 Say Y here if you want to build support for the IrCOMM protocol.
6 To compile it as modules, choose M here: the modules will be 6 To compile it as modules, choose M here: the modules will be
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index a68c88cdec6e..9a5fd3c3e530 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -452,7 +452,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
452 self->line, self->port.count); 452 self->line, self->port.count);
453 453
454 /* Not really used by us, but lets do it anyway */ 454 /* Not really used by us, but lets do it anyway */
455 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; 455 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
456 456
457 /* 457 /*
458 * If the port is the middle of closing, bail out now 458 * If the port is the middle of closing, bail out now
@@ -1136,14 +1136,14 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1136 ircomm_tty_send_initial_parameters(self); 1136 ircomm_tty_send_initial_parameters(self);
1137 ircomm_tty_link_established(self); 1137 ircomm_tty_link_established(self);
1138 } 1138 }
1139 tty_kref_put(tty);
1139 1140
1140 /* 1141 /*
1141 * Use flip buffer functions since the code may be called from interrupt 1142 * Use flip buffer functions since the code may be called from interrupt
1142 * context 1143 * context
1143 */ 1144 */
1144 tty_insert_flip_string(tty, skb->data, skb->len); 1145 tty_insert_flip_string(&self->port, skb->data, skb->len);
1145 tty_flip_buffer_push(tty); 1146 tty_flip_buffer_push(&self->port);
1146 tty_kref_put(tty);
1147 1147
1148 /* No need to kfree_skb - see ircomm_ttp_data_indication() */ 1148 /* No need to kfree_skb - see ircomm_ttp_data_indication() */
1149 1149
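Annotation: part of the 3.9 tty rework, where the flip-buffer API is re-homed from struct tty_struct to struct tty_port, which exists even when no tty is open. That is also why tty_kref_put() can move up: once the buffer calls take the port, the function no longer needs to hold the tty reference across them. The signatures targeted by the conversion (from linux/tty_flip.h; parameter names approximate):

    int tty_insert_flip_string(struct tty_port *port,
                               const unsigned char *chars, size_t size);
    void tty_flip_buffer_push(struct tty_port *port);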
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 2bb2beb6a373..3c83a1e5ab03 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -214,8 +214,7 @@ irnet_get_discovery_log(irnet_socket * ap)
214 * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1 214 * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1
215 */ 215 */
216static inline int 216static inline int
217irnet_read_discovery_log(irnet_socket * ap, 217irnet_read_discovery_log(irnet_socket *ap, char *event, int buf_size)
218 char * event)
219{ 218{
220 int done_event = 0; 219 int done_event = 0;
221 220
@@ -237,12 +236,13 @@ irnet_read_discovery_log(irnet_socket * ap,
237 if(ap->disco_index < ap->disco_number) 236 if(ap->disco_index < ap->disco_number)
238 { 237 {
239 /* Write an event */ 238 /* Write an event */
240 sprintf(event, "Found %08x (%s) behind %08x {hints %02X-%02X}\n", 239 snprintf(event, buf_size,
241 ap->discoveries[ap->disco_index].daddr, 240 "Found %08x (%s) behind %08x {hints %02X-%02X}\n",
242 ap->discoveries[ap->disco_index].info, 241 ap->discoveries[ap->disco_index].daddr,
243 ap->discoveries[ap->disco_index].saddr, 242 ap->discoveries[ap->disco_index].info,
244 ap->discoveries[ap->disco_index].hints[0], 243 ap->discoveries[ap->disco_index].saddr,
245 ap->discoveries[ap->disco_index].hints[1]); 244 ap->discoveries[ap->disco_index].hints[0],
245 ap->discoveries[ap->disco_index].hints[1]);
246 DEBUG(CTRL_INFO, "Writing discovery %d : %s\n", 246 DEBUG(CTRL_INFO, "Writing discovery %d : %s\n",
247 ap->disco_index, ap->discoveries[ap->disco_index].info); 247 ap->disco_index, ap->discoveries[ap->disco_index].info);
248 248
@@ -282,27 +282,24 @@ irnet_ctrl_read(irnet_socket * ap,
282 size_t count) 282 size_t count)
283{ 283{
284 DECLARE_WAITQUEUE(wait, current); 284 DECLARE_WAITQUEUE(wait, current);
285 char event[64]; /* Max event is 61 char */ 285 char event[75];
286 ssize_t ret = 0; 286 ssize_t ret = 0;
287 287
288 DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); 288 DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count);
289 289
290 /* Check if we can write an event out in one go */
291 DABORT(count < sizeof(event), -EOVERFLOW, CTRL_ERROR, "Buffer to small.\n");
292
293#ifdef INITIAL_DISCOVERY 290#ifdef INITIAL_DISCOVERY
294 /* Check if we have read the log */ 291 /* Check if we have read the log */
295 if(irnet_read_discovery_log(ap, event)) 292 if (irnet_read_discovery_log(ap, event, sizeof(event)))
296 { 293 {
297 /* We have an event !!! Copy it to the user */ 294 count = min(strlen(event), count);
298 if(copy_to_user(buf, event, strlen(event))) 295 if (copy_to_user(buf, event, count))
299 { 296 {
300 DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); 297 DERROR(CTRL_ERROR, "Invalid user space pointer.\n");
301 return -EFAULT; 298 return -EFAULT;
302 } 299 }
303 300
304 DEXIT(CTRL_TRACE, "\n"); 301 DEXIT(CTRL_TRACE, "\n");
305 return strlen(event); 302 return count;
306 } 303 }
307#endif /* INITIAL_DISCOVERY */ 304#endif /* INITIAL_DISCOVERY */
308 305
@@ -339,79 +336,81 @@ irnet_ctrl_read(irnet_socket * ap,
339 switch(irnet_events.log[ap->event_index].event) 336 switch(irnet_events.log[ap->event_index].event)
340 { 337 {
341 case IRNET_DISCOVER: 338 case IRNET_DISCOVER:
342 sprintf(event, "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n", 339 snprintf(event, sizeof(event),
343 irnet_events.log[ap->event_index].daddr, 340 "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n",
344 irnet_events.log[ap->event_index].name, 341 irnet_events.log[ap->event_index].daddr,
345 irnet_events.log[ap->event_index].saddr, 342 irnet_events.log[ap->event_index].name,
346 irnet_events.log[ap->event_index].hints.byte[0], 343 irnet_events.log[ap->event_index].saddr,
347 irnet_events.log[ap->event_index].hints.byte[1]); 344 irnet_events.log[ap->event_index].hints.byte[0],
345 irnet_events.log[ap->event_index].hints.byte[1]);
348 break; 346 break;
349 case IRNET_EXPIRE: 347 case IRNET_EXPIRE:
350 sprintf(event, "Expired %08x (%s) behind %08x {hints %02X-%02X}\n", 348 snprintf(event, sizeof(event),
351 irnet_events.log[ap->event_index].daddr, 349 "Expired %08x (%s) behind %08x {hints %02X-%02X}\n",
352 irnet_events.log[ap->event_index].name, 350 irnet_events.log[ap->event_index].daddr,
353 irnet_events.log[ap->event_index].saddr, 351 irnet_events.log[ap->event_index].name,
354 irnet_events.log[ap->event_index].hints.byte[0], 352 irnet_events.log[ap->event_index].saddr,
355 irnet_events.log[ap->event_index].hints.byte[1]); 353 irnet_events.log[ap->event_index].hints.byte[0],
354 irnet_events.log[ap->event_index].hints.byte[1]);
356 break; 355 break;
357 case IRNET_CONNECT_TO: 356 case IRNET_CONNECT_TO:
358 sprintf(event, "Connected to %08x (%s) on ppp%d\n", 357 snprintf(event, sizeof(event), "Connected to %08x (%s) on ppp%d\n",
359 irnet_events.log[ap->event_index].daddr, 358 irnet_events.log[ap->event_index].daddr,
360 irnet_events.log[ap->event_index].name, 359 irnet_events.log[ap->event_index].name,
361 irnet_events.log[ap->event_index].unit); 360 irnet_events.log[ap->event_index].unit);
362 break; 361 break;
363 case IRNET_CONNECT_FROM: 362 case IRNET_CONNECT_FROM:
364 sprintf(event, "Connection from %08x (%s) on ppp%d\n", 363 snprintf(event, sizeof(event), "Connection from %08x (%s) on ppp%d\n",
365 irnet_events.log[ap->event_index].daddr, 364 irnet_events.log[ap->event_index].daddr,
366 irnet_events.log[ap->event_index].name, 365 irnet_events.log[ap->event_index].name,
367 irnet_events.log[ap->event_index].unit); 366 irnet_events.log[ap->event_index].unit);
368 break; 367 break;
369 case IRNET_REQUEST_FROM: 368 case IRNET_REQUEST_FROM:
370 sprintf(event, "Request from %08x (%s) behind %08x\n", 369 snprintf(event, sizeof(event), "Request from %08x (%s) behind %08x\n",
371 irnet_events.log[ap->event_index].daddr, 370 irnet_events.log[ap->event_index].daddr,
372 irnet_events.log[ap->event_index].name, 371 irnet_events.log[ap->event_index].name,
373 irnet_events.log[ap->event_index].saddr); 372 irnet_events.log[ap->event_index].saddr);
374 break; 373 break;
375 case IRNET_NOANSWER_FROM: 374 case IRNET_NOANSWER_FROM:
376 sprintf(event, "No-answer from %08x (%s) on ppp%d\n", 375 snprintf(event, sizeof(event), "No-answer from %08x (%s) on ppp%d\n",
377 irnet_events.log[ap->event_index].daddr, 376 irnet_events.log[ap->event_index].daddr,
378 irnet_events.log[ap->event_index].name, 377 irnet_events.log[ap->event_index].name,
379 irnet_events.log[ap->event_index].unit); 378 irnet_events.log[ap->event_index].unit);
380 break; 379 break;
381 case IRNET_BLOCKED_LINK: 380 case IRNET_BLOCKED_LINK:
382 sprintf(event, "Blocked link with %08x (%s) on ppp%d\n", 381 snprintf(event, sizeof(event), "Blocked link with %08x (%s) on ppp%d\n",
383 irnet_events.log[ap->event_index].daddr, 382 irnet_events.log[ap->event_index].daddr,
384 irnet_events.log[ap->event_index].name, 383 irnet_events.log[ap->event_index].name,
385 irnet_events.log[ap->event_index].unit); 384 irnet_events.log[ap->event_index].unit);
386 break; 385 break;
387 case IRNET_DISCONNECT_FROM: 386 case IRNET_DISCONNECT_FROM:
388 sprintf(event, "Disconnection from %08x (%s) on ppp%d\n", 387 snprintf(event, sizeof(event), "Disconnection from %08x (%s) on ppp%d\n",
389 irnet_events.log[ap->event_index].daddr, 388 irnet_events.log[ap->event_index].daddr,
390 irnet_events.log[ap->event_index].name, 389 irnet_events.log[ap->event_index].name,
391 irnet_events.log[ap->event_index].unit); 390 irnet_events.log[ap->event_index].unit);
392 break; 391 break;
393 case IRNET_DISCONNECT_TO: 392 case IRNET_DISCONNECT_TO:
394 sprintf(event, "Disconnected to %08x (%s)\n", 393 snprintf(event, sizeof(event), "Disconnected to %08x (%s)\n",
395 irnet_events.log[ap->event_index].daddr, 394 irnet_events.log[ap->event_index].daddr,
396 irnet_events.log[ap->event_index].name); 395 irnet_events.log[ap->event_index].name);
397 break; 396 break;
398 default: 397 default:
399 sprintf(event, "Bug\n"); 398 snprintf(event, sizeof(event), "Bug\n");
400 } 399 }
401 /* Increment our event index */ 400 /* Increment our event index */
402 ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS; 401 ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS;
403 402
404 DEBUG(CTRL_INFO, "Event is :%s", event); 403 DEBUG(CTRL_INFO, "Event is :%s", event);
405 404
406 /* Copy it to the user */ 405 count = min(strlen(event), count);
407 if(copy_to_user(buf, event, strlen(event))) 406 if (copy_to_user(buf, event, count))
408 { 407 {
409 DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); 408 DERROR(CTRL_ERROR, "Invalid user space pointer.\n");
410 return -EFAULT; 409 return -EFAULT;
411 } 410 }
412 411
413 DEXIT(CTRL_TRACE, "\n"); 412 DEXIT(CTRL_TRACE, "\n");
414 return strlen(event); 413 return count;
415} 414}
416 415
417/*------------------------------------------------------------------*/ 416/*------------------------------------------------------------------*/
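For reference, a minimal sketch of the bounded-copy pattern the irnet hunks above switch to (the function name, buffer size, and format values here are hypothetical, not from the patch): format into a fixed on-stack buffer with snprintf(), clamp the length to the caller's count, then copy_to_user(). This is what closes the overflow the old sprintf()/strlen() sequence allowed when the event string exceeded the user's buffer.

#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t event_to_user(char __user *buf, size_t count)
{
	char event[64];		/* hypothetical buffer size */

	snprintf(event, sizeof(event),
		 "Connected to %08x (%s) on ppp%d\n",
		 0x5ba90001, "peer", 0);	/* illustrative values */

	/* copy no more than the caller asked for, nor more than was
	 * actually formatted */
	count = min(strlen(event), count);
	if (copy_to_user(buf, event, count))
		return -EFAULT;
	return count;
}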
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 1d552b3946fc..0c4c115a5cab 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -57,7 +57,7 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
57 * Basically, we multiply the number of remaining slots by our 57 * Basically, we multiply the number of remaining slots by our
58 * slot time, plus add some extra time to properly receive the last 58 * slot time, plus add some extra time to properly receive the last
59 * discovery packet (which is longer due to extra discovery info), 59 * discovery packet (which is longer due to extra discovery info),
 60 * to avoid messing with for incomming connections requests and 60 * to avoid interfering with incoming connection requests and
61 * to accommodate devices that perform discovery slower than us. 61 * to accommodate devices that perform discovery slower than us.
62 * Jean II */ 62 * Jean II */
63 timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s) 63 timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
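To make the comment concrete, a worked example of the remaining-slots term with illustrative values (none of these figures are from the source):

/* Assume sysctl_slot_timeout = 90 ms, HZ = 1000, S = 16 discovery
 * slots of which s = 4 are already done:
 *
 *	(90 * 1000 / 1000) * (16 - 4) = 90 * 12
 *	                              = 1080 jiffies (~1.08 s at HZ=1000)
 *
 * The "extra time" terms mentioned in the comment are then added on
 * top before the timer is armed.
 */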
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3ad1f9db5f8b..4fe76ff214c2 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -831,8 +831,11 @@ static int iucv_reboot_event(struct notifier_block *this,
831{ 831{
832 int i; 832 int i;
833 833
834 if (cpumask_empty(&iucv_irq_cpumask))
835 return NOTIFY_DONE;
836
834 get_online_cpus(); 837 get_online_cpus();
835 on_each_cpu(iucv_block_cpu, NULL, 1); 838 on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
836 preempt_disable(); 839 preempt_disable();
837 for (i = 0; i < iucv_max_pathid; i++) { 840 for (i = 0; i < iucv_max_pathid; i++) {
838 if (iucv_path_table[i]) 841 if (iucv_path_table[i])
@@ -1806,7 +1809,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
1806 struct iucv_irq_data *p; 1809 struct iucv_irq_data *p;
1807 struct iucv_irq_list *work; 1810 struct iucv_irq_list *work;
1808 1811
1809 kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++; 1812 inc_irq_stat(IRQEXT_IUC);
1810 p = iucv_irq_data[smp_processor_id()]; 1813 p = iucv_irq_data[smp_processor_id()];
1811 if (p->ippathid >= iucv_max_pathid) { 1814 if (p->ippathid >= iucv_max_pathid) {
1812 WARN_ON(p->ippathid >= iucv_max_pathid); 1815 WARN_ON(p->ippathid >= iucv_max_pathid);
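A compressed sketch of the pattern the first iucv hunk introduces, with a stand-in mask and callback (names here are hypothetical): bail out before taking the hotplug lock when no CPU has the interrupt enabled, and otherwise restrict the cross-call to the CPUs in the mask instead of all online CPUs.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/smp.h>

static struct cpumask example_irq_cpumask;	/* stands in for iucv_irq_cpumask */

static void example_block_cpu(void *data)
{
	/* per-CPU interrupt-disable work would go here */
}

static int example_reboot_event(void)
{
	if (cpumask_empty(&example_irq_cpumask))
		return NOTIFY_DONE;	/* no CPU has the IRQ enabled */

	get_online_cpus();
	on_each_cpu_mask(&example_irq_cpumask, example_block_cpu, NULL, 1);
	put_online_cpus();
	return NOTIFY_DONE;
}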
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5b426a646544..9ef79851f297 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -203,7 +203,6 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
203 } 203 }
204 if (*skb2 != NULL) { 204 if (*skb2 != NULL) {
205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
206 skb_orphan(*skb2);
207 skb_set_owner_r(*skb2, sk); 206 skb_set_owner_r(*skb2, sk);
208 skb_queue_tail(&sk->sk_receive_queue, *skb2); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2);
209 sk->sk_data_ready(sk, (*skb2)->len); 208 sk->sk_data_ready(sk, (*skb2)->len);
@@ -762,7 +761,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
762 } 761 }
763 762
764 /* identity & sensitivity */ 763 /* identity & sensitivity */
765 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, x->props.family)) 764 if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr, x->props.family))
766 size += sizeof(struct sadb_address) + sockaddr_size; 765 size += sizeof(struct sadb_address) + sockaddr_size;
767 766
768 if (add_keys) { 767 if (add_keys) {
@@ -816,18 +815,21 @@ static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
816 sa->sadb_sa_auth = 0; 815 sa->sadb_sa_auth = 0;
817 if (x->aalg) { 816 if (x->aalg) {
818 struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 817 struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
819 sa->sadb_sa_auth = a ? a->desc.sadb_alg_id : 0; 818 sa->sadb_sa_auth = (a && a->pfkey_supported) ?
819 a->desc.sadb_alg_id : 0;
820 } 820 }
821 sa->sadb_sa_encrypt = 0; 821 sa->sadb_sa_encrypt = 0;
822 BUG_ON(x->ealg && x->calg); 822 BUG_ON(x->ealg && x->calg);
823 if (x->ealg) { 823 if (x->ealg) {
824 struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0); 824 struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0);
825 sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0; 825 sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
826 a->desc.sadb_alg_id : 0;
826 } 827 }
827 /* KAME compatible: sadb_sa_encrypt is overloaded with calg id */ 828 /* KAME compatible: sadb_sa_encrypt is overloaded with calg id */
828 if (x->calg) { 829 if (x->calg) {
829 struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0); 830 struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0);
830 sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0; 831 sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
832 a->desc.sadb_alg_id : 0;
831 } 833 }
832 834
833 sa->sadb_sa_flags = 0; 835 sa->sadb_sa_flags = 0;
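The three assignments above share one guard; reduced to a sketch, the common shape is the helper below (the helper itself is hypothetical, the fields are exactly the ones used in the hunk): an algorithm ID is only advertised over PF_KEY when the xfrm descriptor is flagged pfkey_supported, and 0 is reported otherwise.

#include <linux/types.h>
#include <net/xfrm.h>

static u8 pfkey_alg_id(const struct xfrm_algo_desc *a)
{
	return (a && a->pfkey_supported) ? a->desc.sadb_alg_id : 0;
}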
@@ -909,8 +911,8 @@ static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
909 if (!addr->sadb_address_prefixlen) 911 if (!addr->sadb_address_prefixlen)
910 BUG(); 912 BUG();
911 913
912 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, 914 if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr,
913 x->props.family)) { 915 x->props.family)) {
914 addr = (struct sadb_address*) skb_put(skb, 916 addr = (struct sadb_address*) skb_put(skb,
915 sizeof(struct sadb_address)+sockaddr_size); 917 sizeof(struct sadb_address)+sockaddr_size);
916 addr->sadb_address_len = 918 addr->sadb_address_len =
@@ -1138,7 +1140,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1138 if (sa->sadb_sa_auth) { 1140 if (sa->sadb_sa_auth) {
1139 int keysize = 0; 1141 int keysize = 0;
1140 struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth); 1142 struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
1141 if (!a) { 1143 if (!a || !a->pfkey_supported) {
1142 err = -ENOSYS; 1144 err = -ENOSYS;
1143 goto out; 1145 goto out;
1144 } 1146 }
@@ -1160,7 +1162,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1160 if (sa->sadb_sa_encrypt) { 1162 if (sa->sadb_sa_encrypt) {
1161 if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) { 1163 if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
1162 struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt); 1164 struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
1163 if (!a) { 1165 if (!a || !a->pfkey_supported) {
1164 err = -ENOSYS; 1166 err = -ENOSYS;
1165 goto out; 1167 goto out;
1166 } 1168 }
@@ -1172,7 +1174,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1172 } else { 1174 } else {
1173 int keysize = 0; 1175 int keysize = 0;
1174 struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt); 1176 struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
1175 if (!a) { 1177 if (!a || !a->pfkey_supported) {
1176 err = -ENOSYS; 1178 err = -ENOSYS;
1177 goto out; 1179 goto out;
1178 } 1180 }
@@ -1321,7 +1323,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
1321 1323
1322 if (hdr->sadb_msg_seq) { 1324 if (hdr->sadb_msg_seq) {
1323 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq); 1325 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1324 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) { 1326 if (x && !xfrm_addr_equal(&x->id.daddr, xdaddr, family)) {
1325 xfrm_state_put(x); 1327 xfrm_state_put(x);
1326 x = NULL; 1328 x = NULL;
1327 } 1329 }
@@ -1578,13 +1580,13 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1578 struct sadb_msg *hdr; 1580 struct sadb_msg *hdr;
1579 int len, auth_len, enc_len, i; 1581 int len, auth_len, enc_len, i;
1580 1582
1581 auth_len = xfrm_count_auth_supported(); 1583 auth_len = xfrm_count_pfkey_auth_supported();
1582 if (auth_len) { 1584 if (auth_len) {
1583 auth_len *= sizeof(struct sadb_alg); 1585 auth_len *= sizeof(struct sadb_alg);
1584 auth_len += sizeof(struct sadb_supported); 1586 auth_len += sizeof(struct sadb_supported);
1585 } 1587 }
1586 1588
1587 enc_len = xfrm_count_enc_supported(); 1589 enc_len = xfrm_count_pfkey_enc_supported();
1588 if (enc_len) { 1590 if (enc_len) {
1589 enc_len *= sizeof(struct sadb_alg); 1591 enc_len *= sizeof(struct sadb_alg);
1590 enc_len += sizeof(struct sadb_supported); 1592 enc_len += sizeof(struct sadb_supported);
@@ -1615,6 +1617,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1615 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i); 1617 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
1616 if (!aalg) 1618 if (!aalg)
1617 break; 1619 break;
1620 if (!aalg->pfkey_supported)
1621 continue;
1618 if (aalg->available) 1622 if (aalg->available)
1619 *ap++ = aalg->desc; 1623 *ap++ = aalg->desc;
1620 } 1624 }
@@ -1634,6 +1638,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1634 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i); 1638 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
1635 if (!ealg) 1639 if (!ealg)
1636 break; 1640 break;
1641 if (!ealg->pfkey_supported)
1642 continue;
1637 if (ealg->available) 1643 if (ealg->available)
1638 *ap++ = ealg->desc; 1644 *ap++ = ealg->desc;
1639 } 1645 }
@@ -2825,6 +2831,8 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
2825 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i); 2831 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
2826 if (!aalg) 2832 if (!aalg)
2827 break; 2833 break;
2834 if (!aalg->pfkey_supported)
2835 continue;
2828 if (aalg_tmpl_set(t, aalg) && aalg->available) 2836 if (aalg_tmpl_set(t, aalg) && aalg->available)
2829 sz += sizeof(struct sadb_comb); 2837 sz += sizeof(struct sadb_comb);
2830 } 2838 }
@@ -2840,6 +2848,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2840 if (!ealg) 2848 if (!ealg)
2841 break; 2849 break;
2842 2850
2851 if (!ealg->pfkey_supported)
2852 continue;
2853
2843 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2854 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2844 continue; 2855 continue;
2845 2856
@@ -2848,6 +2859,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2848 if (!aalg) 2859 if (!aalg)
2849 break; 2860 break;
2850 2861
2862 if (!aalg->pfkey_supported)
2863 continue;
2864
2851 if (aalg_tmpl_set(t, aalg) && aalg->available) 2865 if (aalg_tmpl_set(t, aalg) && aalg->available)
2852 sz += sizeof(struct sadb_comb); 2866 sz += sizeof(struct sadb_comb);
2853 } 2867 }
@@ -2871,6 +2885,9 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2871 if (!aalg) 2885 if (!aalg)
2872 break; 2886 break;
2873 2887
2888 if (!aalg->pfkey_supported)
2889 continue;
2890
2874 if (aalg_tmpl_set(t, aalg) && aalg->available) { 2891 if (aalg_tmpl_set(t, aalg) && aalg->available) {
2875 struct sadb_comb *c; 2892 struct sadb_comb *c;
2876 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb)); 2893 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -2903,6 +2920,9 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2903 if (!ealg) 2920 if (!ealg)
2904 break; 2921 break;
2905 2922
2923 if (!ealg->pfkey_supported)
2924 continue;
2925
2906 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2926 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2907 continue; 2927 continue;
2908 2928
@@ -2911,6 +2931,8 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2911 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k); 2931 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
2912 if (!aalg) 2932 if (!aalg)
2913 break; 2933 break;
2934 if (!aalg->pfkey_supported)
2935 continue;
2914 if (!(aalg_tmpl_set(t, aalg) && aalg->available)) 2936 if (!(aalg_tmpl_set(t, aalg) && aalg->available))
2915 continue; 2937 continue;
2916 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb)); 2938 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -3718,7 +3740,7 @@ static int __net_init pfkey_init_proc(struct net *net)
3718{ 3740{
3719 struct proc_dir_entry *e; 3741 struct proc_dir_entry *e;
3720 3742
3721 e = proc_net_fops_create(net, "pfkey", 0, &pfkey_proc_ops); 3743 e = proc_create("pfkey", 0, net->proc_net, &pfkey_proc_ops);
3722 if (e == NULL) 3744 if (e == NULL)
3723 return -ENOMEM; 3745 return -ENOMEM;
3724 3746
@@ -3727,7 +3749,7 @@ static int __net_init pfkey_init_proc(struct net *net)
3727 3749
3728static void __net_exit pfkey_exit_proc(struct net *net) 3750static void __net_exit pfkey_exit_proc(struct net *net)
3729{ 3751{
3730 proc_net_remove(net, "pfkey"); 3752 remove_proc_entry("pfkey", net->proc_net);
3731} 3753}
3732#else 3754#else
3733static inline int pfkey_init_proc(struct net *net) 3755static inline int pfkey_init_proc(struct net *net)
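For reference, a minimal sketch of the per-net proc registration style af_key is migrated to in the last two hunks ("example" and example_proc_ops are placeholders, not from the patch): proc_create() against net->proc_net replaces proc_net_fops_create(), and remove_proc_entry() replaces proc_net_remove().

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations example_proc_ops;	/* assumed defined elsewhere */

static int __net_init example_init_proc(struct net *net)
{
	if (!proc_create("example", 0, net->proc_net, &example_proc_ops))
		return -ENOMEM;
	return 0;
}

static void __net_exit example_exit_proc(struct net *net)
{
	remove_proc_entry("example", net->proc_net);
}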
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
index 147a8fd47a17..adb9843dd7cf 100644
--- a/net/l2tp/Kconfig
+++ b/net/l2tp/Kconfig
@@ -46,8 +46,8 @@ config L2TP_DEBUGFS
46 will be called l2tp_debugfs. 46 will be called l2tp_debugfs.
47 47
48config L2TP_V3 48config L2TP_V3
49 bool "L2TPv3 support (EXPERIMENTAL)" 49 bool "L2TPv3 support"
50 depends on EXPERIMENTAL && L2TP 50 depends on L2TP
51 help 51 help
52 Layer Two Tunneling Protocol Version 3 52 Layer Two Tunneling Protocol Version 3
53 53
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f3723c13c..dcfd64e83ab7 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -101,6 +101,7 @@ struct l2tp_skb_cb {
101 101
102static atomic_t l2tp_tunnel_count; 102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count; 103static atomic_t l2tp_session_count;
104static struct workqueue_struct *l2tp_wq;
104 105
105/* per-net private data for this module */ 106/* per-net private data for this module */
106static unsigned int l2tp_net_id; 107static unsigned int l2tp_net_id;
@@ -122,7 +123,6 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
122 return net_generic(net, l2tp_net_id); 123 return net_generic(net, l2tp_net_id);
123} 124}
124 125
125
126/* Tunnel reference counts. Incremented per session that is added to 126/* Tunnel reference counts. Incremented per session that is added to
127 * the tunnel. 127 * the tunnel.
128 */ 128 */
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
168 168
169} 169}
170 170
171/* Lookup the tunnel socket, possibly involving the fs code if the socket is
172 * owned by userspace. A struct sock returned from this function must be
173 * released using l2tp_tunnel_sock_put once you're done with it.
174 */
175struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
176{
177 int err = 0;
178 struct socket *sock = NULL;
179 struct sock *sk = NULL;
180
181 if (!tunnel)
182 goto out;
183
184 if (tunnel->fd >= 0) {
185 /* Socket is owned by userspace, who might be in the process
186 * of closing it. Look the socket up using the fd to ensure
187 * consistency.
188 */
189 sock = sockfd_lookup(tunnel->fd, &err);
190 if (sock)
191 sk = sock->sk;
192 } else {
193 /* Socket is owned by kernelspace */
194 sk = tunnel->sock;
195 }
196
197out:
198 return sk;
199}
200EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
202/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
203void l2tp_tunnel_sock_put(struct sock *sk)
204{
205 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206 if (tunnel) {
207 if (tunnel->fd >= 0) {
208 /* Socket is owned by userspace */
209 sockfd_put(sk->sk_socket);
210 }
211 sock_put(sk);
212 }
213}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215
171/* Lookup a session by id in the global session list 216/* Lookup a session by id in the global session list
172 */ 217 */
173static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) 218static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
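A short usage sketch of the contract stated in the comment above (the surrounding function is hypothetical): every successful l2tp_tunnel_sock_lookup() must be balanced by l2tp_tunnel_sock_put(), which also drops the fd reference taken for userspace-owned sockets.

static void example_with_tunnel_sock(struct l2tp_tunnel *tunnel)
{
	struct sock *sk = l2tp_tunnel_sock_lookup(tunnel);

	if (!sk)
		return;		/* no usable tunnel socket right now */

	/* ... use sk while the reference (and any fd ref) is held ... */

	l2tp_tunnel_sock_put(sk);
}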
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1123 struct udphdr *uh; 1168 struct udphdr *uh;
1124 struct inet_sock *inet; 1169 struct inet_sock *inet;
1125 __wsum csum; 1170 __wsum csum;
1126 int old_headroom;
1127 int new_headroom;
1128 int headroom; 1171 int headroom;
1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1172 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1130 int udp_len; 1173 int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1136 */ 1179 */
1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1180 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1138 uhlen + hdr_len; 1181 uhlen + hdr_len;
1139 old_headroom = skb_headroom(skb);
1140 if (skb_cow_head(skb, headroom)) { 1182 if (skb_cow_head(skb, headroom)) {
1141 kfree_skb(skb); 1183 kfree_skb(skb);
1142 return NET_XMIT_DROP; 1184 return NET_XMIT_DROP;
1143 } 1185 }
1144 1186
1145 new_headroom = skb_headroom(skb);
1146 skb_orphan(skb); 1187 skb_orphan(skb);
1147 skb->truesize += new_headroom - old_headroom;
1148
1149 /* Setup L2TP header */ 1188 /* Setup L2TP header */
1150 session->build_header(session, __skb_push(skb, hdr_len)); 1189 session->build_header(session, __skb_push(skb, hdr_len));
1151 1190
@@ -1232,6 +1271,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1232static void l2tp_tunnel_destruct(struct sock *sk) 1271static void l2tp_tunnel_destruct(struct sock *sk)
1233{ 1272{
1234 struct l2tp_tunnel *tunnel; 1273 struct l2tp_tunnel *tunnel;
1274 struct l2tp_net *pn;
1235 1275
1236 tunnel = sk->sk_user_data; 1276 tunnel = sk->sk_user_data;
1237 if (tunnel == NULL) 1277 if (tunnel == NULL)
@@ -1239,9 +1279,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1239 1279
1240 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); 1280 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1241 1281
1242 /* Close all sessions */
1243 l2tp_tunnel_closeall(tunnel);
1244 1282
1283 /* Disable udp encapsulation */
1245 switch (tunnel->encap) { 1284 switch (tunnel->encap) {
1246 case L2TP_ENCAPTYPE_UDP: 1285 case L2TP_ENCAPTYPE_UDP:
1247 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1286 /* No longer an encapsulation socket. See net/ipv4/udp.c */
@@ -1253,17 +1292,23 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1253 } 1292 }
1254 1293
1255 /* Remove hooks into tunnel socket */ 1294 /* Remove hooks into tunnel socket */
1256 tunnel->sock = NULL;
1257 sk->sk_destruct = tunnel->old_sk_destruct; 1295 sk->sk_destruct = tunnel->old_sk_destruct;
1258 sk->sk_user_data = NULL; 1296 sk->sk_user_data = NULL;
1297 tunnel->sock = NULL;
1259 1298
1260 /* Call the original destructor */ 1299 /* Remove the tunnel struct from the tunnel list */
1261 if (sk->sk_destruct) 1300 pn = l2tp_pernet(tunnel->l2tp_net);
1262 (*sk->sk_destruct)(sk); 1301 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1302 list_del_rcu(&tunnel->list);
1303 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1304 atomic_dec(&l2tp_tunnel_count);
1263 1305
1264 /* We're finished with the socket */ 1306 l2tp_tunnel_closeall(tunnel);
1265 l2tp_tunnel_dec_refcount(tunnel); 1307 l2tp_tunnel_dec_refcount(tunnel);
1266 1308
1309 /* Call the original destructor */
1310 if (sk->sk_destruct)
1311 (*sk->sk_destruct)(sk);
1267end: 1312end:
1268 return; 1313 return;
1269} 1314}
@@ -1337,48 +1382,77 @@ again:
1337 */ 1382 */
1338static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) 1383static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1339{ 1384{
1340 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1341
1342 BUG_ON(atomic_read(&tunnel->ref_count) != 0); 1385 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1343 BUG_ON(tunnel->sock != NULL); 1386 BUG_ON(tunnel->sock != NULL);
1344
1345 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); 1387 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1346
1347 /* Remove from tunnel list */
1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1349 list_del_rcu(&tunnel->list);
1350 kfree_rcu(tunnel, rcu); 1388 kfree_rcu(tunnel, rcu);
1351 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1389}
1352 1390
1353 atomic_dec(&l2tp_tunnel_count); 1391/* Workqueue tunnel deletion function */
1392static void l2tp_tunnel_del_work(struct work_struct *work)
1393{
1394 struct l2tp_tunnel *tunnel = NULL;
1395 struct socket *sock = NULL;
1396 struct sock *sk = NULL;
1397
1398 tunnel = container_of(work, struct l2tp_tunnel, del_work);
1399 sk = l2tp_tunnel_sock_lookup(tunnel);
1400 if (!sk)
1401 return;
1402
1403 sock = sk->sk_socket;
1404 BUG_ON(!sock);
1405
1406 /* If the tunnel socket was created directly by the kernel, use the
1407 * sk_* API to release the socket now. Otherwise go through the
1408 * inet_* layer to shut the socket down, and let userspace close it.
1409 * In either case the tunnel resources are freed in the socket
1410 * destructor when the tunnel socket goes away.
1411 */
1412 if (sock->file == NULL) {
1413 kernel_sock_shutdown(sock, SHUT_RDWR);
1414 sk_release_kernel(sk);
1415 } else {
1416 inet_shutdown(sock, 2);
1417 }
1418
1419 l2tp_tunnel_sock_put(sk);
1354} 1420}
1355 1421
1356/* Create a socket for the tunnel, if one isn't set up by 1422/* Create a socket for the tunnel, if one isn't set up by
1357 * userspace. This is used for static tunnels where there is no 1423 * userspace. This is used for static tunnels where there is no
1358 * managing L2TP daemon. 1424 * managing L2TP daemon.
1425 *
1426 * Since we don't want these sockets to keep a namespace alive by
1427 * themselves, we drop the socket's namespace refcount after creation.
1428 * These sockets are freed when the namespace exits using the pernet
1429 * exit hook.
1359 */ 1430 */
1360static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp) 1431static int l2tp_tunnel_sock_create(struct net *net,
1432 u32 tunnel_id,
1433 u32 peer_tunnel_id,
1434 struct l2tp_tunnel_cfg *cfg,
1435 struct socket **sockp)
1361{ 1436{
1362 int err = -EINVAL; 1437 int err = -EINVAL;
1363 struct sockaddr_in udp_addr; 1438 struct socket *sock = NULL;
1439 struct sockaddr_in udp_addr = {0};
1440 struct sockaddr_l2tpip ip_addr = {0};
1364#if IS_ENABLED(CONFIG_IPV6) 1441#if IS_ENABLED(CONFIG_IPV6)
1365 struct sockaddr_in6 udp6_addr; 1442 struct sockaddr_in6 udp6_addr = {0};
1366 struct sockaddr_l2tpip6 ip6_addr; 1443 struct sockaddr_l2tpip6 ip6_addr = {0};
1367#endif 1444#endif
1368 struct sockaddr_l2tpip ip_addr;
1369 struct socket *sock = NULL;
1370 1445
1371 switch (cfg->encap) { 1446 switch (cfg->encap) {
1372 case L2TP_ENCAPTYPE_UDP: 1447 case L2TP_ENCAPTYPE_UDP:
1373#if IS_ENABLED(CONFIG_IPV6) 1448#if IS_ENABLED(CONFIG_IPV6)
1374 if (cfg->local_ip6 && cfg->peer_ip6) { 1449 if (cfg->local_ip6 && cfg->peer_ip6) {
1375 err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp); 1450 err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1376 if (err < 0) 1451 if (err < 0)
1377 goto out; 1452 goto out;
1378 1453
1379 sock = *sockp; 1454 sk_change_net(sock->sk, net);
1380 1455
1381 memset(&udp6_addr, 0, sizeof(udp6_addr));
1382 udp6_addr.sin6_family = AF_INET6; 1456 udp6_addr.sin6_family = AF_INET6;
1383 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6, 1457 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1384 sizeof(udp6_addr.sin6_addr)); 1458 sizeof(udp6_addr.sin6_addr));
@@ -1400,13 +1474,12 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1400 } else 1474 } else
1401#endif 1475#endif
1402 { 1476 {
1403 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); 1477 err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1404 if (err < 0) 1478 if (err < 0)
1405 goto out; 1479 goto out;
1406 1480
1407 sock = *sockp; 1481 sk_change_net(sock->sk, net);
1408 1482
1409 memset(&udp_addr, 0, sizeof(udp_addr));
1410 udp_addr.sin_family = AF_INET; 1483 udp_addr.sin_family = AF_INET;
1411 udp_addr.sin_addr = cfg->local_ip; 1484 udp_addr.sin_addr = cfg->local_ip;
1412 udp_addr.sin_port = htons(cfg->local_udp_port); 1485 udp_addr.sin_port = htons(cfg->local_udp_port);
@@ -1433,14 +1506,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1433 case L2TP_ENCAPTYPE_IP: 1506 case L2TP_ENCAPTYPE_IP:
1434#if IS_ENABLED(CONFIG_IPV6) 1507#if IS_ENABLED(CONFIG_IPV6)
1435 if (cfg->local_ip6 && cfg->peer_ip6) { 1508 if (cfg->local_ip6 && cfg->peer_ip6) {
1436 err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP, 1509 err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1437 sockp); 1510 IPPROTO_L2TP, &sock);
1438 if (err < 0) 1511 if (err < 0)
1439 goto out; 1512 goto out;
1440 1513
1441 sock = *sockp; 1514 sk_change_net(sock->sk, net);
1442 1515
1443 memset(&ip6_addr, 0, sizeof(ip6_addr));
1444 ip6_addr.l2tp_family = AF_INET6; 1516 ip6_addr.l2tp_family = AF_INET6;
1445 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, 1517 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1446 sizeof(ip6_addr.l2tp_addr)); 1518 sizeof(ip6_addr.l2tp_addr));
@@ -1462,14 +1534,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1462 } else 1534 } else
1463#endif 1535#endif
1464 { 1536 {
1465 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, 1537 err = sock_create_kern(AF_INET, SOCK_DGRAM,
1466 sockp); 1538 IPPROTO_L2TP, &sock);
1467 if (err < 0) 1539 if (err < 0)
1468 goto out; 1540 goto out;
1469 1541
1470 sock = *sockp; 1542 sk_change_net(sock->sk, net);
1471 1543
1472 memset(&ip_addr, 0, sizeof(ip_addr));
1473 ip_addr.l2tp_family = AF_INET; 1544 ip_addr.l2tp_family = AF_INET;
1474 ip_addr.l2tp_addr = cfg->local_ip; 1545 ip_addr.l2tp_addr = cfg->local_ip;
1475 ip_addr.l2tp_conn_id = tunnel_id; 1546 ip_addr.l2tp_conn_id = tunnel_id;
@@ -1493,8 +1564,10 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1493 } 1564 }
1494 1565
1495out: 1566out:
1567 *sockp = sock;
1496 if ((err < 0) && sock) { 1568 if ((err < 0) && sock) {
1497 sock_release(sock); 1569 kernel_sock_shutdown(sock, SHUT_RDWR);
1570 sk_release_kernel(sock->sk);
1498 *sockp = NULL; 1571 *sockp = NULL;
1499 } 1572 }
1500 1573
@@ -1517,15 +1590,23 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1517 * kernel socket. 1590 * kernel socket.
1518 */ 1591 */
1519 if (fd < 0) { 1592 if (fd < 0) {
1520 err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock); 1593 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1594 cfg, &sock);
1521 if (err < 0) 1595 if (err < 0)
1522 goto err; 1596 goto err;
1523 } else { 1597 } else {
1524 err = -EBADF;
1525 sock = sockfd_lookup(fd, &err); 1598 sock = sockfd_lookup(fd, &err);
1526 if (!sock) { 1599 if (!sock) {
1527 pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n", 1600 pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1528 tunnel_id, fd, err); 1601 tunnel_id, fd, err);
1602 err = -EBADF;
1603 goto err;
1604 }
1605
1606 /* Reject namespace mismatches */
1607 if (!net_eq(sock_net(sock->sk), net)) {
1608 pr_err("tunl %u: netns mismatch\n", tunnel_id);
1609 err = -EINVAL;
1529 goto err; 1610 goto err;
1530 } 1611 }
1531 } 1612 }
@@ -1607,10 +1688,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1607 tunnel->old_sk_destruct = sk->sk_destruct; 1688 tunnel->old_sk_destruct = sk->sk_destruct;
1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1689 sk->sk_destruct = &l2tp_tunnel_destruct;
1609 tunnel->sock = sk; 1690 tunnel->sock = sk;
1691 tunnel->fd = fd;
1610 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1692 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1611 1693
1612 sk->sk_allocation = GFP_ATOMIC; 1694 sk->sk_allocation = GFP_ATOMIC;
1613 1695
1696 /* Init delete workqueue struct */
1697 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1698
1614 /* Add tunnel to our list */ 1699 /* Add tunnel to our list */
1615 INIT_LIST_HEAD(&tunnel->list); 1700 INIT_LIST_HEAD(&tunnel->list);
1616 atomic_inc(&l2tp_tunnel_count); 1701 atomic_inc(&l2tp_tunnel_count);
@@ -1642,25 +1727,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1642 */ 1727 */
1643int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1728int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1644{ 1729{
1645 int err = 0; 1730 return (false == queue_work(l2tp_wq, &tunnel->del_work));
1646 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
1647
1648 /* Force the tunnel socket to close. This will eventually
1649 * cause the tunnel to be deleted via the normal socket close
1650 * mechanisms when userspace closes the tunnel socket.
1651 */
1652 if (sock != NULL) {
1653 err = inet_shutdown(sock, 2);
1654
1655 /* If the tunnel's socket was created by the kernel,
1656 * close the socket here since the socket was not
1657 * created by userspace.
1658 */
1659 if (sock->file == NULL)
1660 err = inet_release(sock);
1661 }
1662
1663 return err;
1664} 1731}
1665EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1732EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1666 1733
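A sketch of how a caller might read the new return value (the wrapper below is hypothetical): queue_work() returns false when the work item was already pending, so l2tp_tunnel_delete() now returns nonzero only when a delete was already in flight for this tunnel.

#include <linux/printk.h>

static void example_delete(struct l2tp_tunnel *tunnel)
{
	if (l2tp_tunnel_delete(tunnel))
		pr_debug("l2tp: delete already pending for %s\n",
			 tunnel->name);
}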
@@ -1844,8 +1911,21 @@ static __net_init int l2tp_init_net(struct net *net)
1844 return 0; 1911 return 0;
1845} 1912}
1846 1913
1914static __net_exit void l2tp_exit_net(struct net *net)
1915{
1916 struct l2tp_net *pn = l2tp_pernet(net);
1917 struct l2tp_tunnel *tunnel = NULL;
1918
1919 rcu_read_lock_bh();
1920 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1921 (void)l2tp_tunnel_delete(tunnel);
1922 }
1923 rcu_read_unlock_bh();
1924}
1925
1847static struct pernet_operations l2tp_net_ops = { 1926static struct pernet_operations l2tp_net_ops = {
1848 .init = l2tp_init_net, 1927 .init = l2tp_init_net,
1928 .exit = l2tp_exit_net,
1849 .id = &l2tp_net_id, 1929 .id = &l2tp_net_id,
1850 .size = sizeof(struct l2tp_net), 1930 .size = sizeof(struct l2tp_net),
1851}; 1931};
@@ -1858,6 +1938,13 @@ static int __init l2tp_init(void)
1858 if (rc) 1938 if (rc)
1859 goto out; 1939 goto out;
1860 1940
1941 l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
1942 if (!l2tp_wq) {
1943 pr_err("alloc_workqueue failed\n");
1944 rc = -ENOMEM;
1945 goto out;
1946 }
1947
1861 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); 1948 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1862 1949
1863out: 1950out:
@@ -1867,6 +1954,10 @@ out:
1867static void __exit l2tp_exit(void) 1954static void __exit l2tp_exit(void)
1868{ 1955{
1869 unregister_pernet_device(&l2tp_net_ops); 1956 unregister_pernet_device(&l2tp_net_ops);
1957 if (l2tp_wq) {
1958 destroy_workqueue(l2tp_wq);
1959 l2tp_wq = NULL;
1960 }
1870} 1961}
1871 1962
1872module_init(l2tp_init); 1963module_init(l2tp_init);
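For reference, the module-scoped workqueue lifecycle the two hunks above add, reduced to a sketch with placeholder names: allocate once at module init, destroy at module exit.

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* stands in for l2tp_wq */

static int __init example_module_init(void)
{
	/* WQ_UNBOUND: work may run on any CPU; the patch also passes
	 * WQ_NON_REENTRANT, which kernels of this vintage still define */
	example_wq = alloc_workqueue("example", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_module_exit(void)
{
	if (example_wq) {
		destroy_workqueue(example_wq);
		example_wq = NULL;
	}
}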
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e083a7..8eb8f1d47f3a 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,7 +188,10 @@ struct l2tp_tunnel {
188 int (*recv_payload_hook)(struct sk_buff *skb); 188 int (*recv_payload_hook)(struct sk_buff *skb);
189 void (*old_sk_destruct)(struct sock *); 189 void (*old_sk_destruct)(struct sock *);
190 struct sock *sock; /* Parent socket */ 190 struct sock *sock; /* Parent socket */
191 int fd; 191 int fd; /* Parent fd, if tunnel socket
192 * was created by userspace */
193
194 struct work_struct del_work;
192 195
193 uint8_t priv[0]; /* private data */ 196 uint8_t priv[0]; /* private data */
194}; 197};
@@ -228,6 +231,8 @@ out:
228 return tunnel; 231 return tunnel;
229} 232}
230 233
234extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
235extern void l2tp_tunnel_sock_put(struct sock *sk);
231extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 236extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
232extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 237extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
233extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 238extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 61d8b75d2686..f7ac8f42fee2 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -115,6 +115,7 @@ static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, in
115 */ 115 */
116static int l2tp_ip_recv(struct sk_buff *skb) 116static int l2tp_ip_recv(struct sk_buff *skb)
117{ 117{
118 struct net *net = dev_net(skb->dev);
118 struct sock *sk; 119 struct sock *sk;
119 u32 session_id; 120 u32 session_id;
120 u32 tunnel_id; 121 u32 tunnel_id;
@@ -142,7 +143,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
142 } 143 }
143 144
144 /* Ok, this is a data packet. Lookup the session. */ 145 /* Ok, this is a data packet. Lookup the session. */
145 session = l2tp_session_find(&init_net, NULL, session_id); 146 session = l2tp_session_find(net, NULL, session_id);
146 if (session == NULL) 147 if (session == NULL)
147 goto discard; 148 goto discard;
148 149
@@ -173,14 +174,14 @@ pass_up:
173 goto discard; 174 goto discard;
174 175
175 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 176 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
176 tunnel = l2tp_tunnel_find(&init_net, tunnel_id); 177 tunnel = l2tp_tunnel_find(net, tunnel_id);
177 if (tunnel != NULL) 178 if (tunnel != NULL)
178 sk = tunnel->sock; 179 sk = tunnel->sock;
179 else { 180 else {
180 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 181 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
181 182
182 read_lock_bh(&l2tp_ip_lock); 183 read_lock_bh(&l2tp_ip_lock);
183 sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id); 184 sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
184 read_unlock_bh(&l2tp_ip_lock); 185 read_unlock_bh(&l2tp_ip_lock);
185 } 186 }
186 187
@@ -239,6 +240,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
239{ 240{
240 struct inet_sock *inet = inet_sk(sk); 241 struct inet_sock *inet = inet_sk(sk);
241 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; 242 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
243 struct net *net = sock_net(sk);
242 int ret; 244 int ret;
243 int chk_addr_ret; 245 int chk_addr_ret;
244 246
@@ -251,7 +253,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
251 253
252 ret = -EADDRINUSE; 254 ret = -EADDRINUSE;
253 read_lock_bh(&l2tp_ip_lock); 255 read_lock_bh(&l2tp_ip_lock);
254 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) 256 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
257 sk->sk_bound_dev_if, addr->l2tp_conn_id))
255 goto out_in_use; 258 goto out_in_use;
256 259
257 read_unlock_bh(&l2tp_ip_lock); 260 read_unlock_bh(&l2tp_ip_lock);
@@ -260,7 +263,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
260 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) 263 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
261 goto out; 264 goto out;
262 265
263 chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr); 266 chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
264 ret = -EADDRNOTAVAIL; 267 ret = -EADDRNOTAVAIL;
265 if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && 268 if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
266 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 269 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
@@ -369,7 +372,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
369 return 0; 372 return 0;
370 373
371drop: 374drop:
372 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); 375 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
373 kfree_skb(skb); 376 kfree_skb(skb);
374 return -1; 377 return -1;
375} 378}
@@ -605,6 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
605 608
606static struct net_protocol l2tp_ip_protocol __read_mostly = { 609static struct net_protocol l2tp_ip_protocol __read_mostly = {
607 .handler = l2tp_ip_recv, 610 .handler = l2tp_ip_recv,
611 .netns_ok = 1,
608}; 612};
609 613
610static int __init l2tp_ip_init(void) 614static int __init l2tp_ip_init(void)
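A minimal sketch of the namespace derivation the l2tp_ip hunks switch to (the wrapper function is hypothetical): take the struct net from the object at hand rather than hard-coding init_net.

#include <linux/netdevice.h>
#include <net/sock.h>

static void example_pick_net(struct sk_buff *skb, struct sock *sk)
{
	struct net *rx_net = dev_net(skb->dev);	/* in a receive handler */
	struct net *sk_net = sock_net(sk);	/* anywhere a socket exists */

	/* pass rx_net / sk_net to the lookup helpers instead of &init_net,
	 * as the hunks above do */
	(void)rx_net;
	(void)sk_net;
}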
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 927547171bc7..8ee4a86ae996 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 554 memset(opt, 0, sizeof(struct ipv6_txoptions));
555 opt->tot_len = sizeof(struct ipv6_txoptions); 555 opt->tot_len = sizeof(struct ipv6_txoptions);
556 556
557 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 557 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
558 &hlimit, &tclass, &dontfrag); 558 &hlimit, &tclass, &dontfrag);
559 if (err < 0) { 559 if (err < 0) {
560 fl6_sock_release(flowlabel); 560 fl6_sock_release(flowlabel);
561 return err; 561 return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
646 struct msghdr *msg, size_t len, int noblock, 646 struct msghdr *msg, size_t len, int noblock,
647 int flags, int *addr_len) 647 int flags, int *addr_len)
648{ 648{
649 struct inet_sock *inet = inet_sk(sk); 649 struct ipv6_pinfo *np = inet6_sk(sk);
650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
651 size_t copied = 0; 651 size_t copied = 0;
652 int err = -EOPNOTSUPP; 652 int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 688 lsa->l2tp_scope_id = IP6CB(skb)->iif;
689 } 689 }
690 690
691 if (inet->cmsg_flags) 691 if (np->rxopt.all)
692 ip_cmsg_recv(msg, skb); 692 ip6_datagram_recv_ctl(sk, msg, skb);
693 693
694 if (flags & MSG_TRUNC) 694 if (flags & MSG_TRUNC)
695 copied = skb->len; 695 copied = skb->len;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bbba3a19e944..c1bab22db85e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -37,6 +37,7 @@ static struct genl_family l2tp_nl_family = {
37 .version = L2TP_GENL_VERSION, 37 .version = L2TP_GENL_VERSION,
38 .hdrsize = 0, 38 .hdrsize = 0,
39 .maxattr = L2TP_ATTR_MAX, 39 .maxattr = L2TP_ATTR_MAX,
40 .netnsok = true,
40}; 41};
41 42
42/* Accessed under genl lock */ 43/* Accessed under genl lock */
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 286366ef8930..3f4e3afc191a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
388 struct l2tp_session *session; 388 struct l2tp_session *session;
389 struct l2tp_tunnel *tunnel; 389 struct l2tp_tunnel *tunnel;
390 struct pppol2tp_session *ps; 390 struct pppol2tp_session *ps;
391 int old_headroom;
392 int new_headroom;
393 int uhlen, headroom; 391 int uhlen, headroom;
394 392
395 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 393 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
408 if (tunnel == NULL) 406 if (tunnel == NULL)
409 goto abort_put_sess; 407 goto abort_put_sess;
410 408
411 old_headroom = skb_headroom(skb);
412 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 409 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
413 headroom = NET_SKB_PAD + 410 headroom = NET_SKB_PAD +
414 sizeof(struct iphdr) + /* IP header */ 411 sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
418 if (skb_cow_head(skb, headroom)) 415 if (skb_cow_head(skb, headroom))
419 goto abort_put_sess_tun; 416 goto abort_put_sess_tun;
420 417
421 new_headroom = skb_headroom(skb);
422 skb->truesize += new_headroom - old_headroom;
423
424 /* Setup PPP header */ 418 /* Setup PPP header */
425 __skb_push(skb, sizeof(ppph)); 419 __skb_push(skb, sizeof(ppph));
426 skb->data[0] = ppph[0]; 420 skb->data[0] = ppph[0];
@@ -1789,7 +1783,8 @@ static __net_init int pppol2tp_init_net(struct net *net)
1789 struct proc_dir_entry *pde; 1783 struct proc_dir_entry *pde;
1790 int err = 0; 1784 int err = 0;
1791 1785
1792 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); 1786 pde = proc_create("pppol2tp", S_IRUGO, net->proc_net,
1787 &pppol2tp_proc_fops);
1793 if (!pde) { 1788 if (!pde) {
1794 err = -ENOMEM; 1789 err = -ENOMEM;
1795 goto out; 1790 goto out;
@@ -1801,7 +1796,7 @@ out:
1801 1796
1802static __net_exit void pppol2tp_exit_net(struct net *net) 1797static __net_exit void pppol2tp_exit_net(struct net *net)
1803{ 1798{
1804 proc_net_remove(net, "pppol2tp"); 1799 remove_proc_entry("pppol2tp", net->proc_net);
1805} 1800}
1806 1801
1807static struct pernet_operations pppol2tp_net_ops = { 1802static struct pernet_operations pppol2tp_net_ops = {
diff --git a/net/lapb/Kconfig b/net/lapb/Kconfig
index f0b5efb31a00..6481839b76c9 100644
--- a/net/lapb/Kconfig
+++ b/net/lapb/Kconfig
@@ -3,8 +3,7 @@
3# 3#
4 4
5config LAPB 5config LAPB
6 tristate "LAPB Data Link Driver (EXPERIMENTAL)" 6 tristate "LAPB Data Link Driver"
7 depends on EXPERIMENTAL
8 ---help--- 7 ---help---
9 Link Access Procedure, Balanced (LAPB) is the data link layer (i.e. 8 Link Access Procedure, Balanced (LAPB) is the data link layer (i.e.
10 the lower) part of the X.25 protocol. It offers a reliable 9 the lower) part of the X.25 protocol. It offers a reliable
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index b4ecf267a34b..62535fe9f570 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -81,7 +81,7 @@ comment "Some wireless drivers require a rate control algorithm"
81 81
82config MAC80211_MESH 82config MAC80211_MESH
83 bool "Enable mac80211 mesh networking (pre-802.11s) support" 83 bool "Enable mac80211 mesh networking (pre-802.11s) support"
84 depends on MAC80211 && EXPERIMENTAL 84 depends on MAC80211
85 ---help--- 85 ---help---
86 This options enables support of Draft 802.11s mesh networking. 86 This options enables support of Draft 802.11s mesh networking.
87 The implementation is based on Draft 2.08 of the Mesh Networking 87 The implementation is based on Draft 2.08 of the Mesh Networking
@@ -258,6 +258,17 @@ config MAC80211_MESH_SYNC_DEBUG
258 258
259 Do not select this option. 259 Do not select this option.
260 260
261config MAC80211_MESH_PS_DEBUG
262 bool "Verbose mesh powersave debugging"
263 depends on MAC80211_DEBUG_MENU
264 depends on MAC80211_MESH
265 ---help---
266 Selecting this option causes mac80211 to print out very verbose mesh
267 powersave debugging messages (when mac80211 is taking part in a
268 mesh network).
269
270 Do not select this option.
271
261config MAC80211_TDLS_DEBUG 272config MAC80211_TDLS_DEBUG
262 bool "Verbose TDLS debugging" 273 bool "Verbose TDLS debugging"
263 depends on MAC80211_DEBUG_MENU 274 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4911202334d9..9d7d840aac6d 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -39,7 +39,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
39 mesh_pathtbl.o \ 39 mesh_pathtbl.o \
40 mesh_plink.o \ 40 mesh_plink.o \
41 mesh_hwmp.o \ 41 mesh_hwmp.o \
42 mesh_sync.o 42 mesh_sync.o \
43 mesh_ps.o
43 44
44mac80211-$(CONFIG_PM) += pm.o 45mac80211-$(CONFIG_PM) += pm.o
45 46
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 808338a1bce5..31bf2586fb84 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -83,8 +83,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
83 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, 83 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
84 &sta->sta, tid, NULL, 0)) 84 &sta->sta, tid, NULL, 0))
85 sdata_info(sta->sdata, 85 sdata_info(sta->sdata,
86 "HW problem - can not stop rx aggregation for tid %d\n", 86 "HW problem - can not stop rx aggregation for %pM tid %d\n",
87 tid); 87 sta->sta.addr, tid);
88 88
89 /* check if this is a self generated aggregation halt */ 89 /* check if this is a self generated aggregation halt */
90 if (initiator == WLAN_BACK_RECIPIENT && tx) 90 if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -159,7 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
159 } 159 }
160 rcu_read_unlock(); 160 rcu_read_unlock();
161 161
162 ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid); 162 ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
163 sta->sta.addr, (u16)*ptid);
163 164
164 set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); 165 set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
165 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); 166 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
@@ -247,7 +248,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
247 status = WLAN_STATUS_REQUEST_DECLINED; 248 status = WLAN_STATUS_REQUEST_DECLINED;
248 249
249 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { 250 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
250 ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n"); 251 ht_dbg(sta->sdata,
252 "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
253 sta->sta.addr, tid);
251 goto end_no_lock; 254 goto end_no_lock;
252 } 255 }
253 256
@@ -317,7 +320,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
317 320
318 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, 321 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
319 &sta->sta, tid, &start_seq_num, 0); 322 &sta->sta, tid, &start_seq_num, 0);
320 ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret); 323 ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
324 sta->sta.addr, tid, ret);
321 if (ret) { 325 if (ret) {
322 kfree(tid_agg_rx->reorder_buf); 326 kfree(tid_agg_rx->reorder_buf);
323 kfree(tid_agg_rx->reorder_time); 327 kfree(tid_agg_rx->reorder_time);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index eb9df22418f0..13b7683de5a4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,16 +149,133 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
149 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); 149 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
150} 150}
151 151
152static inline int ieee80211_ac_from_tid(int tid)
153{
154 return ieee802_1d_to_ac[tid & 7];
155}
156
157/*
158 * When multiple aggregation sessions on multiple stations
159 * are being created/destroyed simultaneously, we need to
160 * refcount the global queue stop caused by that in order
161 * to not get into a situation where one of the aggregation
162 * setup or teardown re-enables queues before the other is
163 * ready to handle that.
164 *
165 * These two functions take care of this issue by keeping
166 * a global "agg_queue_stop" refcount.
167 */
168static void __acquires(agg_queue)
169ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
170{
171 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
172
173 if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
174 ieee80211_stop_queue_by_reason(
175 &sdata->local->hw, queue,
176 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
177 __acquire(agg_queue);
178}
179
180static void __releases(agg_queue)
181ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
182{
183 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
184
185 if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
186 ieee80211_wake_queue_by_reason(
187 &sdata->local->hw, queue,
188 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
189 __release(agg_queue);
190}
191
192/*
193 * splice packets from the STA's pending to the local pending,
194 * requires a call to ieee80211_agg_splice_finish later
195 */
196static void __acquires(agg_queue)
197ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
198 struct tid_ampdu_tx *tid_tx, u16 tid)
199{
200 struct ieee80211_local *local = sdata->local;
201 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
202 unsigned long flags;
203
204 ieee80211_stop_queue_agg(sdata, tid);
205
206 if (WARN(!tid_tx,
207 "TID %d gone but expected when splicing aggregates from the pending queue\n",
208 tid))
209 return;
210
211 if (!skb_queue_empty(&tid_tx->pending)) {
212 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
213 /* copy over remaining packets */
214 skb_queue_splice_tail_init(&tid_tx->pending,
215 &local->pending[queue]);
216 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
217 }
218}
219
220static void __releases(agg_queue)
221ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
222{
223 ieee80211_wake_queue_agg(sdata, tid);
224}
225
226static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
227{
228 struct tid_ampdu_tx *tid_tx;
229
230 lockdep_assert_held(&sta->ampdu_mlme.mtx);
231 lockdep_assert_held(&sta->lock);
232
233 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
234
235 /*
236 * When we get here, the TX path will not be lockless any more wrt.
237 * aggregation, since the OPERATIONAL bit has long been cleared.
238 * Thus it will block on getting the lock, if it occurs. So if we
239 * stop the queue now, we will not get any more packets, and any
240 * that might be being processed will wait for us here, thereby
241 * guaranteeing that no packets go to the tid_tx pending queue any
242 * more.
243 */
244
245 ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
246
247 /* future packets must not find the tid_tx struct any more */
248 ieee80211_assign_tid_tx(sta, tid, NULL);
249
250 ieee80211_agg_splice_finish(sta->sdata, tid);
251
252 kfree_rcu(tid_tx, rcu_head);
253}
254
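A usage sketch of the splice pair as relocated above (the wrapper is hypothetical): work on the TID's pending queue happens strictly between the two calls, while the refcounted queue stop is held, so no new frames can slip past during teardown.

static void example_agg_teardown(struct sta_info *sta,
				 struct tid_ampdu_tx *tid_tx, u16 tid)
{
	/* stops the queue (refcounted) and moves pending frames across */
	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* ... detach or reinstall TX aggregation state here; no frame
	 * can reach the tid_tx pending queue meanwhile ... */

	/* drops the refcounted stop; the queue wakes when it hits zero */
	ieee80211_agg_splice_finish(sta->sdata, tid);
}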
152int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 255int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
153 enum ieee80211_back_parties initiator, 256 enum ieee80211_agg_stop_reason reason)
154 bool tx)
155{ 257{
156 struct ieee80211_local *local = sta->local; 258 struct ieee80211_local *local = sta->local;
157 struct tid_ampdu_tx *tid_tx; 259 struct tid_ampdu_tx *tid_tx;
260 enum ieee80211_ampdu_mlme_action action;
158 int ret; 261 int ret;
159 262
160 lockdep_assert_held(&sta->ampdu_mlme.mtx); 263 lockdep_assert_held(&sta->ampdu_mlme.mtx);
161 264
265 switch (reason) {
266 case AGG_STOP_DECLINED:
267 case AGG_STOP_LOCAL_REQUEST:
268 case AGG_STOP_PEER_REQUEST:
269 action = IEEE80211_AMPDU_TX_STOP_CONT;
270 break;
271 case AGG_STOP_DESTROY_STA:
272 action = IEEE80211_AMPDU_TX_STOP_FLUSH;
273 break;
274 default:
275 WARN_ON_ONCE(1);
276 return -EINVAL;
277 }
278
162 spin_lock_bh(&sta->lock); 279 spin_lock_bh(&sta->lock);
163 280
164 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 281 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -167,10 +284,19 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
167 return -ENOENT; 284 return -ENOENT;
168 } 285 }
169 286
170 /* if we're already stopping ignore any new requests to stop */ 287 /*
288 * if we're already stopping ignore any new requests to stop
289 * unless we're destroying it in which case notify the driver
290 */
171 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 291 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
172 spin_unlock_bh(&sta->lock); 292 spin_unlock_bh(&sta->lock);
173 return -EALREADY; 293 if (reason != AGG_STOP_DESTROY_STA)
294 return -EALREADY;
295 ret = drv_ampdu_action(local, sta->sdata,
296 IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
297 &sta->sta, tid, NULL, 0);
298 WARN_ON_ONCE(ret);
299 return 0;
174 } 300 }
175 301
176 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 302 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
@@ -212,11 +338,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
212 */ 338 */
213 synchronize_net(); 339 synchronize_net();
214 340
215 tid_tx->stop_initiator = initiator; 341 tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
216 tid_tx->tx_stop = tx; 342 WLAN_BACK_RECIPIENT :
343 WLAN_BACK_INITIATOR;
344 tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
217 345
218 ret = drv_ampdu_action(local, sta->sdata, 346 ret = drv_ampdu_action(local, sta->sdata, action,
219 IEEE80211_AMPDU_TX_STOP,
220 &sta->sta, tid, NULL, 0); 347 &sta->sta, tid, NULL, 0);
221 348
222 /* HW shall not deny going back to legacy */ 349 /* HW shall not deny going back to legacy */
@@ -227,7 +354,17 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
227 */ 354 */
228 } 355 }
229 356
230 return ret; 357 /*
358 * In the case of AGG_STOP_DESTROY_STA, the driver won't
359 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
360 * seem like we can leave the tid_tx data pending forever.
361 * This is true, in a way, but "forever" is only until the
362 * station struct is actually destroyed. In the meantime,
363 * leaving it around ensures that we don't transmit packets
364 * to the driver on this TID which might confuse it.
365 */
366
367 return 0;
231} 368}
232 369
233/* 370/*
@@ -253,91 +390,18 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		rcu_read_unlock();
 		ht_dbg(sta->sdata,
-		       "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
-		       tid);
+		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
+		       sta->sta.addr, tid);
 		return;
 	}
 
-	ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
+	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	rcu_read_unlock();
 }
 
-static inline int ieee80211_ac_from_tid(int tid)
-{
-	return ieee802_1d_to_ac[tid & 7];
-}
-
-/*
- * When multiple aggregation sessions on multiple stations
- * are being created/destroyed simultaneously, we need to
- * refcount the global queue stop caused by that in order
- * to not get into a situation where one of the aggregation
- * setup or teardown re-enables queues before the other is
- * ready to handle that.
- *
- * These two functions take care of this issue by keeping
- * a global "agg_queue_stop" refcount.
- */
-static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
-		ieee80211_stop_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__acquire(agg_queue);
-}
-
-static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
-		ieee80211_wake_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__release(agg_queue);
-}
-
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
-			     struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-	struct ieee80211_local *local = sdata->local;
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-	unsigned long flags;
-
-	ieee80211_stop_queue_agg(sdata, tid);
-
-	if (WARN(!tid_tx,
-		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
-		 tid))
-		return;
-
-	if (!skb_queue_empty(&tid_tx->pending)) {
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		/* copy over remaining packets */
-		skb_queue_splice_tail_init(&tid_tx->pending,
-					   &local->pending[queue]);
-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-	}
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
-{
-	ieee80211_wake_queue_agg(sdata, tid);
-}
-
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
 	struct tid_ampdu_tx *tid_tx;
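
The helpers removed above (presumably relocated earlier in the file so the new ieee80211_remove_tid_tx() helper can use them — the moved-to location is outside this excerpt) document a reusable idiom: when several paths may stop the same queue concurrently, refcount the stop so only the first stopper stops the queue and only the last waker wakes it. A minimal standalone sketch of the idea, with invented names:

	/* only the 0 -> 1 stop transition and the 1 -> 0 wake
	 * transition actually touch the hardware queue */
	static atomic_t agg_stop_refcount;	/* per-queue in real code */

	static void sketch_stop_queue(void)
	{
		if (atomic_inc_return(&agg_stop_refcount) == 1)
			hw_queue_stop();	/* invented */
	}

	static void sketch_wake_queue(void)
	{
		if (atomic_dec_return(&agg_stop_refcount) == 0)
			hw_queue_wake();	/* invented */
	}
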
@@ -369,7 +433,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 			       &sta->sta, tid, &start_seq_num, 0);
 	if (ret) {
 		ht_dbg(sdata,
-		       "BA request denied - HW unavailable for tid %d\n", tid);
+		       "BA request denied - HW unavailable for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		spin_lock_bh(&sta->lock);
 		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 		ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -382,7 +447,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
 	/* activate the timer for the recipient's addBA response */
 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-	ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
+	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	spin_lock_bh(&sta->lock);
 	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -429,7 +495,8 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
 
 	rcu_read_unlock();
 
-	ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
+	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
+	       sta->sta.addr, (u16)*ptid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
 }
@@ -465,7 +532,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
 		ht_dbg(sdata,
-		       "BA sessions blocked - Denying BA session request\n");
+		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
+		       sta->sta.addr, tid);
 		return -EINVAL;
 	}
 
@@ -506,8 +574,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
 			HT_AGG_RETRIES_PERIOD)) {
 		ht_dbg(sdata,
-		       "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
-		       sta->ampdu_mlme.addba_req_num[tid], tid);
+		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
+		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
 		ret = -EBUSY;
 		goto err_unlock_sta;
 	}
@@ -516,8 +584,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	/* check if the TID is not in aggregation flow already */
 	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
 		ht_dbg(sdata,
-		       "BA request denied - session is not idle on tid %u\n",
-		       tid);
+		       "BA request denied - session is not idle on %pM tid %u\n",
+		       sta->sta.addr, tid);
 		ret = -EAGAIN;
 		goto err_unlock_sta;
 	}
@@ -572,7 +640,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
+	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -660,14 +729,13 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				   enum ieee80211_back_parties initiator,
-				   bool tx)
+				   enum ieee80211_agg_stop_reason reason)
 {
 	int ret;
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator, tx);
+	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
 
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 
@@ -743,7 +811,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-		ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
+		ht_dbg(sdata,
+		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto unlock_sta;
 	}
 
@@ -751,24 +821,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 		ieee80211_send_delba(sta->sdata, ra, tid,
 				     WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	/*
-	 * When we get here, the TX path will not be lockless any more wrt.
-	 * aggregation, since the OPERATIONAL bit has long been cleared.
-	 * Thus it will block on getting the lock, if it occurs. So if we
-	 * stop the queue now, we will not get any more packets, and any
-	 * that might be being processed will wait for us here, thereby
-	 * guaranteeing that no packets go to the tid_tx pending queue any
-	 * more.
-	 */
-
-	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
-
-	/* future packets must not find the tid_tx struct any more */
-	ieee80211_assign_tid_tx(sta, tid, NULL);
-
-	ieee80211_agg_splice_finish(sta->sdata, tid);
-
-	kfree_rcu(tid_tx, rcu_head);
+	ieee80211_remove_tid_tx(sta, tid);
 
  unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -819,13 +872,15 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 		goto out;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-		ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
+		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
 
-	ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
+	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	/*
 	 * addba_resp_timer may have fired before we got here, and
@@ -835,8 +890,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
 	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sta->sdata,
-		       "got addBA resp for tid %d but we already gave up\n",
-		       tid);
+		       "got addBA resp for %pM tid %d but we already gave up\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
@@ -868,8 +923,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	}
 
 	} else {
-		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
-						false);
+		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
 	}
 
  out:
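
Nearly every message touched above gains the station address through the kernel's %pM printk extension, which takes a pointer to a 6-byte MAC address and renders it colon-separated. A tiny example (address invented):

	u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	/* prints "station 02:00:00:aa:bb:cc tid 5" */
	pr_debug("station %pM tid %d\n", addr, 5);
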
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5c61677487cf..09d96a8f6c2c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 		sta = sta_info_get(sdata, mac_addr);
 	else
 		sta = sta_info_get_bss(sdata, mac_addr);
-	if (!sta) {
+	/*
+	 * The ASSOC test makes sure the driver is ready to
+	 * receive the key. When wpa_supplicant has roamed
+	 * using FT, it attempts to set the key before
+	 * association has completed, this rejects that attempt
+	 * so it will set the key again after association.
+	 *
+	 * TODO: accept the key if we have a station entry and
+	 *       add it to the device after the station.
+	 */
+	if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
 		ieee80211_key_free(sdata->local, key);
 		err = -ENOENT;
 		goto out_unlock;
@@ -482,7 +492,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 #ifdef CONFIG_MAC80211_MESH
 		sinfo->filled |= STATION_INFO_LLID |
 				 STATION_INFO_PLID |
-				 STATION_INFO_PLINK_STATE;
+				 STATION_INFO_PLINK_STATE |
+				 STATION_INFO_LOCAL_PM |
+				 STATION_INFO_PEER_PM |
+				 STATION_INFO_NONPEER_PM;
 
 		sinfo->llid = le16_to_cpu(sta->llid);
 		sinfo->plid = le16_to_cpu(sta->plid);
@@ -491,6 +504,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			sinfo->filled |= STATION_INFO_T_OFFSET;
 			sinfo->t_offset = sta->t_offset;
 		}
+		sinfo->local_pm = sta->local_pm;
+		sinfo->peer_pm = sta->peer_pm;
+		sinfo->nonpeer_pm = sta->nonpeer_pm;
 #endif
 	}
 
@@ -510,6 +526,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			 BIT(NL80211_STA_FLAG_WME) |
 			 BIT(NL80211_STA_FLAG_MFP) |
 			 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+			 BIT(NL80211_STA_FLAG_ASSOCIATED) |
 			 BIT(NL80211_STA_FLAG_TDLS_PEER);
 	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
@@ -521,6 +538,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
 	if (test_sta_flag(sta, WLAN_STA_AUTH))
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+	if (test_sta_flag(sta, WLAN_STA_ASSOC))
+		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
 }
@@ -909,11 +928,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	/* TODO: make hostapd tell us what it wants */
 	sdata->smps_mode = IEEE80211_SMPS_OFF;
 	sdata->needed_rx_chains = sdata->local->rx_chains;
+	sdata->radar_required = params->radar_required;
 
 	err = ieee80211_vif_use_channel(sdata, &params->chandef,
 					IEEE80211_CHANCTX_SHARED);
 	if (err)
 		return err;
+	ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
 	/*
 	 * Apply control port protocol, this allows us to
@@ -930,6 +951,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 	sdata->vif.bss_conf.dtim_period = params->dtim_period;
+	sdata->vif.bss_conf.enable_beacon = true;
 
 	sdata->vif.bss_conf.ssid_len = params->ssid_len;
 	if (params->ssid_len)
@@ -1009,7 +1031,16 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 	if (old_probe_resp)
 		kfree_rcu(old_probe_resp, rcu_head);
 
-	sta_info_flush(local, sdata);
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		sta_info_flush_defer(vlan);
+	sta_info_flush_defer(sdata);
+	rcu_barrier();
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		sta_info_flush_cleanup(vlan);
+	sta_info_flush_cleanup(sdata);
+
+	sdata->vif.bss_conf.enable_beacon = false;
+	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
 	drv_stop_ap(sdata->local, sdata);
@@ -1018,6 +1049,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
 	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
 
+	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
 	ieee80211_vif_release_channel(sdata);
 
 	return 0;
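
Stopping an AP now flushes stations from the AP interface and all of its VLANs in two phases: every interface first queues its stations for RCU-deferred removal, a single rcu_barrier() waits for all of those callbacks at once, and only then does the blocking cleanup run. Paying for one barrier instead of one per interface is the point. The generic shape, as a sketch with invented helper names:

	/* phase 1: unlink everywhere; each unlink queues a call_rcu() */
	for_each_interface(iface)
		flush_defer(iface);
	/* one barrier waits for every callback queued above */
	rcu_barrier();
	/* phase 2: now safe to do the blocking teardown */
	for_each_interface(iface)
		flush_cleanup(iface);
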
@@ -1067,6 +1099,58 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
 	netif_rx_ni(skb);
 }
 
+static int sta_apply_auth_flags(struct ieee80211_local *local,
+				struct sta_info *sta,
+				u32 mask, u32 set)
+{
+	int ret;
+
+	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+	    set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+	    !test_sta_flag(sta, WLAN_STA_AUTH)) {
+		ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
+		if (ret)
+			return ret;
+	}
+
+	if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
+	    set & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
+	    !test_sta_flag(sta, WLAN_STA_ASSOC)) {
+		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+		if (ret)
+			return ret;
+	}
+
+	if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
+		if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+			ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+		else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+		else
+			ret = 0;
+		if (ret)
+			return ret;
+	}
+
+	if (mask & BIT(NL80211_STA_FLAG_ASSOCIATED) &&
+	    !(set & BIT(NL80211_STA_FLAG_ASSOCIATED)) &&
+	    test_sta_flag(sta, WLAN_STA_ASSOC)) {
+		ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
+		if (ret)
+			return ret;
+	}
+
+	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+	    !(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
+	    test_sta_flag(sta, WLAN_STA_AUTH)) {
+		ret = sta_info_move_state(sta, IEEE80211_STA_NONE);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
 				struct sta_info *sta,
 				struct station_parameters *params)
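
sta_apply_auth_flags() factors the previously open-coded transitions into one helper that walks the station state ladder (NONE – AUTH – ASSOC – AUTHORIZED) one validated step at a time: the upward checks come first, the downward ones last, so a single call can move a station in either direction. For instance, a caller promoting a freshly added station might do (a sketch, not taken from this patch):

	u32 mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
		   BIT(NL80211_STA_FLAG_ASSOCIATED) |
		   BIT(NL80211_STA_FLAG_AUTHORIZED);

	/* setting all three bits walks NONE -> AUTH -> ASSOC -> AUTHORIZED */
	ret = sta_apply_auth_flags(local, sta, mask, mask);
	if (ret)
		return ret;
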
@@ -1084,52 +1168,20 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 	mask = params->sta_flags_mask;
 	set = params->sta_flags_set;
 
-	/*
-	 * In mesh mode, we can clear AUTHENTICATED flag but must
-	 * also make ASSOCIATED follow appropriately for the driver
-	 * API. See also below, after AUTHORIZED changes.
-	 */
-	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
-		/* cfg80211 should not allow this in non-mesh modes */
-		if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
-			return -EINVAL;
-
-		if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
-		    !test_sta_flag(sta, WLAN_STA_AUTH)) {
-			ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
-			if (ret)
-				return ret;
-			ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
-		if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
-			ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
-		else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-			ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
-		if (ret)
-			return ret;
-	}
-
-	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
-		/* cfg80211 should not allow this in non-mesh modes */
-		if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
-			return -EINVAL;
-
-		if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
-		    test_sta_flag(sta, WLAN_STA_AUTH)) {
-			ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
-			if (ret)
-				return ret;
-			ret = sta_info_move_state(sta, IEEE80211_STA_NONE);
-			if (ret)
-				return ret;
-		}
-	}
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		/*
+		 * In mesh mode, ASSOCIATED isn't part of the nl80211
+		 * API but must follow AUTHENTICATED for driver state.
+		 */
+		if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED))
+			mask |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+		if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
+			set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+	}
 
+	ret = sta_apply_auth_flags(local, sta, mask, set);
+	if (ret)
+		return ret;
 
 	if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
 		if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
@@ -1175,10 +1227,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 	sta->sta.aid = params->aid;
 
 	/*
-	 * FIXME: updating the following information is racy when this
-	 * function is called from ieee80211_change_station().
-	 * However, all this information should be static so
-	 * maybe we should just reject attemps to change it.
+	 * Some of the following updates would be racy if called on an
+	 * existing station, via ieee80211_change_station(). However,
+	 * all such changes are rejected by cfg80211 except for updates
+	 * changing the supported rates on an existing but not yet used
+	 * TDLS peer.
 	 */
 
 	if (params->listen_interval >= 0)
@@ -1199,36 +1252,62 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 
 	if (params->ht_capa)
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-						  params->ht_capa,
-						  &sta->sta.ht_cap);
+						  params->ht_capa, sta);
 
 	if (params->vht_capa)
 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-						    params->vht_capa,
-						    &sta->sta.vht_cap);
+						    params->vht_capa, sta);
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
-		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED)
+		u32 changed = 0;
+		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) {
 			switch (params->plink_state) {
-			case NL80211_PLINK_LISTEN:
 			case NL80211_PLINK_ESTAB:
+				if (sta->plink_state != NL80211_PLINK_ESTAB)
+					changed = mesh_plink_inc_estab_count(
+							sdata);
+				sta->plink_state = params->plink_state;
+
+				ieee80211_mps_sta_status_update(sta);
+				changed |= ieee80211_mps_set_sta_local_pm(sta,
+					      sdata->u.mesh.mshcfg.power_mode);
+				break;
+			case NL80211_PLINK_LISTEN:
 			case NL80211_PLINK_BLOCKED:
+			case NL80211_PLINK_OPN_SNT:
+			case NL80211_PLINK_OPN_RCVD:
+			case NL80211_PLINK_CNF_RCVD:
+			case NL80211_PLINK_HOLDING:
+				if (sta->plink_state == NL80211_PLINK_ESTAB)
+					changed = mesh_plink_dec_estab_count(
+							sdata);
 				sta->plink_state = params->plink_state;
+
+				ieee80211_mps_sta_status_update(sta);
+				changed |=
+				      ieee80211_mps_local_status_update(sdata);
 				break;
 			default:
 				/* nothing */
 				break;
 			}
-		else
+		} else {
 			switch (params->plink_action) {
 			case PLINK_ACTION_OPEN:
-				mesh_plink_open(sta);
+				changed |= mesh_plink_open(sta);
 				break;
 			case PLINK_ACTION_BLOCK:
-				mesh_plink_block(sta);
+				changed |= mesh_plink_block(sta);
 				break;
 			}
+		}
+
+		if (params->local_pm)
+			changed |=
+			      ieee80211_mps_set_sta_local_pm(sta,
+							     params->local_pm);
+		ieee80211_bss_info_change_notify(sdata, changed);
 #endif
 	}
 
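
Note the pattern the mesh branch adopts here: each plink/power-save helper returns a BSS_CHANGED_* bitmask, the bits are OR-ed into a local `changed`, and a single ieee80211_bss_info_change_notify() pushes them to the driver at the end rather than notifying per change. A condensed sketch (helper names invented; the real code notifies unconditionally, the guard below is an assumption):

	u32 changed = 0;

	changed |= apply_plink_state(sta);	/* invented helpers, each */
	changed |= apply_local_power_mode(sta);	/* returning changed bits */

	if (changed)	/* one driver notification for the whole batch */
		ieee80211_bss_info_change_notify(sdata, changed);
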
@@ -1263,6 +1342,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (!sta)
 		return -ENOMEM;
 
+	/*
+	 * defaults -- if userspace wants something else we'll
+	 * change it accordingly in sta_apply_parameters()
+	 */
 	sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 	sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
 
@@ -1299,7 +1382,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
 				 u8 *mac)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1307,7 +1389,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
 	if (mac)
 		return sta_info_destroy_addr_bss(sdata, mac);
 
-	sta_info_flush(local, sdata);
+	sta_info_flush(sdata);
 	return 0;
 }
 
@@ -1330,9 +1412,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 		return -ENOENT;
 	}
 
-	/* in station mode, supported rates are only valid with TDLS */
+	/* in station mode, some updates are only valid with TDLS */
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    params->supported_rates &&
+	    (params->supported_rates || params->ht_capa || params->vht_capa ||
+	     params->sta_modify_mask ||
+	     (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME))) &&
 	    !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
 		mutex_unlock(&local->sta_mtx);
 		return -EINVAL;
@@ -1416,13 +1500,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
 		return -ENOENT;
 	}
 
-	err = mesh_path_add(dst, sdata);
+	err = mesh_path_add(sdata, dst);
 	if (err) {
 		rcu_read_unlock();
 		return err;
 	}
 
-	mpath = mesh_path_lookup(dst, sdata);
+	mpath = mesh_path_lookup(sdata, dst);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENXIO;
@@ -1434,12 +1518,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
			       u8 *dst)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	if (dst)
-		return mesh_path_del(dst, sdata);
+		return mesh_path_del(sdata, dst);
 
 	mesh_path_flush_by_iface(sdata);
 	return 0;
@@ -1463,7 +1547,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
 		return -ENOENT;
 	}
 
-	mpath = mesh_path_lookup(dst, sdata);
+	mpath = mesh_path_lookup(sdata, dst);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -1527,7 +1611,7 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup(dst, sdata);
+	mpath = mesh_path_lookup(sdata, dst);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -1548,7 +1632,7 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	rcu_read_lock();
-	mpath = mesh_path_lookup_by_idx(idx, sdata);
+	mpath = mesh_path_lookup_by_idx(sdata, idx);
 	if (!mpath) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -1613,6 +1697,9 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
 	memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
 	       sizeof(setup->mcast_rate));
 
+	sdata->vif.bss_conf.beacon_int = setup->beacon_interval;
+	sdata->vif.bss_conf.dtim_period = setup->dtim_period;
+
 	return 0;
 }
 
@@ -1711,6 +1798,14 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
 	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
 		conf->dot11MeshHWMPconfirmationInterval =
 			nconf->dot11MeshHWMPconfirmationInterval;
+	if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) {
+		conf->power_mode = nconf->power_mode;
+		ieee80211_mps_local_status_update(sdata);
+	}
+	if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask))
+		conf->dot11MeshAwakeWindowDuration =
+			nconf->dot11MeshAwakeWindowDuration;
+	ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 	return 0;
 }
 
@@ -1736,9 +1831,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
 	if (err)
 		return err;
 
-	ieee80211_start_mesh(sdata);
-
-	return 0;
+	return ieee80211_start_mesh(sdata);
 }
 
 static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
@@ -1992,7 +2085,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
+	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+	       sizeof(int) * IEEE80211_NUM_BANDS);
 
 	return 0;
 }
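
The memcpy fix above is worth dwelling on: the `rate` parameter is declared as an array, but in C an array parameter decays to a pointer, so inside the function sizeof(rate) is the size of a pointer (4 or 8 bytes), not the size of the array — the old code copied only the first one or two ints. A standalone demonstration:

	#include <stdio.h>

	static void f(int rate[4])		/* decays to 'int *' */
	{
		printf("%zu\n", sizeof(rate));	/* 8 on LP64: pointer size */
	}

	int main(void)
	{
		int rate[4];
		printf("%zu\n", sizeof(rate));	/* 16: the whole array */
		f(rate);
		return 0;
	}
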
@@ -2195,7 +2289,8 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 
-	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
 
 	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -2301,7 +2396,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
 	INIT_LIST_HEAD(&roc->dependents);
 
 	/* if there's one pending or we're scanning, queue this one */
-	if (!list_empty(&local->roc_list) || local->scanning)
+	if (!list_empty(&local->roc_list) ||
+	    local->scanning || local->radar_detect_enabled)
 		goto out_check_combine;
 
 	/* if not HW assist, just queue & schedule work */
@@ -2551,6 +2647,37 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
 	return ieee80211_cancel_roc(local, cookie, false);
 }
 
+static int ieee80211_start_radar_detection(struct wiphy *wiphy,
+					   struct net_device *dev,
+					   struct cfg80211_chan_def *chandef)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	unsigned long timeout;
+	int err;
+
+	if (!list_empty(&local->roc_list) || local->scanning)
+		return -EBUSY;
+
+	/* whatever, but channel contexts should not complain about that one */
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
+	sdata->needed_rx_chains = local->rx_chains;
+	sdata->radar_required = true;
+
+	mutex_lock(&local->iflist_mtx);
+	err = ieee80211_vif_use_channel(sdata, chandef,
+					IEEE80211_CHANCTX_SHARED);
+	mutex_unlock(&local->iflist_mtx);
+	if (err)
+		return err;
+
+	timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
+	ieee80211_queue_delayed_work(&sdata->local->hw,
+				     &sdata->dfs_cac_timer_work, timeout);
+
+	return 0;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			     struct ieee80211_channel *chan, bool offchan,
 			     unsigned int wait, const u8 *buf, size_t len,
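
ieee80211_start_radar_detection() acquires the channel with radar_required set and then arms a delayed work item for the minimum Channel Availability Check time; when the work runs, the CAC period has elapsed without radar (the work handler itself is outside this excerpt). The scheduling idiom is the standard one — convert milliseconds to jiffies, queue on mac80211's workqueue:

	/* sketch of the one-shot timeout idiom used above */
	unsigned long timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);

	ieee80211_queue_delayed_work(&local->hw, &sdata->dfs_cac_timer_work,
				     timeout);
	/* presumably, cancel_delayed_work_sync() on the same work item
	 * aborts a pending CAC if radar fires first (assumption; the
	 * cancellation path is not shown in this patch) */
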
@@ -2655,7 +2782,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 		goto out_unlock;
 	}
 
-	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
+					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
 	if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
 		IEEE80211_SKB_CB(skb)->hw_queue =
 			local->hw.offchannel_tx_hw_queue;
@@ -3255,4 +3383,5 @@ struct cfg80211_ops mac80211_config_ops = {
 	.get_et_stats = ieee80211_get_et_stats,
 	.get_et_strings = ieee80211_get_et_strings,
 	.get_channel = ieee80211_cfg_get_channel,
+	.start_radar_detection = ieee80211_start_radar_detection,
 };
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 53f03120db55..78c0d90dd641 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -4,11 +4,12 @@
 
 #include <linux/nl80211.h>
 #include <linux/export.h>
+#include <linux/rtnetlink.h>
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
-static void ieee80211_change_chandef(struct ieee80211_local *local,
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
 				     struct ieee80211_chanctx *ctx,
 				     const struct cfg80211_chan_def *chandef)
 {
@@ -48,7 +49,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
 		if (!compat)
 			continue;
 
-		ieee80211_change_chandef(local, ctx, compat);
+		ieee80211_change_chanctx(local, ctx, compat);
 
 		return ctx;
 	}
@@ -90,6 +91,10 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
 
 	list_add_rcu(&ctx->list, &local->chanctx_list);
 
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
+
 	return ctx;
 }
 
@@ -109,6 +114,10 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
 
 	list_del_rcu(&ctx->list);
 	kfree_rcu(ctx, rcu_head);
+
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
 }
 
 static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -127,6 +136,11 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
 	ctx->refcount++;
 
 	ieee80211_recalc_txpower(sdata);
+	sdata->vif.bss_conf.idle = false;
+
+	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
 
 	return 0;
 }
@@ -161,7 +175,7 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
 	if (WARN_ON_ONCE(!compat))
 		return;
 
-	ieee80211_change_chandef(local, ctx, compat);
+	ieee80211_change_chanctx(local, ctx, compat);
 }
 
 static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -174,11 +188,18 @@ static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
 	ctx->refcount--;
 	rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
 
+	sdata->vif.bss_conf.idle = true;
+
+	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+
 	drv_unassign_vif_chanctx(local, sdata, ctx);
 
 	if (ctx->refcount > 0) {
 		ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
 		ieee80211_recalc_smps_chanctx(local, ctx);
+		ieee80211_recalc_radar_chanctx(local, ctx);
 	}
 }
 
@@ -202,6 +223,37 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 		ieee80211_free_chanctx(local, ctx);
 }
 
+void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
+				    struct ieee80211_chanctx *chanctx)
+{
+	struct ieee80211_sub_if_data *sdata;
+	bool radar_enabled = false;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (sdata->radar_required) {
+			radar_enabled = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (radar_enabled == chanctx->conf.radar_enabled)
+		return;
+
+	chanctx->conf.radar_enabled = radar_enabled;
+	local->radar_detect_enabled = chanctx->conf.radar_enabled;
+
+	if (!local->use_chanctx) {
+		local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
+		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+	}
+
+	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
+}
+
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
 				   struct ieee80211_chanctx *chanctx)
 {
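
ieee80211_recalc_radar_chanctx() recomputes a derived flag from scratch — scan all interfaces under RCU, check whether any still needs radar detection — and bails out early when the result matches what the channel context already holds. Recompute-and-compare keeps the function idempotent and safe to call from every assign/unassign path. The skeleton of the idiom, condensed from the function above:

	bool want = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list)
		if (sdata->radar_required) {
			want = true;
			break;
		}
	rcu_read_unlock();

	if (want == ctx->conf.radar_enabled)
		return;			/* no transition, no driver call */
	ctx->conf.radar_enabled = want;
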
@@ -317,6 +369,56 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 	}
 
 	ieee80211_recalc_smps_chanctx(local, ctx);
+	ieee80211_recalc_radar_chanctx(local, ctx);
+ out:
+	mutex_unlock(&local->chanctx_mtx);
+	return ret;
+}
+
+int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
+				   const struct cfg80211_chan_def *chandef,
+				   u32 *changed)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *ctx;
+	int ret;
+
+	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+				     IEEE80211_CHAN_DISABLED))
+		return -EINVAL;
+
+	mutex_lock(&local->chanctx_mtx);
+	if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
+		ret = 0;
+		goto out;
+	}
+
+	if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	if (!conf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ctx = container_of(conf, struct ieee80211_chanctx, conf);
+	if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sdata->vif.bss_conf.chandef = *chandef;
+
+	ieee80211_recalc_chanctx_chantype(local, ctx);
+
+	*changed |= BSS_CHANGED_BANDWIDTH;
+	ret = 0;
  out:
 	mutex_unlock(&local->chanctx_mtx);
 	return ret;
@@ -331,6 +433,59 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&sdata->local->chanctx_mtx);
 }
 
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sub_if_data *ap;
+	struct ieee80211_chanctx_conf *conf;
+
+	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
+		return;
+
+	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+
+	mutex_lock(&local->chanctx_mtx);
+
+	conf = rcu_dereference_protected(ap->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+	mutex_unlock(&local->chanctx_mtx);
+}
+
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+					 bool clear)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_chanctx_conf *conf;
+
+	ASSERT_RTNL();
+
+	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+		return;
+
+	mutex_lock(&local->chanctx_mtx);
+
+	/*
+	 * Check that conf exists, even when clearing this function
+	 * must be called with the AP's channel context still there
+	 * as it would otherwise cause VLANs to have an invalid
+	 * channel context pointer for a while, possibly pointing
+	 * to a channel context that has already been freed.
+	 */
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	WARN_ON(!conf);
+
+	if (clear)
+		conf = NULL;
+
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+
+	mutex_unlock(&local->chanctx_mtx);
+}
+
 void ieee80211_iter_chan_contexts_atomic(
 	struct ieee80211_hw *hw,
 	void (*iter)(struct ieee80211_hw *hw,
@@ -343,7 +498,8 @@ void ieee80211_iter_chan_contexts_atomic(
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
-		iter(hw, &ctx->conf, iter_data);
+		if (ctx->driver_present)
+			iter(hw, &ctx->conf, iter_data);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 8f383a576016..4ccc5ed6237d 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -44,6 +44,12 @@
 #define MAC80211_MESH_SYNC_DEBUG 0
 #endif
 
+#ifdef CONFIG_MAC80211_MESH_PS_DEBUG
+#define MAC80211_MESH_PS_DEBUG 1
+#else
+#define MAC80211_MESH_PS_DEBUG 0
+#endif
+
 #ifdef CONFIG_MAC80211_TDLS_DEBUG
 #define MAC80211_TDLS_DEBUG 1
 #else
@@ -151,6 +157,10 @@ do { \
 	_sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
 		   sdata, fmt, ##__VA_ARGS__)
 
+#define mps_dbg(sdata, fmt, ...) \
+	_sdata_dbg(MAC80211_MESH_PS_DEBUG, \
+		   sdata, fmt, ##__VA_ARGS__)
+
 #define tdls_dbg(sdata, fmt, ...) \
 	_sdata_dbg(MAC80211_TDLS_DEBUG, \
 		   sdata, fmt, ##__VA_ARGS__)
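
mps_dbg() follows the existing *_dbg wrappers: the MAC80211_MESH_PS_DEBUG constant is always defined as 0 or 1, so the call compiles (and type-checks its arguments) in every configuration but produces output only when CONFIG_MAC80211_MESH_PS_DEBUG is set. Typical use would look like this (message text invented):

	mps_dbg(sdata, "STA %pM enters power mode %d\n",
		sta->sta.addr, pm);
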
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 466f4b45dd94..b0e32d628114 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -121,8 +121,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
 	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
 		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
-	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
-		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
+	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
+		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
 	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
 		sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
 	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
@@ -151,8 +151,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
 	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
 		sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
-	if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
-		sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
 
 	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 	kfree(buf);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cbde5cc49a40..059bbb82e84f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -515,6 +515,9 @@ IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
 		  u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
 		  u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
+IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC);
+IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
+		  u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC);
 #endif
 
 #define DEBUGFS_ADD_MODE(name, mode) \
@@ -620,6 +623,8 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
 	MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
 	MESHPARAMS_ADD(dot11MeshHWMProotInterval);
 	MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
+	MESHPARAMS_ADD(power_mode);
+	MESHPARAMS_ADD(dot11MeshAwakeWindowDuration);
 #undef MESHPARAMS_ADD
 }
 #endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6fb1168b9f16..c7591f73dbc3 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -65,7 +65,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 	test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
 	int res = scnprintf(buf, sizeof(buf),
-			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
 			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
 			    TEST(PS_DRIVER), TEST(AUTHORIZED),
 			    TEST(SHORT_PREAMBLE),
@@ -74,7 +74,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
 			    TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
 			    TEST(INSERTED), TEST(RATE_CONTROL),
-			    TEST(TOFFSET_KNOWN));
+			    TEST(TOFFSET_KNOWN), TEST(MPSP_OWNER),
+			    TEST(MPSP_RECIPIENT));
 #undef TEST
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 698dc7e6f309..ee56d0779d8b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -207,6 +207,17 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
207{ 207{
208 might_sleep(); 208 might_sleep();
209 209
210 if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
211 BSS_CHANGED_BEACON_ENABLED) &&
212 sdata->vif.type != NL80211_IFTYPE_AP &&
213 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
214 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
215 return;
216
217 if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
218 sdata->vif.type == NL80211_IFTYPE_MONITOR))
219 return;
220
210 check_sdata_in_driver(sdata); 221 check_sdata_in_driver(sdata);
211 222
212 trace_drv_bss_info_changed(local, sdata, info, changed); 223 trace_drv_bss_info_changed(local, sdata, info, changed);
@@ -520,6 +531,43 @@ static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
520 local->ops->sta_remove_debugfs(&local->hw, &sdata->vif, 531 local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
521 sta, dir); 532 sta, dir);
522} 533}
534
535static inline
536void drv_add_interface_debugfs(struct ieee80211_local *local,
537 struct ieee80211_sub_if_data *sdata)
538{
539 might_sleep();
540
541 check_sdata_in_driver(sdata);
542
543 if (!local->ops->add_interface_debugfs)
544 return;
545
546 local->ops->add_interface_debugfs(&local->hw, &sdata->vif,
547 sdata->debugfs.dir);
548}
549
550static inline
551void drv_remove_interface_debugfs(struct ieee80211_local *local,
552 struct ieee80211_sub_if_data *sdata)
553{
554 might_sleep();
555
556 check_sdata_in_driver(sdata);
557
558 if (!local->ops->remove_interface_debugfs)
559 return;
560
561 local->ops->remove_interface_debugfs(&local->hw, &sdata->vif,
562 sdata->debugfs.dir);
563}
564#else
565static inline
566void drv_add_interface_debugfs(struct ieee80211_local *local,
567 struct ieee80211_sub_if_data *sdata) {}
568static inline
569void drv_remove_interface_debugfs(struct ieee80211_local *local,
570 struct ieee80211_sub_if_data *sdata) {}
523#endif 571#endif
524 572
525static inline __must_check 573static inline __must_check
@@ -561,7 +609,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
561 check_sdata_in_driver(sdata); 609 check_sdata_in_driver(sdata);
562 610
563 WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && 611 WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
564 sdata->vif.type != NL80211_IFTYPE_ADHOC); 612 (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
613 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
565 614
566 trace_drv_sta_rc_update(local, sdata, sta, changed); 615 trace_drv_sta_rc_update(local, sdata, sta, changed);
567 if (local->ops->sta_rc_update) 616 if (local->ops->sta_rc_update)
@@ -837,11 +886,12 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
837} 886}
838 887
839static inline void drv_rssi_callback(struct ieee80211_local *local, 888static inline void drv_rssi_callback(struct ieee80211_local *local,
889 struct ieee80211_sub_if_data *sdata,
840 const enum ieee80211_rssi_event event) 890 const enum ieee80211_rssi_event event)
841{ 891{
842 trace_drv_rssi_callback(local, event); 892 trace_drv_rssi_callback(local, sdata, event);
843 if (local->ops->rssi_callback) 893 if (local->ops->rssi_callback)
844 local->ops->rssi_callback(&local->hw, event); 894 local->ops->rssi_callback(&local->hw, &sdata->vif, event);
845 trace_drv_return_void(local); 895 trace_drv_return_void(local);
846} 896}
847 897
@@ -913,6 +963,8 @@ static inline int drv_add_chanctx(struct ieee80211_local *local,
913 if (local->ops->add_chanctx) 963 if (local->ops->add_chanctx)
914 ret = local->ops->add_chanctx(&local->hw, &ctx->conf); 964 ret = local->ops->add_chanctx(&local->hw, &ctx->conf);
915 trace_drv_return_int(local, ret); 965 trace_drv_return_int(local, ret);
966 if (!ret)
967 ctx->driver_present = true;
916 968
917 return ret; 969 return ret;
918} 970}
@@ -924,6 +976,7 @@ static inline void drv_remove_chanctx(struct ieee80211_local *local,
924 if (local->ops->remove_chanctx) 976 if (local->ops->remove_chanctx)
925 local->ops->remove_chanctx(&local->hw, &ctx->conf); 977 local->ops->remove_chanctx(&local->hw, &ctx->conf);
926 trace_drv_return_void(local); 978 trace_drv_return_void(local);
979 ctx->driver_present = false;
927} 980}
928 981
929static inline void drv_change_chanctx(struct ieee80211_local *local, 982static inline void drv_change_chanctx(struct ieee80211_local *local,
@@ -931,8 +984,10 @@ static inline void drv_change_chanctx(struct ieee80211_local *local,
931 u32 changed) 984 u32 changed)
932{ 985{
933 trace_drv_change_chanctx(local, ctx, changed); 986 trace_drv_change_chanctx(local, ctx, changed);
934 if (local->ops->change_chanctx) 987 if (local->ops->change_chanctx) {
988 WARN_ON_ONCE(!ctx->driver_present);
935 local->ops->change_chanctx(&local->hw, &ctx->conf, changed); 989 local->ops->change_chanctx(&local->hw, &ctx->conf, changed);
990 }
936 trace_drv_return_void(local); 991 trace_drv_return_void(local);
937} 992}
938 993
@@ -945,10 +1000,12 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
945 check_sdata_in_driver(sdata); 1000 check_sdata_in_driver(sdata);
946 1001
947 trace_drv_assign_vif_chanctx(local, sdata, ctx); 1002 trace_drv_assign_vif_chanctx(local, sdata, ctx);
948 if (local->ops->assign_vif_chanctx) 1003 if (local->ops->assign_vif_chanctx) {
1004 WARN_ON_ONCE(!ctx->driver_present);
949 ret = local->ops->assign_vif_chanctx(&local->hw, 1005 ret = local->ops->assign_vif_chanctx(&local->hw,
950 &sdata->vif, 1006 &sdata->vif,
951 &ctx->conf); 1007 &ctx->conf);
1008 }
952 trace_drv_return_int(local, ret); 1009 trace_drv_return_int(local, ret);
953 1010
954 return ret; 1011 return ret;
@@ -961,10 +1018,12 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
961 check_sdata_in_driver(sdata); 1018 check_sdata_in_driver(sdata);
962 1019
963 trace_drv_unassign_vif_chanctx(local, sdata, ctx); 1020 trace_drv_unassign_vif_chanctx(local, sdata, ctx);
964 if (local->ops->unassign_vif_chanctx) 1021 if (local->ops->unassign_vif_chanctx) {
1022 WARN_ON_ONCE(!ctx->driver_present);
965 local->ops->unassign_vif_chanctx(&local->hw, 1023 local->ops->unassign_vif_chanctx(&local->hw,
966 &sdata->vif, 1024 &sdata->vif,
967 &ctx->conf); 1025 &ctx->conf);
1026 }
968 trace_drv_return_void(local); 1027 trace_drv_return_void(local);
969} 1028}
970 1029
@@ -1003,4 +1062,32 @@ static inline void drv_restart_complete(struct ieee80211_local *local)
1003 trace_drv_return_void(local); 1062 trace_drv_return_void(local);
1004} 1063}
1005 1064
1065static inline void
1066drv_set_default_unicast_key(struct ieee80211_local *local,
1067 struct ieee80211_sub_if_data *sdata,
1068 int key_idx)
1069{
1070 check_sdata_in_driver(sdata);
1071
1072 WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
1073
1074 trace_drv_set_default_unicast_key(local, sdata, key_idx);
1075 if (local->ops->set_default_unicast_key)
1076 local->ops->set_default_unicast_key(&local->hw, &sdata->vif,
1077 key_idx);
1078 trace_drv_return_void(local);
1079}
1080
1081#if IS_ENABLED(CONFIG_IPV6)
1082static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
1083 struct ieee80211_sub_if_data *sdata,
1084 struct inet6_dev *idev)
1085{
1086 trace_drv_ipv6_addr_change(local, sdata);
1087 if (local->ops->ipv6_addr_change)
1088 local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev);
1089 trace_drv_return_void(local);
1090}
1091#endif
1092
1006#endif /* __MAC80211_DRIVER_OPS */ 1093#endif /* __MAC80211_DRIVER_OPS */
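
Every wrapper in driver-ops.h follows the same shape: trace the call, check preconditions, invoke the driver's optional callback, trace the return. The hunks above extend that shape with a driver_present flag so chanctx ops are never handed a context the driver has not accepted. Below is a minimal userspace sketch of the pattern, not the real mac80211 definitions: the structs are stand-ins and printf macros replace the generated trace_drv_*() events.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in tracepoints; mac80211 uses generated trace_drv_*() events */
#define trace_drv_add_chanctx(ctx)  printf("drv_add_chanctx\n")
#define trace_drv_return_int(ret)   printf("drv_return: %d\n", (ret))

struct chanctx {
	bool driver_present;	/* has the driver accepted this context? */
};

struct driver_ops {
	int (*add_chanctx)(struct chanctx *ctx);	/* optional callback */
};

static int drv_add_chanctx(const struct driver_ops *ops, struct chanctx *ctx)
{
	int ret = -EOPNOTSUPP;

	trace_drv_add_chanctx(ctx);
	if (ops->add_chanctx)
		ret = ops->add_chanctx(ctx);
	trace_drv_return_int(ret);
	if (!ret)
		ctx->driver_present = true;	/* mirrors the hunk above */
	return ret;
}

Later ops can then WARN_ON_ONCE(!ctx->driver_present) before touching the context, exactly as drv_change_chanctx() and the assign/unassign pair now do.
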
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index a71d891794a4..0db25d4bb223 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -37,6 +37,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
37 u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask); 37 u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
38 int i; 38 int i;
39 39
40 if (!ht_cap->ht_supported)
41 return;
42
40 if (sdata->vif.type != NL80211_IFTYPE_STATION) { 43 if (sdata->vif.type != NL80211_IFTYPE_STATION) {
41 /* AP interfaces call this code when adding new stations, 44 /* AP interfaces call this code when adding new stations,
42 * so just silently ignore non-station interfaces. 45 * so just silently ignore non-station interfaces.
@@ -62,6 +65,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
62 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40); 65 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
63 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40); 66 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
64 67
68 /* Allow user to disable SGI-20 (SGI-40 is handled above) */
69 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20);
70
65 /* Allow user to disable the max-AMSDU bit. */ 71 /* Allow user to disable the max-AMSDU bit. */
66 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU); 72 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
67 73
@@ -86,22 +92,24 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
86} 92}
87 93
88 94
89void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 95bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
90 struct ieee80211_supported_band *sband, 96 struct ieee80211_supported_band *sband,
91 struct ieee80211_ht_cap *ht_cap_ie, 97 const struct ieee80211_ht_cap *ht_cap_ie,
92 struct ieee80211_sta_ht_cap *ht_cap) 98 struct sta_info *sta)
93{ 99{
100 struct ieee80211_sta_ht_cap ht_cap;
94 u8 ampdu_info, tx_mcs_set_cap; 101 u8 ampdu_info, tx_mcs_set_cap;
95 int i, max_tx_streams; 102 int i, max_tx_streams;
103 bool changed;
104 enum ieee80211_sta_rx_bandwidth bw;
105 enum ieee80211_smps_mode smps_mode;
96 106
97 BUG_ON(!ht_cap); 107 memset(&ht_cap, 0, sizeof(ht_cap));
98
99 memset(ht_cap, 0, sizeof(*ht_cap));
100 108
101 if (!ht_cap_ie || !sband->ht_cap.ht_supported) 109 if (!ht_cap_ie || !sband->ht_cap.ht_supported)
102 return; 110 goto apply;
103 111
104 ht_cap->ht_supported = true; 112 ht_cap.ht_supported = true;
105 113
106 /* 114 /*
107 * The bits listed in this expression should be 115 * The bits listed in this expression should be
@@ -109,7 +117,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
109 * advertises more, then we can't use those, thus 117 * advertises more, then we can't use those, thus
110 * we mask them out. 118 * we mask them out.
111 */ 119 */
112 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & 120 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
113 (sband->ht_cap.cap | 121 (sband->ht_cap.cap |
114 ~(IEEE80211_HT_CAP_LDPC_CODING | 122 ~(IEEE80211_HT_CAP_LDPC_CODING |
115 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 123 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -117,30 +125,31 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
117 IEEE80211_HT_CAP_SGI_20 | 125 IEEE80211_HT_CAP_SGI_20 |
118 IEEE80211_HT_CAP_SGI_40 | 126 IEEE80211_HT_CAP_SGI_40 |
119 IEEE80211_HT_CAP_DSSSCCK40)); 127 IEEE80211_HT_CAP_DSSSCCK40));
128
120 /* 129 /*
121 * The STBC bits are asymmetric -- if we don't have 130 * The STBC bits are asymmetric -- if we don't have
122 * TX then mask out the peer's RX and vice versa. 131 * TX then mask out the peer's RX and vice versa.
123 */ 132 */
124 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) 133 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
125 ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC; 134 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
126 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) 135 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
127 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; 136 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
128 137
129 ampdu_info = ht_cap_ie->ampdu_params_info; 138 ampdu_info = ht_cap_ie->ampdu_params_info;
130 ht_cap->ampdu_factor = 139 ht_cap.ampdu_factor =
131 ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; 140 ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
132 ht_cap->ampdu_density = 141 ht_cap.ampdu_density =
133 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; 142 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
134 143
135 /* own MCS TX capabilities */ 144 /* own MCS TX capabilities */
136 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; 145 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
137 146
138 /* Copy peer MCS TX capabilities, the driver might need them. */ 147 /* Copy peer MCS TX capabilities, the driver might need them. */
139 ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params; 148 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
140 149
141 /* can we TX with MCS rates? */ 150 /* can we TX with MCS rates? */
142 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) 151 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
143 return; 152 goto apply;
144 153
145 /* Counting from 0, therefore +1 */ 154 /* Counting from 0, therefore +1 */
146 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) 155 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF)
@@ -158,37 +167,90 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
158 * - remainder are multiple spatial streams using unequal modulation 167 * - remainder are multiple spatial streams using unequal modulation
159 */ 168 */
160 for (i = 0; i < max_tx_streams; i++) 169 for (i = 0; i < max_tx_streams; i++)
161 ht_cap->mcs.rx_mask[i] = 170 ht_cap.mcs.rx_mask[i] =
162 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; 171 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
163 172
164 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) 173 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
165 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; 174 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
166 i < IEEE80211_HT_MCS_MASK_LEN; i++) 175 i < IEEE80211_HT_MCS_MASK_LEN; i++)
167 ht_cap->mcs.rx_mask[i] = 176 ht_cap.mcs.rx_mask[i] =
168 sband->ht_cap.mcs.rx_mask[i] & 177 sband->ht_cap.mcs.rx_mask[i] &
169 ht_cap_ie->mcs.rx_mask[i]; 178 ht_cap_ie->mcs.rx_mask[i];
170 179
171 /* handle MCS rate 32 too */ 180 /* handle MCS rate 32 too */
172 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) 181 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
173 ht_cap->mcs.rx_mask[32/8] |= 1; 182 ht_cap.mcs.rx_mask[32/8] |= 1;
174 183
184 apply:
175 /* 185 /*
176 * If user has specified capability over-rides, take care 186 * If user has specified capability over-rides, take care
177 * of that here. 187 * of that here.
178 */ 188 */
179 ieee80211_apply_htcap_overrides(sdata, ht_cap); 189 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
190
191 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
192
193 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
194
195 switch (sdata->vif.bss_conf.chandef.width) {
196 default:
197 WARN_ON_ONCE(1);
198 /* fall through */
199 case NL80211_CHAN_WIDTH_20_NOHT:
200 case NL80211_CHAN_WIDTH_20:
201 bw = IEEE80211_STA_RX_BW_20;
202 break;
203 case NL80211_CHAN_WIDTH_40:
204 case NL80211_CHAN_WIDTH_80:
205 case NL80211_CHAN_WIDTH_80P80:
206 case NL80211_CHAN_WIDTH_160:
207 bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
208 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
209 break;
210 }
211
212 if (bw != sta->sta.bandwidth)
213 changed = true;
214 sta->sta.bandwidth = bw;
215
216 sta->cur_max_bandwidth =
217 ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
218 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
219
220 switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
221 >> IEEE80211_HT_CAP_SM_PS_SHIFT) {
222 case WLAN_HT_CAP_SM_PS_INVALID:
223 case WLAN_HT_CAP_SM_PS_STATIC:
224 smps_mode = IEEE80211_SMPS_STATIC;
225 break;
226 case WLAN_HT_CAP_SM_PS_DYNAMIC:
227 smps_mode = IEEE80211_SMPS_DYNAMIC;
228 break;
229 case WLAN_HT_CAP_SM_PS_DISABLED:
230 smps_mode = IEEE80211_SMPS_OFF;
231 break;
232 }
233
234 if (smps_mode != sta->sta.smps_mode)
235 changed = true;
236 sta->sta.smps_mode = smps_mode;
237
238 return changed;
180} 239}
181 240
182void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) 241void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
242 enum ieee80211_agg_stop_reason reason)
183{ 243{
184 int i; 244 int i;
185 245
186 cancel_work_sync(&sta->ampdu_mlme.work); 246 cancel_work_sync(&sta->ampdu_mlme.work);
187 247
188 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 248 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
189 __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx); 249 __ieee80211_stop_tx_ba_session(sta, i, reason);
190 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 250 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
191 WLAN_REASON_QSTA_LEAVE_QBSS, tx); 251 WLAN_REASON_QSTA_LEAVE_QBSS,
252 reason != AGG_STOP_DESTROY_STA &&
253 reason != AGG_STOP_PEER_REQUEST);
192 } 254 }
193} 255}
194 256
@@ -245,8 +307,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
245 if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, 307 if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
246 &tid_tx->state)) 308 &tid_tx->state))
247 ___ieee80211_stop_tx_ba_session(sta, tid, 309 ___ieee80211_stop_tx_ba_session(sta, tid,
248 WLAN_BACK_INITIATOR, 310 AGG_STOP_LOCAL_REQUEST);
249 true);
250 } 311 }
251 mutex_unlock(&sta->ampdu_mlme.mtx); 312 mutex_unlock(&sta->ampdu_mlme.mtx);
252} 313}
@@ -314,8 +375,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
314 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0, 375 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
315 true); 376 true);
316 else 377 else
317 __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 378 __ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_PEER_REQUEST);
318 true);
319} 379}
320 380
321int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, 381int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
@@ -387,6 +447,9 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
387 if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF)) 447 if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
388 smps_mode = IEEE80211_SMPS_AUTOMATIC; 448 smps_mode = IEEE80211_SMPS_AUTOMATIC;
389 449
450 if (sdata->u.mgd.driver_smps_mode == smps_mode)
451 return;
452
390 sdata->u.mgd.driver_smps_mode = smps_mode; 453 sdata->u.mgd.driver_smps_mode = smps_mode;
391 454
392 ieee80211_queue_work(&sdata->local->hw, 455 ieee80211_queue_work(&sdata->local->hw,
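
The reworked ieee80211_ht_cap_ie_to_sta_ht_cap() no longer writes into a caller-supplied buffer; it builds the capabilities in a local struct, compares against the station's stored copy, and reports whether anything changed. The core of that idiom, reduced to stand-in types rather than the real ieee80211_sta_ht_cap:

#include <stdbool.h>
#include <string.h>

struct ht_caps {			/* stand-in for ieee80211_sta_ht_cap */
	unsigned short cap;
	unsigned char rx_mask[10];
};

/* Build the new state in a scratch copy first; one memcmp then tells
 * the caller whether rates/bandwidth need to be re-evaluated. */
static bool apply_ht_caps(struct ht_caps *stored, const struct ht_caps *fresh)
{
	bool changed = memcmp(stored, fresh, sizeof(*fresh)) != 0;

	memcpy(stored, fresh, sizeof(*fresh));
	return changed;
}

ibss.c ORs the return value straight into rates_updated, which is why the open-coded memcmp/memcpy block there could be deleted.
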
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8881fc77fb13..40b71dfcc79d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -67,7 +67,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
67 skb_reserve(skb, sdata->local->hw.extra_tx_headroom); 67 skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
68 68
69 if (!ether_addr_equal(ifibss->bssid, bssid)) 69 if (!ether_addr_equal(ifibss->bssid, bssid))
70 sta_info_flush(sdata->local, sdata); 70 sta_info_flush(sdata);
71 71
72 /* if merging, indicate to driver that we leave the old IBSS */ 72 /* if merging, indicate to driver that we leave the old IBSS */
73 if (sdata->vif.bss_conf.ibss_joined) { 73 if (sdata->vif.bss_conf.ibss_joined) {
@@ -191,6 +191,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
191 191
192 rcu_assign_pointer(ifibss->presp, skb); 192 rcu_assign_pointer(ifibss->presp, skb);
193 193
194 sdata->vif.bss_conf.enable_beacon = true;
194 sdata->vif.bss_conf.beacon_int = beacon_int; 195 sdata->vif.bss_conf.beacon_int = beacon_int;
195 sdata->vif.bss_conf.basic_rates = basic_rates; 196 sdata->vif.bss_conf.basic_rates = basic_rates;
196 bss_change = BSS_CHANGED_BEACON_INT; 197 bss_change = BSS_CHANGED_BEACON_INT;
@@ -227,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
227 228
228 bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan, 229 bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
229 mgmt, skb->len, 0, GFP_KERNEL); 230 mgmt, skb->len, 0, GFP_KERNEL);
230 cfg80211_put_bss(bss); 231 cfg80211_put_bss(local->hw.wiphy, bss);
231 netif_carrier_on(sdata->dev); 232 netif_carrier_on(sdata->dev);
232 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); 233 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
233} 234}
@@ -241,6 +242,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
241 u32 basic_rates; 242 u32 basic_rates;
242 int i, j; 243 int i, j;
243 u16 beacon_int = cbss->beacon_interval; 244 u16 beacon_int = cbss->beacon_interval;
245 const struct cfg80211_bss_ies *ies;
246 u64 tsf;
244 247
245 lockdep_assert_held(&sdata->u.ibss.mtx); 248 lockdep_assert_held(&sdata->u.ibss.mtx);
246 249
@@ -264,13 +267,17 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
264 } 267 }
265 } 268 }
266 269
270 rcu_read_lock();
271 ies = rcu_dereference(cbss->ies);
272 tsf = ies->tsf;
273 rcu_read_unlock();
274
267 __ieee80211_sta_join_ibss(sdata, cbss->bssid, 275 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
268 beacon_int, 276 beacon_int,
269 cbss->channel, 277 cbss->channel,
270 basic_rates, 278 basic_rates,
271 cbss->capability, 279 cbss->capability,
272 cbss->tsf, 280 tsf, false);
273 false);
274} 281}
275 282
276static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, 283static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
@@ -301,7 +308,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
301 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", 308 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
302 sdata->vif.addr, addr, sdata->u.ibss.bssid); 309 sdata->vif.addr, addr, sdata->u.ibss.bssid);
303 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0, 310 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
304 addr, sdata->u.ibss.bssid, NULL, 0, 0); 311 addr, sdata->u.ibss.bssid, NULL, 0, 0, 0);
305 } 312 }
306 return sta; 313 return sta;
307} 314}
@@ -421,15 +428,13 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
421 * has actually implemented this. 428 * has actually implemented this.
422 */ 429 */
423 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, 430 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0,
424 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0); 431 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0);
425} 432}
426 433
427static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 434static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
428 struct ieee80211_mgmt *mgmt, 435 struct ieee80211_mgmt *mgmt, size_t len,
429 size_t len,
430 struct ieee80211_rx_status *rx_status, 436 struct ieee80211_rx_status *rx_status,
431 struct ieee802_11_elems *elems, 437 struct ieee802_11_elems *elems)
432 bool beacon)
433{ 438{
434 struct ieee80211_local *local = sdata->local; 439 struct ieee80211_local *local = sdata->local;
435 int freq; 440 int freq;
@@ -491,33 +496,26 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
491 if (sta && elems->ht_operation && elems->ht_cap_elem && 496 if (sta && elems->ht_operation && elems->ht_cap_elem &&
492 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { 497 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
493 /* we both use HT */ 498 /* we both use HT */
494 struct ieee80211_sta_ht_cap sta_ht_cap_new; 499 struct ieee80211_ht_cap htcap_ie;
495 struct cfg80211_chan_def chandef; 500 struct cfg80211_chan_def chandef;
496 501
497 ieee80211_ht_oper_to_chandef(channel, 502 ieee80211_ht_oper_to_chandef(channel,
498 elems->ht_operation, 503 elems->ht_operation,
499 &chandef); 504 &chandef);
500 505
501 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 506 memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
502 elems->ht_cap_elem,
503 &sta_ht_cap_new);
504 507
505 /* 508 /*
506 * fall back to HT20 if we don't use HT40, or use 509 * fall back to HT20 if we don't use HT40, or use
507 * the other extension channel 510 * the other extension channel
508 */ 511 */
509 if (chandef.width != NL80211_CHAN_WIDTH_40 || 512 if (cfg80211_get_chandef_type(&chandef) !=
510 cfg80211_get_chandef_type(&chandef) !=
511 sdata->u.ibss.channel_type) 513 sdata->u.ibss.channel_type)
512 sta_ht_cap_new.cap &= 514 htcap_ie.cap_info &=
513 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 515 cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
514 516
515 if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new, 517 rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(
516 sizeof(sta_ht_cap_new))) { 518 sdata, sband, &htcap_ie, sta);
517 memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
518 sizeof(sta_ht_cap_new));
519 rates_updated = true;
520 }
521 } 519 }
522 520
523 if (sta && rates_updated) { 521 if (sta && rates_updated) {
@@ -530,14 +528,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
530 } 528 }
531 529
532 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 530 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
533 channel, beacon); 531 channel);
534 if (!bss) 532 if (!bss)
535 return; 533 return;
536 534
537 cbss = container_of((void *)bss, struct cfg80211_bss, priv); 535 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
538 536
539 /* was just updated in ieee80211_bss_info_update */ 537 /* same for beacon and probe response */
540 beacon_timestamp = cbss->tsf; 538 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
541 539
542 /* check if we need to merge IBSS */ 540 /* check if we need to merge IBSS */
543 541
@@ -703,8 +701,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
703 sdata_info(sdata, 701 sdata_info(sdata,
704 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); 702 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
705 703
706 ieee80211_request_internal_scan(sdata, 704 ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
707 ifibss->ssid, ifibss->ssid_len, NULL); 705 NULL);
708} 706}
709 707
710static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 708static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,9 +800,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
802 IEEE80211_SCAN_INTERVAL)) { 800 IEEE80211_SCAN_INTERVAL)) {
803 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); 801 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
804 802
805 ieee80211_request_internal_scan(sdata, 803 ieee80211_request_ibss_scan(sdata, ifibss->ssid,
806 ifibss->ssid, ifibss->ssid_len, 804 ifibss->ssid_len, chan);
807 ifibss->fixed_channel ? ifibss->channel : NULL);
808 } else { 805 } else {
809 int interval = IEEE80211_SCAN_INTERVAL; 806 int interval = IEEE80211_SCAN_INTERVAL;
810 807
@@ -878,14 +875,21 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
878 ieee80211_tx_skb(sdata, skb); 875 ieee80211_tx_skb(sdata, skb);
879} 876}
880 877
881static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 878static
882 struct ieee80211_mgmt *mgmt, 879void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata,
883 size_t len, 880 struct ieee80211_mgmt *mgmt, size_t len,
884 struct ieee80211_rx_status *rx_status) 881 struct ieee80211_rx_status *rx_status)
885{ 882{
886 size_t baselen; 883 size_t baselen;
887 struct ieee802_11_elems elems; 884 struct ieee802_11_elems elems;
888 885
886 BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) !=
887 offsetof(typeof(mgmt->u.beacon), variable));
888
889 /*
890 * either beacon or probe_resp, but the variable field is at the
891 * same offset
892 */
889 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 893 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
890 if (baselen > len) 894 if (baselen > len)
891 return; 895 return;
@@ -893,25 +897,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
893 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 897 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
894 &elems); 898 &elems);
895 899
896 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 900 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
897}
898
899static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
900 struct ieee80211_mgmt *mgmt,
901 size_t len,
902 struct ieee80211_rx_status *rx_status)
903{
904 size_t baselen;
905 struct ieee802_11_elems elems;
906
907 /* Process beacon from the current BSS */
908 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
909 if (baselen > len)
910 return;
911
912 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
913
914 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
915} 901}
916 902
917void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 903void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -935,12 +921,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
935 ieee80211_rx_mgmt_probe_req(sdata, skb); 921 ieee80211_rx_mgmt_probe_req(sdata, skb);
936 break; 922 break;
937 case IEEE80211_STYPE_PROBE_RESP: 923 case IEEE80211_STYPE_PROBE_RESP:
938 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
939 rx_status);
940 break;
941 case IEEE80211_STYPE_BEACON: 924 case IEEE80211_STYPE_BEACON:
942 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 925 ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len,
943 rx_status); 926 rx_status);
944 break; 927 break;
945 case IEEE80211_STYPE_AUTH: 928 case IEEE80211_STYPE_AUTH:
946 ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); 929 ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
@@ -1118,10 +1101,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1118 1101
1119 mutex_unlock(&sdata->u.ibss.mtx); 1102 mutex_unlock(&sdata->u.ibss.mtx);
1120 1103
1121 mutex_lock(&sdata->local->mtx);
1122 ieee80211_recalc_idle(sdata->local);
1123 mutex_unlock(&sdata->local->mtx);
1124
1125 /* 1104 /*
1126 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is 1105 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
1127 * reserved, but an HT STA shall protect HT transmissions as though 1106 * reserved, but an HT STA shall protect HT transmissions as though
@@ -1175,7 +1154,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1175 1154
1176 if (cbss) { 1155 if (cbss) {
1177 cfg80211_unlink_bss(local->hw.wiphy, cbss); 1156 cfg80211_unlink_bss(local->hw.wiphy, cbss);
1178 cfg80211_put_bss(cbss); 1157 cfg80211_put_bss(local->hw.wiphy, cbss);
1179 } 1158 }
1180 } 1159 }
1181 1160
@@ -1183,7 +1162,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1183 memset(ifibss->bssid, 0, ETH_ALEN); 1162 memset(ifibss->bssid, 0, ETH_ALEN);
1184 ifibss->ssid_len = 0; 1163 ifibss->ssid_len = 0;
1185 1164
1186 sta_info_flush(sdata->local, sdata); 1165 sta_info_flush(sdata);
1187 1166
1188 spin_lock_bh(&ifibss->incomplete_lock); 1167 spin_lock_bh(&ifibss->incomplete_lock);
1189 while (!list_empty(&ifibss->incomplete_stations)) { 1168 while (!list_empty(&ifibss->incomplete_stations)) {
@@ -1206,6 +1185,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1206 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); 1185 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
1207 sdata->vif.bss_conf.ibss_joined = false; 1186 sdata->vif.bss_conf.ibss_joined = false;
1208 sdata->vif.bss_conf.ibss_creator = false; 1187 sdata->vif.bss_conf.ibss_creator = false;
1188 sdata->vif.bss_conf.enable_beacon = false;
1189 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1209 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1190 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1210 BSS_CHANGED_IBSS); 1191 BSS_CHANGED_IBSS);
1211 synchronize_rcu(); 1192 synchronize_rcu();
@@ -1217,9 +1198,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1217 1198
1218 mutex_unlock(&sdata->u.ibss.mtx); 1199 mutex_unlock(&sdata->u.ibss.mtx);
1219 1200
1220 mutex_lock(&local->mtx);
1221 ieee80211_recalc_idle(sdata->local);
1222 mutex_unlock(&local->mtx);
1223
1224 return 0; 1201 return 0;
1225} 1202}
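
cbss->tsf is gone from the join path; the TSF now lives in the RCU-managed IE blob, so the code snapshots it under rcu_read_lock() before use. The shape of that access, with simplified stand-in structs -- this is a kernel-style fragment assuming linux/rcupdate.h and linux/types.h, not a standalone program:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct bss_ies {
	u64 tsf;
	/* variable-length IE data follows */
};

struct bss {
	struct bss_ies __rcu *ies;	/* may be replaced concurrently */
};

static u64 bss_snapshot_tsf(struct bss *cbss)
{
	const struct bss_ies *ies;
	u64 tsf;

	rcu_read_lock();
	ies = rcu_dereference(cbss->ies);	/* valid only inside the lock */
	tsf = ies->tsf;				/* copy the scalar out */
	rcu_read_unlock();

	return tsf;	/* the copy stays valid after the unlock */
}

Only the copied scalar may be used after the unlock; holding on to the ies pointer itself would risk a use-after-free once the blob is replaced.
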
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 42d0d0267730..388580a1bada 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -86,25 +86,11 @@ struct ieee80211_fragment_entry {
86 86
87 87
88struct ieee80211_bss { 88struct ieee80211_bss {
89 /* don't want to look up all the time */ 89 u32 device_ts_beacon, device_ts_presp;
90 size_t ssid_len;
91 u8 ssid[IEEE80211_MAX_SSID_LEN];
92
93 u32 device_ts;
94
95 u8 dtim_period;
96 90
97 bool wmm_used; 91 bool wmm_used;
98 bool uapsd_supported; 92 bool uapsd_supported;
99 93
100 unsigned long last_probe_resp;
101
102#ifdef CONFIG_MAC80211_MESH
103 u8 *mesh_id;
104 size_t mesh_id_len;
105 u8 *mesh_cfg;
106#endif
107
108#define IEEE80211_MAX_SUPP_RATES 32 94#define IEEE80211_MAX_SUPP_RATES 32
109 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 95 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
110 size_t supp_rates_len; 96 size_t supp_rates_len;
@@ -140,7 +126,6 @@ enum ieee80211_bss_corrupt_data_flags {
140 126
141/** 127/**
142 * enum ieee80211_valid_data_flags - BSS valid data flags 128 * enum ieee80211_valid_data_flags - BSS valid data flags
143 * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
144 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE 129 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
145 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE 130 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
146 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE 131 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -151,37 +136,11 @@ enum ieee80211_bss_corrupt_data_flags {
151 * beacon/probe response. 136 * beacon/probe response.
152 */ 137 */
153enum ieee80211_bss_valid_data_flags { 138enum ieee80211_bss_valid_data_flags {
154 IEEE80211_BSS_VALID_DTIM = BIT(0),
155 IEEE80211_BSS_VALID_WMM = BIT(1), 139 IEEE80211_BSS_VALID_WMM = BIT(1),
156 IEEE80211_BSS_VALID_RATES = BIT(2), 140 IEEE80211_BSS_VALID_RATES = BIT(2),
157 IEEE80211_BSS_VALID_ERP = BIT(3) 141 IEEE80211_BSS_VALID_ERP = BIT(3)
158}; 142};
159 143
160static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
161{
162#ifdef CONFIG_MAC80211_MESH
163 return bss->mesh_cfg;
164#endif
165 return NULL;
166}
167
168static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
169{
170#ifdef CONFIG_MAC80211_MESH
171 return bss->mesh_id;
172#endif
173 return NULL;
174}
175
176static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
177{
178#ifdef CONFIG_MAC80211_MESH
179 return bss->mesh_id_len;
180#endif
181 return 0;
182}
183
184
185typedef unsigned __bitwise__ ieee80211_tx_result; 144typedef unsigned __bitwise__ ieee80211_tx_result;
186#define TX_CONTINUE ((__force ieee80211_tx_result) 0u) 145#define TX_CONTINUE ((__force ieee80211_tx_result) 0u)
187#define TX_DROP ((__force ieee80211_tx_result) 1u) 146#define TX_DROP ((__force ieee80211_tx_result) 1u)
@@ -384,6 +343,7 @@ struct ieee80211_mgd_auth_data {
384 u8 key[WLAN_KEY_LEN_WEP104]; 343 u8 key[WLAN_KEY_LEN_WEP104];
385 u8 key_len, key_idx; 344 u8 key_len, key_idx;
386 bool done; 345 bool done;
346 bool timeout_started;
387 347
388 u16 sae_trans, sae_status; 348 u16 sae_trans, sae_status;
389 size_t data_len; 349 size_t data_len;
@@ -403,12 +363,14 @@ struct ieee80211_mgd_assoc_data {
403 u8 ssid_len; 363 u8 ssid_len;
404 u8 supp_rates_len; 364 u8 supp_rates_len;
405 bool wmm, uapsd; 365 bool wmm, uapsd;
406 bool have_beacon; 366 bool have_beacon, need_beacon;
407 bool sent_assoc;
408 bool synced; 367 bool synced;
368 bool timeout_started;
409 369
410 u8 ap_ht_param; 370 u8 ap_ht_param;
411 371
372 struct ieee80211_vht_cap ap_vht_cap;
373
412 size_t ie_len; 374 size_t ie_len;
413 u8 ie[]; 375 u8 ie[];
414}; 376};
@@ -427,6 +389,7 @@ struct ieee80211_if_managed {
427 unsigned long probe_timeout; 389 unsigned long probe_timeout;
428 int probe_send_count; 390 int probe_send_count;
429 bool nullfunc_failed; 391 bool nullfunc_failed;
392 bool connection_loss;
430 393
431 struct mutex mtx; 394 struct mutex mtx;
432 struct cfg80211_bss *associated; 395 struct cfg80211_bss *associated;
@@ -440,6 +403,7 @@ struct ieee80211_if_managed {
440 unsigned long timers_running; /* used for quiesce/restart */ 403 unsigned long timers_running; /* used for quiesce/restart */
441 bool powersave; /* powersave requested for this iface */ 404 bool powersave; /* powersave requested for this iface */
442 bool broken_ap; /* AP is broken -- turn off powersave */ 405 bool broken_ap; /* AP is broken -- turn off powersave */
406 u8 dtim_period;
443 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 407 enum ieee80211_smps_mode req_smps, /* requested smps mode */
444 driver_smps_mode; /* smps mode request */ 408 driver_smps_mode; /* smps mode request */
445 409
@@ -450,6 +414,10 @@ struct ieee80211_if_managed {
450 bool beacon_crc_valid; 414 bool beacon_crc_valid;
451 u32 beacon_crc; 415 u32 beacon_crc;
452 416
417 bool status_acked;
418 bool status_received;
419 __le16 status_fc;
420
453 enum { 421 enum {
454 IEEE80211_MFP_DISABLED, 422 IEEE80211_MFP_DISABLED,
455 IEEE80211_MFP_OPTIONAL, 423 IEEE80211_MFP_OPTIONAL,
@@ -612,6 +580,9 @@ struct ieee80211_if_mesh {
612 u32 mesh_seqnum; 580 u32 mesh_seqnum;
613 bool accepting_plinks; 581 bool accepting_plinks;
614 int num_gates; 582 int num_gates;
583 struct beacon_data __rcu *beacon;
584 /* just protects beacon updates for now */
585 struct mutex mtx;
615 const u8 *ie; 586 const u8 *ie;
616 u8 ie_len; 587 u8 ie_len;
617 enum { 588 enum {
@@ -624,6 +595,11 @@ struct ieee80211_if_mesh {
624 s64 sync_offset_clockdrift_max; 595 s64 sync_offset_clockdrift_max;
625 spinlock_t sync_offset_lock; 596 spinlock_t sync_offset_lock;
626 bool adjusting_tbtt; 597 bool adjusting_tbtt;
598 /* mesh power save */
599 enum nl80211_mesh_power_mode nonpeer_pm;
600 int ps_peers_light_sleep;
601 int ps_peers_deep_sleep;
602 struct ps_data ps;
627}; 603};
628 604
629#ifdef CONFIG_MAC80211_MESH 605#ifdef CONFIG_MAC80211_MESH
@@ -662,10 +638,13 @@ enum ieee80211_sub_if_data_flags {
662 * change handling while the interface is up 638 * change handling while the interface is up
663 * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel 639 * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
664 * mode, so queues are stopped 640 * mode, so queues are stopped
641 * @SDATA_STATE_OFFCHANNEL_BEACON_STOPPED: Beaconing was stopped due
642 * to offchannel, reset when offchannel returns
665 */ 643 */
666enum ieee80211_sdata_state_bits { 644enum ieee80211_sdata_state_bits {
667 SDATA_STATE_RUNNING, 645 SDATA_STATE_RUNNING,
668 SDATA_STATE_OFFCHANNEL, 646 SDATA_STATE_OFFCHANNEL,
647 SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
669}; 648};
670 649
671/** 650/**
@@ -688,6 +667,7 @@ struct ieee80211_chanctx {
688 667
689 enum ieee80211_chanctx_mode mode; 668 enum ieee80211_chanctx_mode mode;
690 int refcount; 669 int refcount;
670 bool driver_present;
691 671
692 struct ieee80211_chanctx_conf conf; 672 struct ieee80211_chanctx_conf conf;
693}; 673};
@@ -714,9 +694,6 @@ struct ieee80211_sub_if_data {
714 694
715 char name[IFNAMSIZ]; 695 char name[IFNAMSIZ];
716 696
717 /* to detect idle changes */
718 bool old_idle;
719
720 /* Fragment table for host-based reassembly */ 697 /* Fragment table for host-based reassembly */
721 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 698 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
722 unsigned int fragment_next; 699 unsigned int fragment_next;
@@ -744,14 +721,15 @@ struct ieee80211_sub_if_data {
744 struct work_struct work; 721 struct work_struct work;
745 struct sk_buff_head skb_queue; 722 struct sk_buff_head skb_queue;
746 723
747 bool arp_filter_state;
748
749 u8 needed_rx_chains; 724 u8 needed_rx_chains;
750 enum ieee80211_smps_mode smps_mode; 725 enum ieee80211_smps_mode smps_mode;
751 726
752 int user_power_level; /* in dBm */ 727 int user_power_level; /* in dBm */
753 int ap_power_level; /* in dBm */ 728 int ap_power_level; /* in dBm */
754 729
730 bool radar_required;
731 struct delayed_work dfs_cac_timer_work;
732
755 /* 733 /*
756 * AP this belongs to: self in AP mode and 734 * AP this belongs to: self in AP mode and
757 * corresponding AP in VLAN mode, NULL for 735 * corresponding AP in VLAN mode, NULL for
@@ -773,6 +751,10 @@ struct ieee80211_sub_if_data {
773 u32 mntr_flags; 751 u32 mntr_flags;
774 } u; 752 } u;
775 753
754 spinlock_t cleanup_stations_lock;
755 struct list_head cleanup_stations;
756 struct work_struct cleanup_stations_wk;
757
776#ifdef CONFIG_MAC80211_DEBUGFS 758#ifdef CONFIG_MAC80211_DEBUGFS
777 struct { 759 struct {
778 struct dentry *dir; 760 struct dentry *dir;
@@ -782,6 +764,11 @@ struct ieee80211_sub_if_data {
782 struct dentry *default_mgmt_key; 764 struct dentry *default_mgmt_key;
783 } debugfs; 765 } debugfs;
784#endif 766#endif
767
768#ifdef CONFIG_PM
769 struct ieee80211_bss_conf suspend_bss_conf;
770#endif
771
785 /* must be last, dynamically sized area in this! */ 772 /* must be last, dynamically sized area in this! */
786 struct ieee80211_vif vif; 773 struct ieee80211_vif vif;
787}; 774};
@@ -830,6 +817,7 @@ enum queue_stop_reason {
830 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 817 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
831 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 818 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
832 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 819 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
820 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
833}; 821};
834 822
835#ifdef CONFIG_MAC80211_LEDS 823#ifdef CONFIG_MAC80211_LEDS
@@ -962,6 +950,10 @@ struct ieee80211_local {
962 /* wowlan is enabled -- don't reconfig on resume */ 950 /* wowlan is enabled -- don't reconfig on resume */
963 bool wowlan; 951 bool wowlan;
964 952
953 /* DFS/radar detection is enabled */
954 bool radar_detect_enabled;
955 struct work_struct radar_detected_work;
956
965 /* number of RX chains the hardware has */ 957 /* number of RX chains the hardware has */
966 u8 rx_chains; 958 u8 rx_chains;
967 959
@@ -976,14 +968,7 @@ struct ieee80211_local {
976 struct sk_buff_head skb_queue; 968 struct sk_buff_head skb_queue;
977 struct sk_buff_head skb_queue_unreliable; 969 struct sk_buff_head skb_queue_unreliable;
978 970
979 /* 971 spinlock_t rx_path_lock;
980 * Internal FIFO queue which is shared between multiple rx path
981 * stages. Its main task is to provide a serialization mechanism,
982 * so all rx handlers can enjoy having exclusive access to their
983 * private data structures.
984 */
985 struct sk_buff_head rx_skb_queue;
986 bool running_rx_handler; /* protected by rx_skb_queue.lock */
987 972
988 /* Station data */ 973 /* Station data */
989 /* 974 /*
@@ -1117,14 +1102,13 @@ struct ieee80211_local {
1117 struct timer_list dynamic_ps_timer; 1102 struct timer_list dynamic_ps_timer;
1118 struct notifier_block network_latency_notifier; 1103 struct notifier_block network_latency_notifier;
1119 struct notifier_block ifa_notifier; 1104 struct notifier_block ifa_notifier;
1105 struct notifier_block ifa6_notifier;
1120 1106
1121 /* 1107 /*
1122 * The dynamic ps timeout configured from user space via WEXT - 1108 * The dynamic ps timeout configured from user space via WEXT -
1123 * this will override whatever is chosen by mac80211 internally. 1109 * this will override whatever is chosen by mac80211 internally.
1124 */ 1110 */
1125 int dynamic_ps_forced_timeout; 1111 int dynamic_ps_forced_timeout;
1126 int dynamic_ps_user_timeout;
1127 bool disable_dynamic_ps;
1128 1112
1129 int user_power_level; /* in dBm, for all interfaces */ 1113 int user_power_level; /* in dBm, for all interfaces */
1130 1114
@@ -1182,40 +1166,41 @@ struct ieee80211_ra_tid {
1182 1166
1183/* Parsed Information Elements */ 1167/* Parsed Information Elements */
1184struct ieee802_11_elems { 1168struct ieee802_11_elems {
1185 u8 *ie_start; 1169 const u8 *ie_start;
1186 size_t total_len; 1170 size_t total_len;
1187 1171
1188 /* pointers to IEs */ 1172 /* pointers to IEs */
1189 u8 *ssid; 1173 const u8 *ssid;
1190 u8 *supp_rates; 1174 const u8 *supp_rates;
1191 u8 *fh_params; 1175 const u8 *fh_params;
1192 u8 *ds_params; 1176 const u8 *ds_params;
1193 u8 *cf_params; 1177 const u8 *cf_params;
1194 struct ieee80211_tim_ie *tim; 1178 const struct ieee80211_tim_ie *tim;
1195 u8 *ibss_params; 1179 const u8 *ibss_params;
1196 u8 *challenge; 1180 const u8 *challenge;
1197 u8 *wpa; 1181 const u8 *rsn;
1198 u8 *rsn; 1182 const u8 *erp_info;
1199 u8 *erp_info; 1183 const u8 *ext_supp_rates;
1200 u8 *ext_supp_rates; 1184 const u8 *wmm_info;
1201 u8 *wmm_info; 1185 const u8 *wmm_param;
1202 u8 *wmm_param; 1186 const struct ieee80211_ht_cap *ht_cap_elem;
1203 struct ieee80211_ht_cap *ht_cap_elem; 1187 const struct ieee80211_ht_operation *ht_operation;
1204 struct ieee80211_ht_operation *ht_operation; 1188 const struct ieee80211_vht_cap *vht_cap_elem;
1205 struct ieee80211_vht_cap *vht_cap_elem; 1189 const struct ieee80211_vht_operation *vht_operation;
1206 struct ieee80211_vht_operation *vht_operation; 1190 const struct ieee80211_meshconf_ie *mesh_config;
1207 struct ieee80211_meshconf_ie *mesh_config; 1191 const u8 *mesh_id;
1208 u8 *mesh_id; 1192 const u8 *peering;
1209 u8 *peering; 1193 const __le16 *awake_window;
1210 u8 *preq; 1194 const u8 *preq;
1211 u8 *prep; 1195 const u8 *prep;
1212 u8 *perr; 1196 const u8 *perr;
1213 struct ieee80211_rann_ie *rann; 1197 const struct ieee80211_rann_ie *rann;
1214 struct ieee80211_channel_sw_ie *ch_switch_ie; 1198 const struct ieee80211_channel_sw_ie *ch_switch_ie;
1215 u8 *country_elem; 1199 const u8 *country_elem;
1216 u8 *pwr_constr_elem; 1200 const u8 *pwr_constr_elem;
1217 u8 *quiet_elem; /* first quiet element */ 1201 const u8 *quiet_elem; /* first quiet element */
1218 u8 *timeout_int; 1202 const u8 *timeout_int;
1203 const u8 *opmode_notif;
1219 1204
1220 /* length of them, respectively */ 1205 /* length of them, respectively */
1221 u8 ssid_len; 1206 u8 ssid_len;
@@ -1226,7 +1211,6 @@ struct ieee802_11_elems {
1226 u8 tim_len; 1211 u8 tim_len;
1227 u8 ibss_params_len; 1212 u8 ibss_params_len;
1228 u8 challenge_len; 1213 u8 challenge_len;
1229 u8 wpa_len;
1230 u8 rsn_len; 1214 u8 rsn_len;
1231 u8 erp_info_len; 1215 u8 erp_info_len;
1232 u8 ext_supp_rates_len; 1216 u8 ext_supp_rates_len;
@@ -1295,10 +1279,10 @@ void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
1295int ieee80211_max_network_latency(struct notifier_block *nb, 1279int ieee80211_max_network_latency(struct notifier_block *nb,
1296 unsigned long data, void *dummy); 1280 unsigned long data, void *dummy);
1297int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); 1281int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
1298void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1282void
1299 struct ieee80211_channel_sw_ie *sw_elem, 1283ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1300 struct ieee80211_bss *bss, 1284 const struct ieee80211_channel_sw_ie *sw_elem,
1301 u64 timestamp); 1285 struct ieee80211_bss *bss, u64 timestamp);
1302void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); 1286void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
1303void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); 1287void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1304void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); 1288void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
@@ -1307,6 +1291,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1307void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1291void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1308void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1292void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1309void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); 1293void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1294void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
1295 __le16 fc, bool acked);
1310 1296
1311/* IBSS code */ 1297/* IBSS code */
1312void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1298void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1329,9 +1315,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1329 1315
1330/* scan/BSS handling */ 1316/* scan/BSS handling */
1331void ieee80211_scan_work(struct work_struct *work); 1317void ieee80211_scan_work(struct work_struct *work);
1332int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 1318int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
1333 const u8 *ssid, u8 ssid_len, 1319 const u8 *ssid, u8 ssid_len,
1334 struct ieee80211_channel *chan); 1320 struct ieee80211_channel *chan);
1335int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 1321int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
1336 struct cfg80211_scan_request *req); 1322 struct cfg80211_scan_request *req);
1337void ieee80211_scan_cancel(struct ieee80211_local *local); 1323void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1345,8 +1331,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
1345 struct ieee80211_mgmt *mgmt, 1331 struct ieee80211_mgmt *mgmt,
1346 size_t len, 1332 size_t len,
1347 struct ieee802_11_elems *elems, 1333 struct ieee802_11_elems *elems,
1348 struct ieee80211_channel *channel, 1334 struct ieee80211_channel *channel);
1349 bool beacon);
1350void ieee80211_rx_bss_put(struct ieee80211_local *local, 1335void ieee80211_rx_bss_put(struct ieee80211_local *local,
1351 struct ieee80211_bss *bss); 1336 struct ieee80211_bss *bss);
1352 1337
@@ -1357,10 +1342,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
1357void ieee80211_sched_scan_stopped_work(struct work_struct *work); 1342void ieee80211_sched_scan_stopped_work(struct work_struct *work);
1358 1343
1359/* off-channel helpers */ 1344/* off-channel helpers */
1360void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 1345void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
1361 bool offchannel_ps_enable); 1346void ieee80211_offchannel_return(struct ieee80211_local *local);
1362void ieee80211_offchannel_return(struct ieee80211_local *local,
1363 bool offchannel_ps_disable);
1364void ieee80211_roc_setup(struct ieee80211_local *local); 1347void ieee80211_roc_setup(struct ieee80211_local *local);
1365void ieee80211_start_next_roc(struct ieee80211_local *local); 1348void ieee80211_start_next_roc(struct ieee80211_local *local);
1366void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); 1349void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
@@ -1405,10 +1388,10 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
1405/* HT */ 1388/* HT */
1406void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 1389void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
1407 struct ieee80211_sta_ht_cap *ht_cap); 1390 struct ieee80211_sta_ht_cap *ht_cap);
1408void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 1391bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
1409 struct ieee80211_supported_band *sband, 1392 struct ieee80211_supported_band *sband,
1410 struct ieee80211_ht_cap *ht_cap_ie, 1393 const struct ieee80211_ht_cap *ht_cap_ie,
1411 struct ieee80211_sta_ht_cap *ht_cap); 1394 struct sta_info *sta);
1412void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1395void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
1413 const u8 *da, u16 tid, 1396 const u8 *da, u16 tid,
1414 u16 initiator, u16 reason_code); 1397 u16 initiator, u16 reason_code);
@@ -1421,7 +1404,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
1421 u16 initiator, u16 reason, bool stop); 1404 u16 initiator, u16 reason, bool stop);
1422void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 1405void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
1423 u16 initiator, u16 reason, bool stop); 1406 u16 initiator, u16 reason, bool stop);
1424void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx); 1407void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
1408 enum ieee80211_agg_stop_reason reason);
1425void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, 1409void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
1426 struct sta_info *sta, 1410 struct sta_info *sta,
1427 struct ieee80211_mgmt *mgmt, size_t len); 1411 struct ieee80211_mgmt *mgmt, size_t len);
@@ -1435,11 +1419,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
1435 size_t len); 1419 size_t len);
1436 1420
1437int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1421int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1438 enum ieee80211_back_parties initiator, 1422 enum ieee80211_agg_stop_reason reason);
1439 bool tx);
1440int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1423int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1441 enum ieee80211_back_parties initiator, 1424 enum ieee80211_agg_stop_reason reason);
1442 bool tx);
1443void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); 1425void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
1444void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1426void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
1445void ieee80211_ba_session_work(struct work_struct *work); 1427void ieee80211_ba_session_work(struct work_struct *work);
@@ -1449,10 +1431,17 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
1449u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs); 1431u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
1450 1432
1451/* VHT */ 1433/* VHT */
1452void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 1434void
1453 struct ieee80211_supported_band *sband, 1435ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
1454 struct ieee80211_vht_cap *vht_cap_ie, 1436 struct ieee80211_supported_band *sband,
1455 struct ieee80211_sta_vht_cap *vht_cap); 1437 const struct ieee80211_vht_cap *vht_cap_ie,
1438 struct sta_info *sta);
1439enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
1440void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1441void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1442 struct sta_info *sta, u8 opmode,
1443 enum ieee80211_band band, bool nss_only);
1444
1456/* Spectrum management */ 1445/* Spectrum management */
1457void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1446void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1458 struct ieee80211_mgmt *mgmt, 1447 struct ieee80211_mgmt *mgmt,
@@ -1570,8 +1559,9 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1570 1559
1571void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1560void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1572 u16 transaction, u16 auth_alg, u16 status, 1561 u16 transaction, u16 auth_alg, u16 status,
1573 u8 *extra, size_t extra_len, const u8 *bssid, 1562 const u8 *extra, size_t extra_len, const u8 *bssid,
1574 const u8 *da, const u8 *key, u8 key_len, u8 key_idx); 1563 const u8 *da, const u8 *key, u8 key_len, u8 key_idx,
1564 u32 tx_flags);
1575void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 1565void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1576 const u8 *bssid, u16 stype, u16 reason, 1566 const u8 *bssid, u16 stype, u16 reason,
1577 bool send_frame, u8 *frame_buf); 1567 bool send_frame, u8 *frame_buf);
@@ -1588,7 +1578,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1588void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1578void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1589 const u8 *ssid, size_t ssid_len, 1579 const u8 *ssid, size_t ssid_len,
1590 const u8 *ie, size_t ie_len, 1580 const u8 *ie, size_t ie_len,
1591 u32 ratemask, bool directed, bool no_cck, 1581 u32 ratemask, bool directed, u32 tx_flags,
1592 struct ieee80211_channel *channel, bool scan); 1582 struct ieee80211_channel *channel, bool scan);
1593 1583
1594void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1584void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1620,17 +1610,31 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1620 1610
1621/* channel management */ 1611/* channel management */
1622void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, 1612void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1623 struct ieee80211_ht_operation *ht_oper, 1613 const struct ieee80211_ht_operation *ht_oper,
1624 struct cfg80211_chan_def *chandef); 1614 struct cfg80211_chan_def *chandef);
1625 1615
1626int __must_check 1616int __must_check
1627ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, 1617ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1628 const struct cfg80211_chan_def *chandef, 1618 const struct cfg80211_chan_def *chandef,
1629 enum ieee80211_chanctx_mode mode); 1619 enum ieee80211_chanctx_mode mode);
1620int __must_check
1621ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
1622 const struct cfg80211_chan_def *chandef,
1623 u32 *changed);
1630void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata); 1624void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
1625void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
1626void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
1627 bool clear);
1631 1628
1632void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, 1629void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
1633 struct ieee80211_chanctx *chanctx); 1630 struct ieee80211_chanctx *chanctx);
1631void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
1632 struct ieee80211_chanctx *chanctx);
1633
1634void ieee80211_dfs_cac_timer(unsigned long data);
1635void ieee80211_dfs_cac_timer_work(struct work_struct *work);
1636void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
1637void ieee80211_dfs_radar_detected_work(struct work_struct *work);
1634 1638
1635#ifdef CONFIG_MAC80211_NOINLINE 1639#ifdef CONFIG_MAC80211_NOINLINE
1636#define debug_noinline noinline 1640#define debug_noinline noinline
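
A pattern worth noting in these prototypes: the (initiator, tx) flag pair on the BA-teardown paths is replaced by a single enum ieee80211_agg_stop_reason, and callees derive the old booleans from it. A compressed illustration -- AGG_STOP_DECLINED is invented here to pad the enum, while the other three enumerators appear in the hunks above:

#include <stdbool.h>

enum agg_stop_reason {
	AGG_STOP_DECLINED,		/* invented for this sketch */
	AGG_STOP_LOCAL_REQUEST,
	AGG_STOP_PEER_REQUEST,
	AGG_STOP_DESTROY_STA,
};

/* "Should we still transmit a DELBA?" falls out of the reason, as in
 * the ieee80211_sta_tear_down_BA_sessions() hunk in ht.c above. */
static bool agg_stop_needs_tx(enum agg_stop_reason reason)
{
	return reason != AGG_STOP_DESTROY_STA &&
	       reason != AGG_STOP_PEER_REQUEST;
}

One enum argument keeps every call site honest about why the session is being torn down, instead of threading two loosely related booleans through the stack.
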
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 09a80b55cf5a..2c059e54e885 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -78,8 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); 78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
79} 79}
80 80
81static u32 ieee80211_idle_off(struct ieee80211_local *local, 81static u32 ieee80211_idle_off(struct ieee80211_local *local)
82 const char *reason)
83{ 82{
84 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 83 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
85 return 0; 84 return 0;
@@ -99,125 +98,51 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
 	return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
+void ieee80211_recalc_idle(struct ieee80211_local *local)
 {
-	struct ieee80211_sub_if_data *sdata;
-	int count = 0;
-	bool working = false, scanning = false;
+	bool working = false, scanning, active;
 	unsigned int led_trig_start = 0, led_trig_stop = 0;
 	struct ieee80211_roc_work *roc;
+	u32 change;
 
-#ifdef CONFIG_PROVE_LOCKING
-	WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
-		!lockdep_is_held(&local->iflist_mtx));
-#endif
 	lockdep_assert_held(&local->mtx);
 
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata)) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-
-		sdata->old_idle = sdata->vif.bss_conf.idle;
-
-		/* do not count disabled managed interfaces */
-		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-		    !sdata->u.mgd.associated &&
-		    !sdata->u.mgd.auth_data &&
-		    !sdata->u.mgd.assoc_data) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-		/* do not count unused IBSS interfaces */
-		if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-		    !sdata->u.ibss.ssid_len) {
-			sdata->vif.bss_conf.idle = true;
-			continue;
-		}
-
-		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
-			continue;
-
-		/* count everything else */
-		sdata->vif.bss_conf.idle = false;
-		count++;
-	}
+	active = !list_empty(&local->chanctx_list);
 
 	if (!local->ops->remain_on_channel) {
 		list_for_each_entry(roc, &local->roc_list, list) {
 			working = true;
-			roc->sdata->vif.bss_conf.idle = false;
+			break;
 		}
 	}
 
-	sdata = rcu_dereference_protected(local->scan_sdata,
-					  lockdep_is_held(&local->mtx));
-	if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
-		scanning = true;
-		sdata->vif.bss_conf.idle = false;
-	}
-
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
-			continue;
-		if (sdata->old_idle == sdata->vif.bss_conf.idle)
-			continue;
-		if (!ieee80211_sdata_running(sdata))
-			continue;
-		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
-	}
+	scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+		   test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
 
 	if (working || scanning)
 		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
 	else
 		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
 
-	if (count)
+	if (active)
 		led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
 	else
 		led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
 
 	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
 
-	if (working)
-		return ieee80211_idle_off(local, "working");
-	if (scanning)
-		return ieee80211_idle_off(local, "scanning");
-	if (!count)
-		return ieee80211_idle_on(local);
+	if (working || scanning || active)
+		change = ieee80211_idle_off(local);
 	else
-		return ieee80211_idle_off(local, "in use");
-
-	return 0;
-}
-
-void ieee80211_recalc_idle(struct ieee80211_local *local)
-{
-	u32 chg;
-
-	mutex_lock(&local->iflist_mtx);
-	chg = __ieee80211_recalc_idle(local);
-	mutex_unlock(&local->iflist_mtx);
-	if (chg)
-		ieee80211_hw_config(local, chg);
+		change = ieee80211_idle_on(local);
+	if (change)
+		ieee80211_hw_config(local, change);
 }
 
 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
 {
-	int meshhdrlen;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
-
-	/* FIX: what would be proper limits for MTU?
-	 * This interface uses 802.3 frames. */
-	if (new_mtu < 256 ||
-	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
+	if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN)
 		return -EINVAL;
-	}
 
 	dev->mtu = new_mtu;
 	return 0;
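
A reader's note on the idle rework above: the per-interface bookkeeping (count,
old_idle, BSS_CHANGED_IDLE notifications) is gone; idleness now follows from
three activity sources only. A standalone model of the new decision, in plain C
with illustrative names (not mac80211 API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Models ieee80211_recalc_idle() after the rework: the hardware may
     * go idle only when no channel context exists ("active"), no software
     * remain-on-channel item is queued ("working"), and no scan runs. */
    static bool hw_may_idle(bool have_chanctx, bool roc_queued, bool scanning)
    {
        bool active = have_chanctx;  /* !list_empty(&local->chanctx_list) */
        bool working = roc_queued;   /* roc_list walk, drivers without HW ROC */

        return !(working || scanning || active);
    }

    int main(void)
    {
        printf("nothing going on -> idle: %d\n", hw_may_idle(false, false, false));
        printf("scanning         -> idle: %d\n", hw_may_idle(false, false, true));
        return 0;
    }
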
@@ -369,7 +294,8 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
 		}
 	}
 
-	if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
+	if ((sdata->vif.type != NL80211_IFTYPE_AP &&
+	     sdata->vif.type != NL80211_IFTYPE_MESH_POINT) ||
 	    !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
 		sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
 		return 0;
@@ -586,11 +512,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
-		/* no need to tell driver, but set carrier */
-		if (rtnl_dereference(sdata->bss->beacon))
+		/* no need to tell driver, but set carrier and chanctx */
+		if (rtnl_dereference(sdata->bss->beacon)) {
+			ieee80211_vif_vlan_copy_chanctx(sdata);
 			netif_carrier_on(dev);
-		else
+		} else {
 			netif_carrier_off(dev);
+		}
 		break;
 	case NL80211_IFTYPE_MONITOR:
 		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -628,6 +556,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		goto err_del_interface;
 	}
 
+	drv_add_interface_debugfs(local, sdata);
+
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		local->fif_pspoll++;
 		local->fif_probe_req++;
@@ -701,10 +631,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 	if (sdata->flags & IEEE80211_SDATA_PROMISC)
 		atomic_inc(&local->iff_promiscs);
 
-	mutex_lock(&local->mtx);
-	hw_reconf_flags |= __ieee80211_recalc_idle(local);
-	mutex_unlock(&local->mtx);
-
 	if (coming_up)
 		local->open_count++;
 
@@ -754,7 +680,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	unsigned long flags;
 	struct sk_buff *skb, *tmp;
 	u32 hw_reconf_flags = 0;
-	int i;
+	int i, flushed;
+	struct ps_data *ps;
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
@@ -769,6 +696,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 	ieee80211_roc_purge(sdata);
 
+	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+		ieee80211_mgd_stop(sdata);
+
 	/*
 	 * Remove all stations associated with this interface.
 	 *
@@ -779,11 +709,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	 * (because if we remove a STA after ops->remove_interface()
 	 * the driver will have removed the vif info already!)
 	 *
-	 * This is relevant only in AP, WDS and mesh modes, since in
-	 * all other modes we've already removed all stations when
-	 * disconnecting etc.
+	 * This is relevant only in WDS mode, in all other modes we've
+	 * already removed all stations when disconnecting or similar,
+	 * so warn otherwise.
+	 *
+	 * We call sta_info_flush_cleanup() later, to combine RCU waits.
 	 */
-	sta_info_flush(local, sdata);
+	flushed = sta_info_flush_defer(sdata);
+	WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
+		     (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1));
 
 	/*
 	 * Don't count this interface for promisc/allmulti while it
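
The station flush is now split in two: sta_info_flush_defer() unlinks and parks
the stations, and sta_info_flush_cleanup() frees them after one shared
rcu_barrier(). A minimal userspace model of that two-phase pattern
(illustrative types; the grace-period wait between the phases is elided --
this compiles as-is but is a sketch, not the kernel API):

    #include <stddef.h>
    #include <stdlib.h>

    struct sta { struct sta *next; };

    /* Phase 1, per interface: unlink and park stations on a cleanup list,
     * returning how many were flushed (cf. sta_info_flush_defer()). */
    static int flush_defer(struct sta **stations, struct sta **cleanup)
    {
        int flushed = 0;

        while (*stations) {
            struct sta *s = *stations;
            *stations = s->next;
            s->next = *cleanup;
            *cleanup = s;
            flushed++;
        }
        return flushed;
    }

    /* Phase 2, once, after the single shared wait: free everything parked
     * (cf. rcu_barrier() + sta_info_flush_cleanup()). */
    static void flush_cleanup(struct sta **cleanup)
    {
        while (*cleanup) {
            struct sta *s = *cleanup;
            *cleanup = s->next;
            free(s);
        }
    }
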
@@ -820,6 +754,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 	cancel_work_sync(&sdata->recalc_smps);
 
+	cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+
+	if (sdata->wdev.cac_started) {
+		mutex_lock(&local->iflist_mtx);
+		ieee80211_vif_release_channel(sdata);
+		mutex_unlock(&local->iflist_mtx);
+		cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
+				   GFP_KERNEL);
+	}
+
 	/* APs need special treatment */
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
@@ -829,8 +773,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 					 u.vlan.list)
 			dev_close(vlan->dev);
 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
-	} else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-		ieee80211_mgd_stop(sdata);
+	} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		/* remove all packets in parent bc_buf pointing to this dev */
+		ps = &sdata->bss->ps;
+
+		spin_lock_irqsave(&ps->bc_buf.lock, flags);
+		skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
+			if (skb->dev == sdata->dev) {
+				__skb_unlink(skb, &ps->bc_buf);
+				local->total_ps_buffered--;
+				ieee80211_free_txskb(&local->hw, skb);
+			}
+		}
+		spin_unlock_irqrestore(&ps->bc_buf.lock, flags);
 	}
 
 	if (going_down)
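
The new AP_VLAN branch walks the parent AP's broadcast buffer and unlinks only
the frames destined for the vanishing VLAN device; skb_queue_walk_safe() is the
standard unlink-while-walking helper. The same pattern, modelled in plain C
with illustrative types (dev_id stands in for skb->dev):

    #include <stdlib.h>

    struct pkt { struct pkt *next; int dev_id; };

    /* Drop every packet belonging to dev_id from a singly linked queue,
     * mirroring the skb_queue_walk_safe()/__skb_unlink() loop above. */
    static int purge_dev(struct pkt **head, int dev_id)
    {
        int dropped = 0;
        struct pkt **pp = head;

        while (*pp) {
            struct pkt *p = *pp;
            if (p->dev_id == dev_id) {
                *pp = p->next;  /* unlink before freeing */
                free(p);
                dropped++;
            } else {
                pp = &p->next;
            }
        }
        return dropped;
    }
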
@@ -839,6 +794,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
 		list_del(&sdata->u.vlan.list);
+		rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
 		/* no need to tell driver */
 		break;
 	case NL80211_IFTYPE_MONITOR:
@@ -865,19 +821,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	cancel_work_sync(&sdata->work);
 	/*
 	 * When we get here, the interface is marked down.
-	 * Call rcu_barrier() to wait both for the RX path
-	 * should it be using the interface and enqueuing
-	 * frames at this very time on another CPU, and
-	 * for the sta free call_rcu callbacks.
+	 *
+	 * sta_info_flush_cleanup() requires rcu_barrier()
+	 * first to wait for the station call_rcu() calls
+	 * to complete, here we need at least sychronize_rcu()
+	 * it to wait for the RX path in case it is using the
+	 * interface and enqueuing frames at this very time on
+	 * another CPU.
 	 */
 	rcu_barrier();
-
-	/*
-	 * free_sta_rcu() enqueues a work for the actual
-	 * sta cleanup, so we need to flush it while
-	 * sdata is still valid.
-	 */
-	flush_workqueue(local->workqueue);
+	sta_info_flush_cleanup(sdata);
 
 	skb_queue_purge(&sdata->skb_queue);
 
@@ -887,16 +840,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	 */
 	ieee80211_free_keys(sdata);
 
+	drv_remove_interface_debugfs(local, sdata);
+
 	if (going_down)
 		drv_remove_interface(local, sdata);
 	}
 
 	sdata->bss = NULL;
 
-	mutex_lock(&local->mtx);
-	hw_reconf_flags |= __ieee80211_recalc_idle(local);
-	mutex_unlock(&local->mtx);
-
 	ieee80211_recalc_ps(local, -1);
 
 	if (local->open_count == 0) {
@@ -976,7 +927,6 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  */
 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_local *local = sdata->local;
 	int flushed;
 	int i;
 
@@ -992,7 +942,7 @@ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_rmc_free(sdata);
 
-	flushed = sta_info_flush(local, sdata);
+	flushed = sta_info_flush(sdata);
 	WARN_ON(flushed);
 }
 
@@ -1233,6 +1183,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	case NL80211_IFTYPE_AP:
 		skb_queue_head_init(&sdata->u.ap.ps.bc_buf);
 		INIT_LIST_HEAD(&sdata->u.ap.vlans);
+		sdata->vif.bss_conf.bssid = sdata->vif.addr;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 		type = NL80211_IFTYPE_STATION;
@@ -1240,9 +1191,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 		sdata->vif.p2p = true;
 		/* fall through */
 	case NL80211_IFTYPE_STATION:
+		sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
 		ieee80211_sta_setup_sdata(sdata);
 		break;
 	case NL80211_IFTYPE_ADHOC:
+		sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
 		ieee80211_ibss_setup_sdata(sdata);
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
@@ -1256,8 +1209,12 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 			      MONITOR_FLAG_OTHER_BSS;
 		break;
 	case NL80211_IFTYPE_WDS:
+		sdata->vif.bss_conf.bssid = NULL;
+		break;
 	case NL80211_IFTYPE_AP_VLAN:
+		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
+		sdata->vif.bss_conf.bssid = sdata->vif.addr;
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
@@ -1498,6 +1455,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 	mutex_unlock(&local->iflist_mtx);
 }
 
+static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
+
+	ieee80211_cleanup_sdata_stas(sdata);
+}
+
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 		     struct wireless_dev **new_wdev, enum nl80211_iftype type,
 		     struct vif_params *params)
@@ -1564,15 +1530,18 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	/* initialise type-independent data */
 	sdata->wdev.wiphy = local->hw.wiphy;
 	sdata->local = local;
-#ifdef CONFIG_INET
-	sdata->arp_filter_state = true;
-#endif
 
 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
 		skb_queue_head_init(&sdata->fragments[i].skb_list);
 
 	INIT_LIST_HEAD(&sdata->key_list);
 
+	spin_lock_init(&sdata->cleanup_stations_lock);
+	INIT_LIST_HEAD(&sdata->cleanup_stations);
+	INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
+	INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
+			  ieee80211_dfs_cac_timer_work);
+
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 		struct ieee80211_supported_band *sband;
 		sband = local->hw.wiphy->bands[i];
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 619c5d697999..ef252eb58c36 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -204,8 +204,11 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
 	if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
 		key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
-	if (uni)
+	if (uni) {
 		rcu_assign_pointer(sdata->default_unicast_key, key);
+		drv_set_default_unicast_key(sdata->local, sdata, idx);
+	}
+
 	if (multi)
 		rcu_assign_pointer(sdata->default_multicast_key, key);
 
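
The key.c fix pairs the RCU publication of the new default unicast key with a
driver notification, so hardware that offloads default-key selection cannot
drift out of sync with the stack. A toy model of that publish-and-notify
pairing (stand-in names, no RCU; not mac80211 API):

    #include <stdio.h>

    struct keystate { int stack_idx; int hw_idx; };

    /* Whenever the published index changes, update the "hardware" copy in
     * the same step -- the ordering the added drv_* call enforces. */
    static void set_default_unicast_key(struct keystate *ks, int idx)
    {
        ks->stack_idx = idx;  /* rcu_assign_pointer(...) in mac80211 */
        ks->hw_idx = idx;     /* drv_set_default_unicast_key(...), the added call */
    }

    int main(void)
    {
        struct keystate ks = { -1, -1 };

        set_default_unicast_key(&ks, 2);
        printf("stack=%d hw=%d\n", ks.stack_idx, ks.hw_idx);
        return 0;
    }
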
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1b087fff93e7..d0dd11153a6c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -23,6 +23,7 @@
 #include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
+#include <net/addrconf.h>
 
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -33,8 +34,6 @@
 #include "cfg.h"
 #include "debugfs.h"
 
-static struct lock_class_key ieee80211_rx_skb_queue_class;
-
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
 	u64 mc;
@@ -207,76 +206,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 				      u32 changed)
 {
 	struct ieee80211_local *local = sdata->local;
-	static const u8 zero[ETH_ALEN] = { 0 };
 
 	if (!changed)
 		return;
 
-	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-		sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
-	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
-		sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
-	else if (sdata->vif.type == NL80211_IFTYPE_AP)
-		sdata->vif.bss_conf.bssid = sdata->vif.addr;
-	else if (sdata->vif.type == NL80211_IFTYPE_WDS)
-		sdata->vif.bss_conf.bssid = NULL;
-	else if (ieee80211_vif_is_mesh(&sdata->vif)) {
-		sdata->vif.bss_conf.bssid = zero;
-	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
-		sdata->vif.bss_conf.bssid = sdata->vif.addr;
-		WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
-			  "P2P Device BSS changed %#x", changed);
-	} else {
-		WARN_ON(1);
-		return;
-	}
-
-	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_AP:
-	case NL80211_IFTYPE_ADHOC:
-	case NL80211_IFTYPE_WDS:
-	case NL80211_IFTYPE_MESH_POINT:
-		break;
-	default:
-		/* do not warn to simplify caller in scan.c */
-		changed &= ~BSS_CHANGED_BEACON_ENABLED;
-		if (WARN_ON(changed & BSS_CHANGED_BEACON))
-			return;
-		break;
-	}
-
-	if (changed & BSS_CHANGED_BEACON_ENABLED) {
-		if (local->quiescing || !ieee80211_sdata_running(sdata) ||
-		    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
-			sdata->vif.bss_conf.enable_beacon = false;
-		} else {
-			/*
-			 * Beacon should be enabled, but AP mode must
-			 * check whether there is a beacon configured.
-			 */
-			switch (sdata->vif.type) {
-			case NL80211_IFTYPE_AP:
-				sdata->vif.bss_conf.enable_beacon =
-					!!sdata->u.ap.beacon;
-				break;
-			case NL80211_IFTYPE_ADHOC:
-				sdata->vif.bss_conf.enable_beacon =
-					!!sdata->u.ibss.presp;
-				break;
-#ifdef CONFIG_MAC80211_MESH
-			case NL80211_IFTYPE_MESH_POINT:
-				sdata->vif.bss_conf.enable_beacon =
-					!!sdata->u.mesh.mesh_id_len;
-				break;
-#endif
-			default:
-				/* not reached */
-				WARN_ON(1);
-				break;
-			}
-		}
-	}
-
 	drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
 }
 
@@ -415,27 +348,19 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 
 	/* Copy the addresses to the bss_conf list */
 	ifa = idev->ifa_list;
-	while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
-		bss_conf->arp_addr_list[c] = ifa->ifa_address;
+	while (ifa) {
+		if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN)
+			bss_conf->arp_addr_list[c] = ifa->ifa_address;
 		ifa = ifa->ifa_next;
 		c++;
 	}
 
-	/* If not all addresses fit the list, disable filtering */
-	if (ifa) {
-		sdata->arp_filter_state = false;
-		c = 0;
-	} else {
-		sdata->arp_filter_state = true;
-	}
 	bss_conf->arp_addr_cnt = c;
 
 	/* Configure driver only if associated (which also implies it is up) */
-	if (ifmgd->associated) {
-		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+	if (ifmgd->associated)
 		ieee80211_bss_info_change_notify(sdata,
 						 BSS_CHANGED_ARP_FILTER);
-	}
 
 	mutex_unlock(&ifmgd->mtx);
 
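
Note the changed loop shape above: it now counts every address but stores only
the first IEEE80211_BSS_ARP_ADDR_LIST_LEN of them, so arp_addr_cnt can exceed
the array size and consumers must clamp before indexing. A compilable model of
that count-with-cap idiom (illustrative constant and types):

    #include <stdio.h>

    #define ARP_ADDR_LIST_LEN 4

    /* Count everything, store only what fits; the caller learns the true
     * total from the return value and clamps before indexing. */
    static int copy_addrs(const unsigned int *in, int n_in,
                          unsigned int out[ARP_ADDR_LIST_LEN])
    {
        int c = 0;
        for (int i = 0; i < n_in; i++) {
            if (c < ARP_ADDR_LIST_LEN)
                out[c] = in[i];
            c++;
        }
        return c;  /* may exceed ARP_ADDR_LIST_LEN */
    }

    int main(void)
    {
        unsigned int in[6] = {1, 2, 3, 4, 5, 6}, out[ARP_ADDR_LIST_LEN];
        int cnt = copy_addrs(in, 6, out);
        printf("stored %d of %d\n",
               cnt < ARP_ADDR_LIST_LEN ? cnt : ARP_ADDR_LIST_LEN, cnt);
        return 0;
    }
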
@@ -443,6 +368,37 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_IPV6)
+static int ieee80211_ifa6_changed(struct notifier_block *nb,
+				  unsigned long data, void *arg)
+{
+	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg;
+	struct inet6_dev *idev = ifa->idev;
+	struct net_device *ndev = ifa->idev->dev;
+	struct ieee80211_local *local =
+		container_of(nb, struct ieee80211_local, ifa6_notifier);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+	struct ieee80211_sub_if_data *sdata;
+
+	/* Make sure it's our interface that got changed */
+	if (!wdev || wdev->wiphy != local->hw.wiphy)
+		return NOTIFY_DONE;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+
+	/*
+	 * For now only support station mode. This is mostly because
+	 * doing AP would have to handle AP_VLAN in some way ...
+	 */
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return NOTIFY_DONE;
+
+	drv_ipv6_addr_change(local, sdata, idev);
+
+	return NOTIFY_DONE;
+}
+#endif
+
 static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct ieee80211_local *local =
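
ieee80211_ifa6_changed() is a textbook notifier callback: it recovers the
enclosing ieee80211_local from the embedded notifier_block via container_of().
The same recovery trick in a self-contained userspace sketch (illustrative
names, not the kernel's notifier API):

    #include <stddef.h>
    #include <stdio.h>

    struct notifier { int (*call)(struct notifier *nb, void *arg); };
    struct local_state {
        int addr_changes;
        struct notifier ifa6_notifier;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The callback gets only the embedded block; pointer arithmetic
     * recovers the enclosing state, exactly as the hunk does. */
    static int ifa6_changed(struct notifier *nb, void *arg)
    {
        struct local_state *local =
            container_of(nb, struct local_state, ifa6_notifier);
        (void)arg;
        local->addr_changes++;
        return 0;
    }

    int main(void)
    {
        struct local_state local = { 0, { ifa6_changed } };

        local.ifa6_notifier.call(&local.ifa6_notifier, NULL);
        printf("changes seen: %d\n", local.addr_changes);
        return 0;
    }
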
@@ -537,6 +493,7 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
 
 	.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
 				IEEE80211_HT_CAP_MAX_AMSDU |
+				IEEE80211_HT_CAP_SGI_20 |
 				IEEE80211_HT_CAP_SGI_40),
 	.mcs = {
 		.rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -544,6 +501,11 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
 	},
 };
 
+static const u8 extended_capabilities[] = {
+	0, 0, 0, 0, 0, 0, 0,
+	WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 					const struct ieee80211_ops *ops)
 {
@@ -600,6 +562,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 			WIPHY_FLAG_REPORTS_OBSS |
 			WIPHY_FLAG_OFFCHAN_TX;
 
+	wiphy->extended_capabilities = extended_capabilities;
+	wiphy->extended_capabilities_mask = extended_capabilities;
+	wiphy->extended_capabilities_len = ARRAY_SIZE(extended_capabilities);
+
 	if (ops->remain_on_channel)
 		wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
@@ -653,25 +619,19 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	mutex_init(&local->key_mtx);
 	spin_lock_init(&local->filter_lock);
+	spin_lock_init(&local->rx_path_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
 
 	INIT_LIST_HEAD(&local->chanctx_list);
 	mutex_init(&local->chanctx_mtx);
 
-	/*
-	 * The rx_skb_queue is only accessed from tasklets,
-	 * but other SKB queues are used from within IRQ
-	 * context. Therefore, this one needs a different
-	 * locking class so our direct, non-irq-safe use of
-	 * the queue's lock doesn't throw lockdep warnings.
-	 */
-	skb_queue_head_init_class(&local->rx_skb_queue,
-				  &ieee80211_rx_skb_queue_class);
-
 	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
 
+	INIT_WORK(&local->radar_detected_work,
+		  ieee80211_dfs_radar_detected_work);
+
 	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
 	local->smps_mode = IEEE80211_SMPS_OFF;
 
@@ -747,9 +707,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		return -EINVAL;
 #endif
 
-	if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
-		return -EINVAL;
-
 	if (!local->use_chanctx) {
 		for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
 			const struct ieee80211_iface_combination *comb;
@@ -767,6 +724,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		 */
 		if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
 			return -EINVAL;
+
+		/* DFS currently not supported with channel context drivers */
+		for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
+			const struct ieee80211_iface_combination *comb;
+
+			comb = &local->hw.wiphy->iface_combinations[i];
+
+			if (comb->radar_detect_widths)
+				return -EINVAL;
+		}
 	}
 
772 /* Only HW csum features are currently compatible with mac80211 */ 739 /* Only HW csum features are currently compatible with mac80211 */
@@ -1049,12 +1016,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1049 goto fail_ifa; 1016 goto fail_ifa;
1050#endif 1017#endif
1051 1018
1019#if IS_ENABLED(CONFIG_IPV6)
1020 local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed;
1021 result = register_inet6addr_notifier(&local->ifa6_notifier);
1022 if (result)
1023 goto fail_ifa6;
1024#endif
1025
1052 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll, 1026 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
1053 local->hw.napi_weight); 1027 local->hw.napi_weight);
1054 1028
1055 return 0; 1029 return 0;
1056 1030
1031#if IS_ENABLED(CONFIG_IPV6)
1032 fail_ifa6:
1057#ifdef CONFIG_INET 1033#ifdef CONFIG_INET
1034 unregister_inetaddr_notifier(&local->ifa_notifier);
1035#endif
1036#endif
1037#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
1058 fail_ifa: 1038 fail_ifa:
1059 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 1039 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
1060 &local->network_latency_notifier); 1040 &local->network_latency_notifier);
@@ -1090,6 +1070,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #ifdef CONFIG_INET
 	unregister_inetaddr_notifier(&local->ifa_notifier);
 #endif
+#if IS_ENABLED(CONFIG_IPV6)
+	unregister_inet6addr_notifier(&local->ifa6_notifier);
+#endif
 
 	rtnl_lock();
 
@@ -1113,7 +1096,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 		wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
-	skb_queue_purge(&local->rx_skb_queue);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
@@ -1191,8 +1173,7 @@ static void __exit ieee80211_exit(void)
 	rc80211_minstrel_ht_exit();
 	rc80211_minstrel_exit();
 
-	if (mesh_allocated)
-		ieee80211s_stop();
+	ieee80211s_stop();
 
 	ieee80211_iface_exit();
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 1bf03f9ff3ba..29ce2aa87e7b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -17,19 +17,14 @@
 #define TMR_RUNNING_MP 1
 #define TMR_RUNNING_MPR 2
 
-int mesh_allocated;
+static int mesh_allocated;
 static struct kmem_cache *rm_cache;
 
-#ifdef CONFIG_MAC80211_MESH
 bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
 {
 	return (mgmt->u.action.u.mesh_action.action_code ==
 			WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
 }
-#else
-bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
-{ return false; }
-#endif
 
 void ieee80211s_init(void)
 {
@@ -41,6 +36,8 @@ void ieee80211s_init(void)
 
 void ieee80211s_stop(void)
 {
+	if (!mesh_allocated)
+		return;
 	mesh_pathtbl_unregister();
 	kmem_cache_destroy(rm_cache);
 }
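
Moving the mesh_allocated check inside ieee80211s_stop() makes teardown
idempotent, which is why the caller in main.c could drop its own guard (see
the earlier ieee80211_exit() hunk). A minimal userspace model of the
guard-in-callee pattern (illustrative stand-ins for the mesh state):

    #include <stdbool.h>
    #include <stdlib.h>

    static bool mesh_allocated;
    static void *rm_cache;

    /* Teardown checks its own precondition, so callers may invoke it
     * unconditionally and repeatedly. */
    static void mesh_stop(void)
    {
        if (!mesh_allocated)
            return;
        free(rm_cache);
        rm_cache = NULL;
        mesh_allocated = false;
    }

    int main(void)
    {
        mesh_stop();  /* safe even though nothing was allocated */
        return 0;
    }
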
@@ -95,24 +92,22 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 	    (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
 	    (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
 	    (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
-		goto mismatch;
+		return false;
 
 	ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
 				&basic_rates);
 
 	if (sdata->vif.bss_conf.basic_rates != basic_rates)
-		goto mismatch;
+		return false;
 
 	ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
 				     ie->ht_operation, &sta_chan_def);
 
 	if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
 					 &sta_chan_def))
-		goto mismatch;
+		return false;
 
 	return true;
-mismatch:
-	return false;
 }
 
 /**
@@ -123,7 +118,7 @@ mismatch:
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
 {
 	return (ie->mesh_config->meshconf_cap &
-			IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
+		IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
 }
 
 /**
@@ -154,6 +149,31 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 	return changed;
 }
 
+/*
+ * mesh_sta_cleanup - clean up any mesh sta state
+ *
+ * @sta: mesh sta to clean up.
+ */
+void mesh_sta_cleanup(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed;
+
+	/*
+	 * maybe userspace handles peer allocation and peering, but in either
+	 * case the beacon is still generated by the kernel and we might need
+	 * an update.
+	 */
+	changed = mesh_accept_plinks_update(sdata);
+	if (sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
+		changed |= mesh_plink_deactivate(sta);
+		del_timer_sync(&sta->plink_timer);
+	}
+
+	if (changed)
+		ieee80211_mbss_info_change_notify(sdata, changed);
+}
+
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
 {
 	int i;
@@ -163,7 +183,7 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
 		return -ENOMEM;
 	sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
 	for (i = 0; i < RMC_BUCKETS; i++)
-		INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
+		INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
 	return 0;
 }
 
@@ -176,11 +196,12 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
 	if (!sdata->u.mesh.rmc)
 		return;
 
-	for (i = 0; i < RMC_BUCKETS; i++)
-		list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
+	for (i = 0; i < RMC_BUCKETS; i++) {
+		list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
 			list_del(&p->list);
 			kmem_cache_free(rm_cache, p);
 		}
+	}
 
 	kfree(rmc);
 	sdata->u.mesh.rmc = NULL;
@@ -189,6 +210,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
 /**
  * mesh_rmc_check - Check frame in recent multicast cache and add if absent.
  *
+ * @sdata: interface
  * @sa: source address
  * @mesh_hdr: mesh_header
  *
@@ -198,8 +220,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
  * received this frame lately. If the frame is not in the cache, it is added to
  * it.
  */
-int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
-		   struct ieee80211_sub_if_data *sdata)
+int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
+		   const u8 *sa, struct ieee80211s_hdr *mesh_hdr)
 {
 	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
 	u32 seqnum = 0;
@@ -210,15 +232,14 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
 	/* Don't care about endianness since only match matters */
 	memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
 	idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
-	list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
+	list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
 		++entries;
 		if (time_after(jiffies, p->exp_time) ||
-		    (entries == RMC_QUEUE_MAX_LEN)) {
+		    entries == RMC_QUEUE_MAX_LEN) {
 			list_del(&p->list);
 			kmem_cache_free(rm_cache, p);
 			--entries;
-		} else if ((seqnum == p->seqnum) &&
-			   (ether_addr_equal(sa, p->sa)))
+		} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
 			return -1;
 	}
 
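
mesh_rmc_check() is a bounded, bucketed duplicate-suppression cache: hash the
mesh sequence number into a bucket, expire or evict while walking, and report
a duplicate on a (seqnum, SA) match. A compact userspace model of the same
idea (fixed-size buckets, no timed expiry; names are illustrative):

    #include <stdbool.h>
    #include <string.h>

    #define BUCKETS    16
    #define PER_BUCKET 4

    struct rmc_entry { unsigned int seqnum; unsigned char sa[6]; bool used; };
    static struct rmc_entry cache[BUCKETS][PER_BUCKET];

    /* Return true if (sa, seqnum) was seen recently, else record it; the
     * shift on insert plays the role of the RMC_QUEUE_MAX_LEN eviction. */
    static bool rmc_seen(const unsigned char sa[6], unsigned int seqnum)
    {
        struct rmc_entry *bucket = cache[seqnum % BUCKETS];
        int i;

        for (i = 0; i < PER_BUCKET; i++)
            if (bucket[i].used && bucket[i].seqnum == seqnum &&
                !memcmp(bucket[i].sa, sa, 6))
                return true;

        memmove(&bucket[0], &bucket[1],
                (PER_BUCKET - 1) * sizeof(*bucket));
        bucket[PER_BUCKET - 1].seqnum = seqnum;
        memcpy(bucket[PER_BUCKET - 1].sa, sa, 6);
        bucket[PER_BUCKET - 1].used = true;
        return false;
    }
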
@@ -229,12 +250,12 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
 	p->seqnum = seqnum;
 	p->exp_time = jiffies + RMC_TIMEOUT;
 	memcpy(p->sa, sa, ETH_ALEN);
-	list_add(&p->list, &rmc->bucket[idx].list);
+	list_add(&p->list, &rmc->bucket[idx]);
 	return 0;
 }
 
-int
-mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
+			 struct sk_buff *skb)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u8 *pos, neighbors;
@@ -265,16 +286,18 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 	/* Mesh capability */
 	*pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
 	*pos |= ifmsh->accepting_plinks ?
-			IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+		IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+	/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
+	*pos |= ifmsh->ps_peers_deep_sleep ?
+		IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
 	*pos++ |= ifmsh->adjusting_tbtt ?
 		IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
 	*pos++ = 0x00;
 
 	return 0;
 }
 
-int
-mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u8 *pos;
@@ -291,8 +314,31 @@ mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 	return 0;
 }
 
-int
-mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+static int mesh_add_awake_window_ie(struct ieee80211_sub_if_data *sdata,
+				    struct sk_buff *skb)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	u8 *pos;
+
+	/* see IEEE802.11-2012 13.14.6 */
+	if (ifmsh->ps_peers_light_sleep == 0 &&
+	    ifmsh->ps_peers_deep_sleep == 0 &&
+	    ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE)
+		return 0;
+
+	if (skb_tailroom(skb) < 4)
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + 2);
+	*pos++ = WLAN_EID_MESH_AWAKE_WINDOW;
+	*pos++ = 2;
+	put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos);
+
+	return 0;
+}
+
+int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u8 offset, len;
@@ -315,8 +361,7 @@ mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 	return 0;
 }
 
-int
-mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u8 len = 0;
@@ -344,11 +389,9 @@ mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 	return 0;
 }
 
-int mesh_add_ds_params_ie(struct sk_buff *skb,
-			  struct ieee80211_sub_if_data *sdata)
+static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata,
+				 struct sk_buff *skb)
 {
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_supported_band *sband;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *chan;
 	u8 *pos;
@@ -365,19 +408,16 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
 	chan = chanctx_conf->def.chan;
 	rcu_read_unlock();
 
-	sband = local->hw.wiphy->bands[chan->band];
-	if (sband->band == IEEE80211_BAND_2GHZ) {
-		pos = skb_put(skb, 2 + 1);
-		*pos++ = WLAN_EID_DS_PARAMS;
-		*pos++ = 1;
-		*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
-	}
+	pos = skb_put(skb, 2 + 1);
+	*pos++ = WLAN_EID_DS_PARAMS;
+	*pos++ = 1;
+	*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
 
 	return 0;
 }
 
-int mesh_add_ht_cap_ie(struct sk_buff *skb,
-		       struct ieee80211_sub_if_data *sdata)
+int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
+		       struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
 	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
@@ -398,8 +438,8 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
 	return 0;
 }
 
-int mesh_add_ht_oper_ie(struct sk_buff *skb,
-			struct ieee80211_sub_if_data *sdata)
+int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *chanctx_conf;
@@ -434,6 +474,7 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
 
 	return 0;
 }
+
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
@@ -479,7 +520,7 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
 
 /**
  * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
  * @hdr: 802.11 frame header
  * @fc: frame control field
  * @meshda: destination address in the mesh
  * @meshsa: source address address in the mesh. Same as TA, as frame is
@@ -510,8 +551,8 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
 
 /**
  * ieee80211_new_mesh_header - create a new mesh header
- * @meshhdr: uninitialized mesh header
  * @sdata: mesh interface to be used
+ * @meshhdr: uninitialized mesh header
  * @addr4or5: 1st address in the ae header, which may correspond to address 4
  *            (if addr6 is NULL) or address 5 (if addr6 is present). It may
  *            be NULL.
@@ -520,42 +561,49 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
  *
  * Return the header length.
  */
-int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
-			      struct ieee80211_sub_if_data *sdata, char *addr4or5,
-			      char *addr6)
+int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
+			      struct ieee80211s_hdr *meshhdr,
+			      const char *addr4or5, const char *addr6)
 {
-	int aelen = 0;
-	BUG_ON(!addr4or5 && addr6);
+	if (WARN_ON(!addr4or5 && addr6))
+		return 0;
+
 	memset(meshhdr, 0, sizeof(*meshhdr));
+
 	meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
+
+	/* FIXME: racy -- TX on multiple queues can be concurrent */
 	put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
 	sdata->u.mesh.mesh_seqnum++;
+
 	if (addr4or5 && !addr6) {
 		meshhdr->flags |= MESH_FLAGS_AE_A4;
-		aelen += ETH_ALEN;
 		memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
+		return 2 * ETH_ALEN;
 	} else if (addr4or5 && addr6) {
 		meshhdr->flags |= MESH_FLAGS_AE_A5_A6;
-		aelen += 2 * ETH_ALEN;
 		memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
 		memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
+		return 3 * ETH_ALEN;
 	}
-	return 6 + aelen;
+
+	return ETH_ALEN;
 }
 
-static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
-					struct ieee80211_if_mesh *ifmsh)
+static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
 {
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u32 changed;
 
 	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
 	mesh_path_expire(sdata);
 
 	changed = mesh_accept_plinks_update(sdata);
-	ieee80211_bss_info_change_notify(sdata, changed);
+	ieee80211_mbss_info_change_notify(sdata, changed);
 
 	mod_timer(&ifmsh->housekeeping_timer,
-		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
+		  round_jiffies(jiffies +
+				IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
 }
 
 static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
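
The rewritten ieee80211_new_mesh_header() returns the header length per
address-extension mode directly; the arithmetic works because the fixed part
of the mesh header is 6 bytes, the same as ETH_ALEN. A standalone check of
those lengths (plain C, illustrative only):

    #include <assert.h>

    #define ETH_ALEN 6

    /* Length of the mesh header for each address-extension (AE) mode, as
     * returned by the rewritten ieee80211_new_mesh_header(). */
    static int mesh_hdr_len(int have_addr4or5, int have_addr6)
    {
        if (have_addr4or5 && !have_addr6)
            return 2 * ETH_ALEN;  /* 6-byte base + eaddr1 */
        if (have_addr4or5 && have_addr6)
            return 3 * ETH_ALEN;  /* 6-byte base + eaddr1 + eaddr2 */
        return ETH_ALEN;          /* base header only */
    }

    int main(void)
    {
        assert(mesh_hdr_len(0, 0) == 6);
        assert(mesh_hdr_len(1, 0) == 12);
        assert(mesh_hdr_len(1, 1) == 18);
        return 0;
    }
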
@@ -603,10 +651,149 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
 }
 #endif
 
-void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
+static int
+ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
+{
+	struct beacon_data *bcn;
+	int head_len, tail_len;
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum ieee80211_band band;
+	u8 *pos;
+	struct ieee80211_sub_if_data *sdata;
+	int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+		      sizeof(mgmt->u.beacon);
+
+	sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
+	head_len = hdr_len +
+		   2 + /* NULL SSID */
+		   2 + 8 + /* supported rates */
+		   2 + 3; /* DS params */
+	tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+		   2 + sizeof(struct ieee80211_ht_cap) +
+		   2 + sizeof(struct ieee80211_ht_operation) +
+		   2 + ifmsh->mesh_id_len +
+		   2 + sizeof(struct ieee80211_meshconf_ie) +
+		   2 + sizeof(__le16) + /* awake window */
+		   ifmsh->ie_len;
+
+	bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
+	/* need an skb for IE builders to operate on */
+	skb = dev_alloc_skb(max(head_len, tail_len));
+
+	if (!bcn || !skb)
+		goto out_free;
+
+	/*
+	 * pointers go into the block we allocated,
+	 * memory is | beacon_data | head | tail |
+	 */
+	bcn->head = ((u8 *) bcn) + sizeof(*bcn);
+
+	/* fill in the head */
+	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+	memset(mgmt, 0, hdr_len);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_BEACON);
+	eth_broadcast_addr(mgmt->da);
+	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+	ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt);
+	mgmt->u.beacon.beacon_int =
+		cpu_to_le16(sdata->vif.bss_conf.beacon_int);
+	mgmt->u.beacon.capab_info |= cpu_to_le16(
+		sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
+
+	pos = skb_put(skb, 2);
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = 0x0;
+
+	if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+	    mesh_add_ds_params_ie(sdata, skb))
+		goto out_free;
+
+	bcn->head_len = skb->len;
+	memcpy(bcn->head, skb->data, bcn->head_len);
+
+	/* now the tail */
+	skb_trim(skb, 0);
+	bcn->tail = bcn->head + bcn->head_len;
+
+	if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+	    mesh_add_rsn_ie(sdata, skb) ||
+	    mesh_add_ht_cap_ie(sdata, skb) ||
+	    mesh_add_ht_oper_ie(sdata, skb) ||
+	    mesh_add_meshid_ie(sdata, skb) ||
+	    mesh_add_meshconf_ie(sdata, skb) ||
+	    mesh_add_awake_window_ie(sdata, skb) ||
+	    mesh_add_vendor_ies(sdata, skb))
+		goto out_free;
+
+	bcn->tail_len = skb->len;
+	memcpy(bcn->tail, skb->data, bcn->tail_len);
+
+	dev_kfree_skb(skb);
+	rcu_assign_pointer(ifmsh->beacon, bcn);
+	return 0;
+out_free:
+	kfree(bcn);
+	dev_kfree_skb(skb);
+	return -ENOMEM;
+}
+
+static int
+ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct beacon_data *old_bcn;
+	int ret;
+	sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
+
+	mutex_lock(&ifmsh->mtx);
+
+	old_bcn = rcu_dereference_protected(ifmsh->beacon,
+					    lockdep_is_held(&ifmsh->mtx));
+	ret = ieee80211_mesh_build_beacon(ifmsh);
+	if (ret)
+		/* just reuse old beacon */
+		goto out;
+
+	if (old_bcn)
+		kfree_rcu(old_bcn, rcu_head);
+out:
+	mutex_unlock(&ifmsh->mtx);
+	return ret;
+}
+
+void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+				       u32 changed)
+{
+	if (sdata->vif.bss_conf.enable_beacon &&
+	    (changed & (BSS_CHANGED_BEACON |
+			BSS_CHANGED_HT |
+			BSS_CHANGED_BASIC_RATES |
+			BSS_CHANGED_BEACON_INT)))
+		if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh))
+			return;
+	ieee80211_bss_info_change_notify(sdata, changed);
+}
+
+int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	u32 changed = BSS_CHANGED_BEACON |
+		      BSS_CHANGED_BEACON_ENABLED |
+		      BSS_CHANGED_HT |
+		      BSS_CHANGED_BASIC_RATES |
+		      BSS_CHANGED_BEACON_INT;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 
 	local->fif_other_bss++;
 	/* mesh ifaces must set allmulti to forward mcast traffic */
@@ -624,34 +811,51 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 	ieee80211_queue_work(&local->hw, &sdata->work);
 	sdata->vif.bss_conf.ht_operation_mode =
 		ifmsh->mshcfg.ht_opmode;
-	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
+	sdata->vif.bss_conf.enable_beacon = true;
 	sdata->vif.bss_conf.basic_rates =
-		ieee80211_mandatory_rates(sdata->local,
-					  ieee80211_get_sdata_band(sdata));
-	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
-					 BSS_CHANGED_BEACON_ENABLED |
-					 BSS_CHANGED_HT |
-					 BSS_CHANGED_BASIC_RATES |
-					 BSS_CHANGED_BEACON_INT);
+		ieee80211_mandatory_rates(local, band);
+
+	changed |= ieee80211_mps_local_status_update(sdata);
+
+	if (ieee80211_mesh_build_beacon(ifmsh)) {
+		ieee80211_stop_mesh(sdata);
+		return -ENOMEM;
+	}
+
+	ieee80211_bss_info_change_notify(sdata, changed);
 
 	netif_carrier_on(sdata->dev);
+	return 0;
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct beacon_data *bcn;
 
 	netif_carrier_off(sdata->dev);
 
 	/* stop the beacon */
 	ifmsh->mesh_id_len = 0;
+	sdata->vif.bss_conf.enable_beacon = false;
+	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+	mutex_lock(&ifmsh->mtx);
+	bcn = rcu_dereference_protected(ifmsh->beacon,
+					lockdep_is_held(&ifmsh->mtx));
+	rcu_assign_pointer(ifmsh->beacon, NULL);
+	kfree_rcu(bcn, rcu_head);
+	mutex_unlock(&ifmsh->mtx);
 
 	/* flush STAs and mpaths on this iface */
-	sta_info_flush(sdata->local, sdata);
+	sta_info_flush(sdata);
 	mesh_path_flush_by_iface(sdata);
 
+	/* free all potentially still buffered group-addressed frames */
+	local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
+	skb_queue_purge(&ifmsh->ps.bc_buf);
+
 	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_timer);
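
ieee80211_mesh_build_beacon() above backs the descriptor, head and tail with a
single allocation (memory is | beacon_data | head | tail |), builds the IEs in
a scratch skb, snapshots them into the block, and only then publishes it via
rcu_assign_pointer(). The single-allocation layout in isolation, as a hedged
userspace sketch (illustrative types; kernel publication step noted in a
comment only):

    #include <stdlib.h>
    #include <string.h>

    struct beacon_data {
        unsigned char *head, *tail;
        int head_len, tail_len;
    };

    /* One calloc backs descriptor + head + tail, so a single free (or
     * kfree_rcu in the kernel) releases the whole snapshot atomically. */
    static struct beacon_data *beacon_alloc(const void *head, int head_len,
                                            const void *tail, int tail_len)
    {
        struct beacon_data *bcn =
            calloc(1, sizeof(*bcn) + head_len + tail_len);

        if (!bcn)
            return NULL;
        bcn->head = (unsigned char *)bcn + sizeof(*bcn);
        bcn->tail = bcn->head + head_len;
        memcpy(bcn->head, head, head_len);
        memcpy(bcn->tail, tail, tail_len);
        bcn->head_len = head_len;
        bcn->tail_len = tail_len;
        return bcn;  /* publish with rcu_assign_pointer() in the kernel */
    }
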
@@ -671,6 +875,62 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	sdata->u.mesh.timers_running = 0;
 }
 
+static void
+ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
+			    struct ieee80211_mgmt *mgmt, size_t len)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct sk_buff *presp;
+	struct beacon_data *bcn;
+	struct ieee80211_mgmt *hdr;
+	struct ieee802_11_elems elems;
+	size_t baselen;
+	u8 *pos, *end;
+
+	end = ((u8 *) mgmt) + len;
+	pos = mgmt->u.probe_req.variable;
+	baselen = (u8 *) pos - (u8 *) mgmt;
+	if (baselen > len)
+		return;
+
+	ieee802_11_parse_elems(pos, len - baselen, &elems);
+
+	/* 802.11-2012 10.1.4.3.2 */
+	if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
+	     !is_broadcast_ether_addr(mgmt->da)) ||
+	    elems.ssid_len != 0)
+		return;
+
+	if (elems.mesh_id_len != 0 &&
+	    (elems.mesh_id_len != ifmsh->mesh_id_len ||
+	     memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
+		return;
+
+	rcu_read_lock();
+	bcn = rcu_dereference(ifmsh->beacon);
+
+	if (!bcn)
+		goto out;
+
+	presp = dev_alloc_skb(local->tx_headroom +
+			      bcn->head_len + bcn->tail_len);
+	if (!presp)
+		goto out;
+
+	skb_reserve(presp, local->tx_headroom);
+	memcpy(skb_put(presp, bcn->head_len), bcn->head, bcn->head_len);
+	memcpy(skb_put(presp, bcn->tail_len), bcn->tail, bcn->tail_len);
+	hdr = (struct ieee80211_mgmt *) presp->data;
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					 IEEE80211_STYPE_PROBE_RESP);
+	memcpy(hdr->da, mgmt->sa, ETH_ALEN);
+	IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+	ieee80211_tx_skb(sdata, presp);
+out:
+	rcu_read_unlock();
+}
+
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 					u16 stype,
 					struct ieee80211_mgmt *mgmt,
@@ -760,6 +1020,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
760 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, 1020 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
761 rx_status); 1021 rx_status);
762 break; 1022 break;
1023 case IEEE80211_STYPE_PROBE_REQ:
1024 ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len);
1025 break;
763 case IEEE80211_STYPE_ACTION: 1026 case IEEE80211_STYPE_ACTION:
764 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); 1027 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
765 break; 1028 break;
@@ -782,7 +1045,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
782 mesh_mpp_table_grow(); 1045 mesh_mpp_table_grow();
783 1046
784 if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) 1047 if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
785 ieee80211_mesh_housekeeping(sdata, ifmsh); 1048 ieee80211_mesh_housekeeping(sdata);
786 1049
787 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) 1050 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
788 ieee80211_mesh_rootpath(sdata); 1051 ieee80211_mesh_rootpath(sdata);
@@ -805,6 +1068,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
805void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) 1068void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
806{ 1069{
807 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1070 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1071 static u8 zero_addr[ETH_ALEN] = {};
808 1072
809 setup_timer(&ifmsh->housekeeping_timer, 1073 setup_timer(&ifmsh->housekeeping_timer,
810 ieee80211_mesh_housekeeping_timer, 1074 ieee80211_mesh_housekeeping_timer,
@@ -828,6 +1092,11 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
828 ieee80211_mesh_path_root_timer, 1092 ieee80211_mesh_path_root_timer,
829 (unsigned long) sdata); 1093 (unsigned long) sdata);
830 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 1094 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
1095 skb_queue_head_init(&ifmsh->ps.bc_buf);
831 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 1096 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
832 spin_lock_init(&ifmsh->sync_offset_lock); 1097 spin_lock_init(&ifmsh->sync_offset_lock);
1098 RCU_INIT_POINTER(ifmsh->beacon, NULL);
1099 mutex_init(&ifmsh->mtx);
1100
1101 sdata->vif.bss_conf.bssid = zero_addr;
833} 1102}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7c9215fb2ac8..336c88a16687 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -26,12 +26,12 @@
26 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding 26 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
27 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path 27 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
28 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence 28 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
29 * number 29 * number
30 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be 30 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
31 * modified 31 * modified
32 * @MESH_PATH_RESOLVED: the mesh path has been resolved 32 * @MESH_PATH_RESOLVED: the mesh path has been resolved
33 * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination 33 * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
34 * already queued up, waiting for the discovery process to start. 34 * already queued up, waiting for the discovery process to start.
35 * 35 *
36 * MESH_PATH_RESOLVED is used by the mesh path timer to 36 * MESH_PATH_RESOLVED is used by the mesh path timer to
37 * decide when to stop or cancel the mesh path discovery. 37 * decide when to stop or cancel the mesh path discovery.
@@ -73,16 +73,16 @@ enum mesh_deferred_task_flags {
73 * @dst: mesh path destination mac address 73 * @dst: mesh path destination mac address
74 * @sdata: mesh subif 74 * @sdata: mesh subif
75 * @next_hop: mesh neighbor to which frames for this destination will be 75 * @next_hop: mesh neighbor to which frames for this destination will be
76 * forwarded 76 * forwarded
77 * @timer: mesh path discovery timer 77 * @timer: mesh path discovery timer
78 * @frame_queue: pending queue for frames sent to this destination while the 78 * @frame_queue: pending queue for frames sent to this destination while the
79 * path is unresolved 79 * path is unresolved
80 * @sn: target sequence number 80 * @sn: target sequence number
81 * @metric: current metric to this destination 81 * @metric: current metric to this destination
82 * @hop_count: hops to destination 82 * @hop_count: hops to destination
83 * @exp_time: in jiffies, when the path will expire or when it expired 83 * @exp_time: in jiffies, when the path will expire or when it expired
84 * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery 84 * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery
85 * retry 85 * retry
86 * @discovery_retries: number of discovery retries 86 * @discovery_retries: number of discovery retries
87 * @flags: mesh path flags, as specified on &enum mesh_path_flags 87 * @flags: mesh path flags, as specified on &enum mesh_path_flags
88 * @state_lock: mesh path state lock used to protect changes to the 88 * @state_lock: mesh path state lock used to protect changes to the
@@ -184,15 +184,13 @@ struct rmc_entry {
184}; 184};
185 185
186struct mesh_rmc { 186struct mesh_rmc {
187 struct rmc_entry bucket[RMC_BUCKETS]; 187 struct list_head bucket[RMC_BUCKETS];
188 u32 idx_mask; 188 u32 idx_mask;
189}; 189};
190 190
191#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) 191#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
192#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) 192#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
193 193
194#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
195
196#define MESH_PATH_EXPIRE (600 * HZ) 194#define MESH_PATH_EXPIRE (600 * HZ)
197 195
198/* Default maximum number of plinks per interface */ 196/* Default maximum number of plinks per interface */
@@ -208,95 +206,113 @@ struct mesh_rmc {
208/* Various */ 206/* Various */
209int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 207int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
210 const u8 *da, const u8 *sa); 208 const u8 *da, const u8 *sa);
211int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 209int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
212 struct ieee80211_sub_if_data *sdata, char *addr4or5, 210 struct ieee80211s_hdr *meshhdr,
213 char *addr6); 211 const char *addr4or5, const char *addr6);
214int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 212int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
215 struct ieee80211_sub_if_data *sdata); 213 const u8 *addr, struct ieee80211s_hdr *mesh_hdr);
216bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, 214bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
217 struct ieee802_11_elems *ie); 215 struct ieee802_11_elems *ie);
218void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); 216void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
219void mesh_mgmt_ies_add(struct sk_buff *skb, 217void mesh_mgmt_ies_add(struct ieee80211_sub_if_data *sdata,
220 struct ieee80211_sub_if_data *sdata); 218 struct sk_buff *skb);
221int mesh_add_meshconf_ie(struct sk_buff *skb, 219int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
222 struct ieee80211_sub_if_data *sdata); 220 struct sk_buff *skb);
223int mesh_add_meshid_ie(struct sk_buff *skb, 221int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata,
224 struct ieee80211_sub_if_data *sdata); 222 struct sk_buff *skb);
225int mesh_add_rsn_ie(struct sk_buff *skb, 223int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata,
226 struct ieee80211_sub_if_data *sdata); 224 struct sk_buff *skb);
227int mesh_add_vendor_ies(struct sk_buff *skb, 225int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
228 struct ieee80211_sub_if_data *sdata); 226 struct sk_buff *skb);
229int mesh_add_ds_params_ie(struct sk_buff *skb, 227int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
230 struct ieee80211_sub_if_data *sdata); 228 struct sk_buff *skb);
231int mesh_add_ht_cap_ie(struct sk_buff *skb, 229int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
232 struct ieee80211_sub_if_data *sdata); 230 struct sk_buff *skb);
233int mesh_add_ht_oper_ie(struct sk_buff *skb,
234 struct ieee80211_sub_if_data *sdata);
235void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 231void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
236int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 232int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
237void ieee80211s_init(void); 233void ieee80211s_init(void);
238void ieee80211s_update_metric(struct ieee80211_local *local, 234void ieee80211s_update_metric(struct ieee80211_local *local,
239 struct sta_info *sta, struct sk_buff *skb); 235 struct sta_info *sta, struct sk_buff *skb);
240void ieee80211s_stop(void);
241void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 236void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
242void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 237int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
243void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 238void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
244void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 239void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
245const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); 240const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
241/* wrapper for ieee80211_bss_info_change_notify() */
242void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
243 u32 changed);
244
245/* mesh power save */
246u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata);
247u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
248 enum nl80211_mesh_power_mode pm);
249void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
250 struct sta_info *sta,
251 struct ieee80211_hdr *hdr);
252void ieee80211_mps_sta_status_update(struct sta_info *sta);
253void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
254 struct ieee80211_hdr *hdr);
255void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
256 bool tx, bool acked);
257void ieee80211_mps_frame_release(struct sta_info *sta,
258 struct ieee802_11_elems *elems);
246 259
247/* Mesh paths */ 260/* Mesh paths */
248int mesh_nexthop_lookup(struct sk_buff *skb, 261int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
249 struct ieee80211_sub_if_data *sdata); 262 struct sk_buff *skb);
250int mesh_nexthop_resolve(struct sk_buff *skb, 263int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
251 struct ieee80211_sub_if_data *sdata); 264 struct sk_buff *skb);
252void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); 265void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
253struct mesh_path *mesh_path_lookup(u8 *dst, 266struct mesh_path *mesh_path_lookup(struct ieee80211_sub_if_data *sdata,
254 struct ieee80211_sub_if_data *sdata); 267 const u8 *dst);
255struct mesh_path *mpp_path_lookup(u8 *dst, 268struct mesh_path *mpp_path_lookup(struct ieee80211_sub_if_data *sdata,
256 struct ieee80211_sub_if_data *sdata); 269 const u8 *dst);
257int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); 270int mpp_path_add(struct ieee80211_sub_if_data *sdata,
258struct mesh_path *mesh_path_lookup_by_idx(int idx, 271 const u8 *dst, const u8 *mpp);
259 struct ieee80211_sub_if_data *sdata); 272struct mesh_path *
273mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx);
260void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 274void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
261void mesh_path_expire(struct ieee80211_sub_if_data *sdata); 275void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
262void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 276void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
263 struct ieee80211_mgmt *mgmt, size_t len); 277 struct ieee80211_mgmt *mgmt, size_t len);
264int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 278int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst);
265 279
266int mesh_path_add_gate(struct mesh_path *mpath); 280int mesh_path_add_gate(struct mesh_path *mpath);
267int mesh_path_send_to_gates(struct mesh_path *mpath); 281int mesh_path_send_to_gates(struct mesh_path *mpath);
268int mesh_gate_num(struct ieee80211_sub_if_data *sdata); 282int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
283
269/* Mesh plinks */ 284/* Mesh plinks */
270void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, 285void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
271 u8 *hw_addr, 286 u8 *hw_addr, struct ieee802_11_elems *ie);
272 struct ieee802_11_elems *ie);
273bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 287bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
274u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 288u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
275void mesh_plink_broken(struct sta_info *sta); 289void mesh_plink_broken(struct sta_info *sta);
276void mesh_plink_deactivate(struct sta_info *sta); 290u32 mesh_plink_deactivate(struct sta_info *sta);
277int mesh_plink_open(struct sta_info *sta); 291u32 mesh_plink_open(struct sta_info *sta);
278void mesh_plink_block(struct sta_info *sta); 292u32 mesh_plink_block(struct sta_info *sta);
279void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, 293void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
280 struct ieee80211_mgmt *mgmt, size_t len, 294 struct ieee80211_mgmt *mgmt, size_t len,
281 struct ieee80211_rx_status *rx_status); 295 struct ieee80211_rx_status *rx_status);
296void mesh_sta_cleanup(struct sta_info *sta);
282 297
283/* Private interfaces */ 298/* Private interfaces */
284/* Mesh tables */ 299/* Mesh tables */
285void mesh_mpath_table_grow(void); 300void mesh_mpath_table_grow(void);
286void mesh_mpp_table_grow(void); 301void mesh_mpp_table_grow(void);
287/* Mesh paths */ 302/* Mesh paths */
288int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, 303int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
289 const u8 *ra, struct ieee80211_sub_if_data *sdata); 304 u8 ttl, const u8 *target, __le32 target_sn,
305 __le16 target_rcode, const u8 *ra);
290void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 306void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
291void mesh_path_flush_pending(struct mesh_path *mpath); 307void mesh_path_flush_pending(struct mesh_path *mpath);
292void mesh_path_tx_pending(struct mesh_path *mpath); 308void mesh_path_tx_pending(struct mesh_path *mpath);
293int mesh_pathtbl_init(void); 309int mesh_pathtbl_init(void);
294void mesh_pathtbl_unregister(void); 310void mesh_pathtbl_unregister(void);
295int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); 311int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
296void mesh_path_timer(unsigned long data); 312void mesh_path_timer(unsigned long data);
297void mesh_path_flush_by_nexthop(struct sta_info *sta); 313void mesh_path_flush_by_nexthop(struct sta_info *sta);
298void mesh_path_discard_frame(struct sk_buff *skb, 314void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
299 struct ieee80211_sub_if_data *sdata); 315 struct sk_buff *skb);
300void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 316void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
301void mesh_path_restart(struct ieee80211_sub_if_data *sdata); 317void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
302void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); 318void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
@@ -305,7 +321,19 @@ bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
305extern int mesh_paths_generation; 321extern int mesh_paths_generation;
306 322
307#ifdef CONFIG_MAC80211_MESH 323#ifdef CONFIG_MAC80211_MESH
308extern int mesh_allocated; 324static inline
325u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
326{
327 atomic_inc(&sdata->u.mesh.estab_plinks);
328 return mesh_accept_plinks_update(sdata);
329}
330
331static inline
332u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
333{
334 atomic_dec(&sdata->u.mesh.estab_plinks);
335 return mesh_accept_plinks_update(sdata);
336}
309 337
310static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) 338static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
311{ 339{
@@ -337,8 +365,8 @@ void mesh_plink_quiesce(struct sta_info *sta);
337void mesh_plink_restart(struct sta_info *sta); 365void mesh_plink_restart(struct sta_info *sta);
338void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); 366void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
339void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); 367void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
368void ieee80211s_stop(void);
340#else 369#else
341#define mesh_allocated 0
342static inline void 370static inline void
343ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} 371ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
344static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) 372static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
@@ -351,6 +379,7 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
351{ return false; } 379{ return false; }
352static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) 380static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
353{} 381{}
382static inline void ieee80211s_stop(void) {}
354#endif 383#endif
355 384
356#endif /* IEEE80211S_H */ 385#endif /* IEEE80211S_H */
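
Note that the estab_plinks helpers moved into this header return a BSS_CHANGED_* bitmap instead of notifying directly, so a caller can batch several sources of change into one notification. A hedged sketch of the intended call shape, using only declarations from this header (the caller function itself is hypothetical):

    static void example_on_plink_established(struct sta_info *sta)
    {
    	struct ieee80211_sub_if_data *sdata = sta->sdata;
    	u32 changed;

    	/* Bump the counter and collect the resulting change flags ... */
    	changed = mesh_plink_inc_estab_count(sdata);
    	/* ... then push them out once through the mbss wrapper. */
    	ieee80211_mbss_info_change_notify(sdata, changed);
    }
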
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 47aeee2d8db1..bdb8d3b14587 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -30,14 +30,14 @@
30 30
31static void mesh_queue_preq(struct mesh_path *, u8); 31static void mesh_queue_preq(struct mesh_path *, u8);
32 32
33static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) 33static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
34{ 34{
35 if (ae) 35 if (ae)
36 offset += 6; 36 offset += 6;
37 return get_unaligned_le32(preq_elem + offset); 37 return get_unaligned_le32(preq_elem + offset);
38} 38}
39 39
40static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) 40static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
41{ 41{
42 if (ae) 42 if (ae)
43 offset += 6; 43 offset += 6;
@@ -102,10 +102,13 @@ enum mpath_frame_type {
102static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 102static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
103 103
104static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 104static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
105 u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target, 105 const u8 *orig_addr, __le32 orig_sn,
106 __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl, 106 u8 target_flags, const u8 *target,
107 __le32 lifetime, __le32 metric, __le32 preq_id, 107 __le32 target_sn, const u8 *da,
108 struct ieee80211_sub_if_data *sdata) 108 u8 hop_count, u8 ttl,
109 __le32 lifetime, __le32 metric,
110 __le32 preq_id,
111 struct ieee80211_sub_if_data *sdata)
109{ 112{
110 struct ieee80211_local *local = sdata->local; 113 struct ieee80211_local *local = sdata->local;
111 struct sk_buff *skb; 114 struct sk_buff *skb;
@@ -205,6 +208,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
205 struct sk_buff *skb) 208 struct sk_buff *skb)
206{ 209{
207 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 210 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
211 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
208 212
209 skb_set_mac_header(skb, 0); 213 skb_set_mac_header(skb, 0);
210 skb_set_network_header(skb, 0); 214 skb_set_network_header(skb, 0);
@@ -215,24 +219,28 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
215 skb->priority = 7; 219 skb->priority = 7;
216 220
217 info->control.vif = &sdata->vif; 221 info->control.vif = &sdata->vif;
222 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
218 ieee80211_set_qos_hdr(sdata, skb); 223 ieee80211_set_qos_hdr(sdata, skb);
224 ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
219} 225}
220 226
221/** 227/**
222 * mesh_send_path error - Sends a PERR mesh management frame 228 * mesh_path_error_tx - Sends a PERR mesh management frame
223 * 229 *
230 * @ttl: allowed remaining hops
224 * @target: broken destination 231 * @target: broken destination
225 * @target_sn: SN of the broken destination 232 * @target_sn: SN of the broken destination
226 * @target_rcode: reason code for this PERR 233 * @target_rcode: reason code for this PERR
227 * @ra: node this frame is addressed to 234 * @ra: node this frame is addressed to
235 * @sdata: local mesh subif
228 * 236 *
229 * Note: This function may be called with driver locks taken that the driver 237 * Note: This function may be called with driver locks taken that the driver
230 * also acquires in the TX path. To avoid a deadlock we don't transmit the 238 * also acquires in the TX path. To avoid a deadlock we don't transmit the
231 * frame directly but add it to the pending queue instead. 239 * frame directly but add it to the pending queue instead.
232 */ 240 */
233int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, 241int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
234 __le16 target_rcode, const u8 *ra, 242 u8 ttl, const u8 *target, __le32 target_sn,
235 struct ieee80211_sub_if_data *sdata) 243 __le16 target_rcode, const u8 *ra)
236{ 244{
237 struct ieee80211_local *local = sdata->local; 245 struct ieee80211_local *local = sdata->local;
238 struct sk_buff *skb; 246 struct sk_buff *skb;
@@ -246,11 +254,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
246 return -EAGAIN; 254 return -EAGAIN;
247 255
248 skb = dev_alloc_skb(local->tx_headroom + 256 skb = dev_alloc_skb(local->tx_headroom +
257 IEEE80211_ENCRYPT_HEADROOM +
258 IEEE80211_ENCRYPT_TAILROOM +
249 hdr_len + 259 hdr_len +
250 2 + 15 /* PERR IE */); 260 2 + 15 /* PERR IE */);
251 if (!skb) 261 if (!skb)
252 return -1; 262 return -1;
253 skb_reserve(skb, local->tx_headroom); 263 skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
254 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 264 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
255 memset(mgmt, 0, hdr_len); 265 memset(mgmt, 0, hdr_len);
256 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 266 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
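
The allocation change above sizes the PERR frame for later encryption stages. Only the head allowance is passed to skb_reserve(); the tail allowance simply remains as tailroom. A compact sketch of the resulting layout, with names and sizes as in the hunk:

    /* |<- tx_headroom + ENCRYPT_HEADROOM ->|<- hdr + IE ->|<- ENCRYPT_TAILROOM ->|
     * skb_reserve() advances skb->data past the head allowance, so the 802.11
     * header lands where encryption code can still prepend an IV in place.
     */
    skb = dev_alloc_skb(local->tx_headroom +
    		    IEEE80211_ENCRYPT_HEADROOM +
    		    IEEE80211_ENCRYPT_TAILROOM +
    		    hdr_len + 2 + 15 /* PERR IE */);
    if (!skb)
    	return -1;
    skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
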
@@ -350,6 +360,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
350 * @sdata: local mesh subif 360 * @sdata: local mesh subif
351 * @mgmt: mesh management frame 361 * @mgmt: mesh management frame
352 * @hwmp_ie: hwmp information element (PREP or PREQ) 362 * @hwmp_ie: hwmp information element (PREP or PREQ)
363 * @action: type of hwmp ie
353 * 364 *
354 * This function updates the path routing information to the originator and the 365 * This function updates the path routing information to the originator and the
355 * transmitter of a HWMP PREQ or PREP frame. 366 * transmitter of a HWMP PREQ or PREP frame.
@@ -361,14 +372,14 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
361 * path routing information is updated. 372 * path routing information is updated.
362 */ 373 */
363static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, 374static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
364 struct ieee80211_mgmt *mgmt, 375 struct ieee80211_mgmt *mgmt,
365 u8 *hwmp_ie, enum mpath_frame_type action) 376 const u8 *hwmp_ie, enum mpath_frame_type action)
366{ 377{
367 struct ieee80211_local *local = sdata->local; 378 struct ieee80211_local *local = sdata->local;
368 struct mesh_path *mpath; 379 struct mesh_path *mpath;
369 struct sta_info *sta; 380 struct sta_info *sta;
370 bool fresh_info; 381 bool fresh_info;
371 u8 *orig_addr, *ta; 382 const u8 *orig_addr, *ta;
372 u32 orig_sn, orig_metric; 383 u32 orig_sn, orig_metric;
373 unsigned long orig_lifetime, exp_time; 384 unsigned long orig_lifetime, exp_time;
374 u32 last_hop_metric, new_metric; 385 u32 last_hop_metric, new_metric;
@@ -419,7 +430,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
419 process = false; 430 process = false;
420 fresh_info = false; 431 fresh_info = false;
421 } else { 432 } else {
422 mpath = mesh_path_lookup(orig_addr, sdata); 433 mpath = mesh_path_lookup(sdata, orig_addr);
423 if (mpath) { 434 if (mpath) {
424 spin_lock_bh(&mpath->state_lock); 435 spin_lock_bh(&mpath->state_lock);
425 if (mpath->flags & MESH_PATH_FIXED) 436 if (mpath->flags & MESH_PATH_FIXED)
@@ -434,8 +445,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
434 } 445 }
435 } 446 }
436 } else { 447 } else {
437 mesh_path_add(orig_addr, sdata); 448 mesh_path_add(sdata, orig_addr);
438 mpath = mesh_path_lookup(orig_addr, sdata); 449 mpath = mesh_path_lookup(sdata, orig_addr);
439 if (!mpath) { 450 if (!mpath) {
440 rcu_read_unlock(); 451 rcu_read_unlock();
441 return 0; 452 return 0;
@@ -467,7 +478,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
467 else { 478 else {
468 fresh_info = true; 479 fresh_info = true;
469 480
470 mpath = mesh_path_lookup(ta, sdata); 481 mpath = mesh_path_lookup(sdata, ta);
471 if (mpath) { 482 if (mpath) {
472 spin_lock_bh(&mpath->state_lock); 483 spin_lock_bh(&mpath->state_lock);
473 if ((mpath->flags & MESH_PATH_FIXED) || 484 if ((mpath->flags & MESH_PATH_FIXED) ||
@@ -475,8 +486,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
475 (last_hop_metric > mpath->metric))) 486 (last_hop_metric > mpath->metric)))
476 fresh_info = false; 487 fresh_info = false;
477 } else { 488 } else {
478 mesh_path_add(ta, sdata); 489 mesh_path_add(sdata, ta);
479 mpath = mesh_path_lookup(ta, sdata); 490 mpath = mesh_path_lookup(sdata, ta);
480 if (!mpath) { 491 if (!mpath) {
481 rcu_read_unlock(); 492 rcu_read_unlock();
482 return 0; 493 return 0;
@@ -503,11 +514,11 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
503 514
504static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, 515static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
505 struct ieee80211_mgmt *mgmt, 516 struct ieee80211_mgmt *mgmt,
506 u8 *preq_elem, u32 metric) 517 const u8 *preq_elem, u32 metric)
507{ 518{
508 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 519 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
509 struct mesh_path *mpath = NULL; 520 struct mesh_path *mpath = NULL;
510 u8 *target_addr, *orig_addr; 521 const u8 *target_addr, *orig_addr;
511 const u8 *da; 522 const u8 *da;
512 u8 target_flags, ttl, flags; 523 u8 target_flags, ttl, flags;
513 u32 orig_sn, target_sn, lifetime, orig_metric; 524 u32 orig_sn, target_sn, lifetime, orig_metric;
@@ -542,7 +553,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
542 } else if (is_broadcast_ether_addr(target_addr) && 553 } else if (is_broadcast_ether_addr(target_addr) &&
543 (target_flags & IEEE80211_PREQ_TO_FLAG)) { 554 (target_flags & IEEE80211_PREQ_TO_FLAG)) {
544 rcu_read_lock(); 555 rcu_read_lock();
545 mpath = mesh_path_lookup(orig_addr, sdata); 556 mpath = mesh_path_lookup(sdata, orig_addr);
546 if (mpath) { 557 if (mpath) {
547 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) { 558 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
548 reply = true; 559 reply = true;
@@ -557,7 +568,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
557 rcu_read_unlock(); 568 rcu_read_unlock();
558 } else { 569 } else {
559 rcu_read_lock(); 570 rcu_read_lock();
560 mpath = mesh_path_lookup(target_addr, sdata); 571 mpath = mesh_path_lookup(sdata, target_addr);
561 if (mpath) { 572 if (mpath) {
562 if ((!(mpath->flags & MESH_PATH_SN_VALID)) || 573 if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
563 SN_LT(mpath->sn, target_sn)) { 574 SN_LT(mpath->sn, target_sn)) {
@@ -640,11 +651,11 @@ next_hop_deref_protected(struct mesh_path *mpath)
640 651
641static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, 652static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
642 struct ieee80211_mgmt *mgmt, 653 struct ieee80211_mgmt *mgmt,
643 u8 *prep_elem, u32 metric) 654 const u8 *prep_elem, u32 metric)
644{ 655{
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 656 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
646 struct mesh_path *mpath; 657 struct mesh_path *mpath;
647 u8 *target_addr, *orig_addr; 658 const u8 *target_addr, *orig_addr;
648 u8 ttl, hopcount, flags; 659 u8 ttl, hopcount, flags;
649 u8 next_hop[ETH_ALEN]; 660 u8 next_hop[ETH_ALEN];
650 u32 target_sn, orig_sn, lifetime; 661 u32 target_sn, orig_sn, lifetime;
@@ -667,7 +678,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
667 } 678 }
668 679
669 rcu_read_lock(); 680 rcu_read_lock();
670 mpath = mesh_path_lookup(orig_addr, sdata); 681 mpath = mesh_path_lookup(sdata, orig_addr);
671 if (mpath) 682 if (mpath)
672 spin_lock_bh(&mpath->state_lock); 683 spin_lock_bh(&mpath->state_lock);
673 else 684 else
@@ -703,12 +714,13 @@ fail:
703} 714}
704 715
705static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, 716static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
706 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 717 struct ieee80211_mgmt *mgmt,
718 const u8 *perr_elem)
707{ 719{
708 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 720 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
709 struct mesh_path *mpath; 721 struct mesh_path *mpath;
710 u8 ttl; 722 u8 ttl;
711 u8 *ta, *target_addr; 723 const u8 *ta, *target_addr;
712 u32 target_sn; 724 u32 target_sn;
713 u16 target_rcode; 725 u16 target_rcode;
714 726
@@ -724,7 +736,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
724 target_rcode = PERR_IE_TARGET_RCODE(perr_elem); 736 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
725 737
726 rcu_read_lock(); 738 rcu_read_lock();
727 mpath = mesh_path_lookup(target_addr, sdata); 739 mpath = mesh_path_lookup(sdata, target_addr);
728 if (mpath) { 740 if (mpath) {
729 struct sta_info *sta; 741 struct sta_info *sta;
730 742
@@ -739,9 +751,10 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
739 spin_unlock_bh(&mpath->state_lock); 751 spin_unlock_bh(&mpath->state_lock);
740 if (!ifmsh->mshcfg.dot11MeshForwarding) 752 if (!ifmsh->mshcfg.dot11MeshForwarding)
741 goto endperr; 753 goto endperr;
742 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn), 754 mesh_path_error_tx(sdata, ttl, target_addr,
755 cpu_to_le32(target_sn),
743 cpu_to_le16(target_rcode), 756 cpu_to_le16(target_rcode),
744 broadcast_addr, sdata); 757 broadcast_addr);
745 } else 758 } else
746 spin_unlock_bh(&mpath->state_lock); 759 spin_unlock_bh(&mpath->state_lock);
747 } 760 }
@@ -750,15 +763,15 @@ endperr:
750} 763}
751 764
752static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, 765static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
753 struct ieee80211_mgmt *mgmt, 766 struct ieee80211_mgmt *mgmt,
754 struct ieee80211_rann_ie *rann) 767 const struct ieee80211_rann_ie *rann)
755{ 768{
756 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 769 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
757 struct ieee80211_local *local = sdata->local; 770 struct ieee80211_local *local = sdata->local;
758 struct sta_info *sta; 771 struct sta_info *sta;
759 struct mesh_path *mpath; 772 struct mesh_path *mpath;
760 u8 ttl, flags, hopcount; 773 u8 ttl, flags, hopcount;
761 u8 *orig_addr; 774 const u8 *orig_addr;
762 u32 orig_sn, metric, metric_txsta, interval; 775 u32 orig_sn, metric, metric_txsta, interval;
763 bool root_is_gate; 776 bool root_is_gate;
764 777
@@ -789,10 +802,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
789 802
790 metric_txsta = airtime_link_metric_get(local, sta); 803 metric_txsta = airtime_link_metric_get(local, sta);
791 804
792 mpath = mesh_path_lookup(orig_addr, sdata); 805 mpath = mesh_path_lookup(sdata, orig_addr);
793 if (!mpath) { 806 if (!mpath) {
794 mesh_path_add(orig_addr, sdata); 807 mesh_path_add(sdata, orig_addr);
795 mpath = mesh_path_lookup(orig_addr, sdata); 808 mpath = mesh_path_lookup(sdata, orig_addr);
796 if (!mpath) { 809 if (!mpath) {
797 rcu_read_unlock(); 810 rcu_read_unlock();
798 sdata->u.mesh.mshstats.dropped_frames_no_route++; 811 sdata->u.mesh.mshstats.dropped_frames_no_route++;
@@ -849,8 +862,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
849 862
850 863
851void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 864void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
852 struct ieee80211_mgmt *mgmt, 865 struct ieee80211_mgmt *mgmt, size_t len)
853 size_t len)
854{ 866{
855 struct ieee802_11_elems elems; 867 struct ieee802_11_elems elems;
856 size_t baselen; 868 size_t baselen;
@@ -994,7 +1006,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
994 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); 1006 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
995 1007
996 rcu_read_lock(); 1008 rcu_read_lock();
997 mpath = mesh_path_lookup(preq_node->dst, sdata); 1009 mpath = mesh_path_lookup(sdata, preq_node->dst);
998 if (!mpath) 1010 if (!mpath)
999 goto enddiscovery; 1011 goto enddiscovery;
1000 1012
@@ -1064,8 +1076,8 @@ enddiscovery:
1064 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. 1076 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
1065 * skb is freed here if no mpath could be allocated. 1077 * skb is freed here if no mpath could be allocated.
1066 */ 1078 */
1067int mesh_nexthop_resolve(struct sk_buff *skb, 1079int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
1068 struct ieee80211_sub_if_data *sdata) 1080 struct sk_buff *skb)
1069{ 1081{
1070 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1082 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1071 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1083 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1074,18 +1086,22 @@ int mesh_nexthop_resolve(struct sk_buff *skb,
1074 u8 *target_addr = hdr->addr3; 1086 u8 *target_addr = hdr->addr3;
1075 int err = 0; 1087 int err = 0;
1076 1088
1089 /* Nulls are only sent to peers for PS and should be pre-addressed */
1090 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1091 return 0;
1092
1077 rcu_read_lock(); 1093 rcu_read_lock();
1078 err = mesh_nexthop_lookup(skb, sdata); 1094 err = mesh_nexthop_lookup(sdata, skb);
1079 if (!err) 1095 if (!err)
1080 goto endlookup; 1096 goto endlookup;
1081 1097
1082 /* no nexthop found, start resolving */ 1098 /* no nexthop found, start resolving */
1083 mpath = mesh_path_lookup(target_addr, sdata); 1099 mpath = mesh_path_lookup(sdata, target_addr);
1084 if (!mpath) { 1100 if (!mpath) {
1085 mesh_path_add(target_addr, sdata); 1101 mesh_path_add(sdata, target_addr);
1086 mpath = mesh_path_lookup(target_addr, sdata); 1102 mpath = mesh_path_lookup(sdata, target_addr);
1087 if (!mpath) { 1103 if (!mpath) {
1088 mesh_path_discard_frame(skb, sdata); 1104 mesh_path_discard_frame(sdata, skb);
1089 err = -ENOSPC; 1105 err = -ENOSPC;
1090 goto endlookup; 1106 goto endlookup;
1091 } 1107 }
@@ -1102,12 +1118,13 @@ int mesh_nexthop_resolve(struct sk_buff *skb,
1102 skb_queue_tail(&mpath->frame_queue, skb); 1118 skb_queue_tail(&mpath->frame_queue, skb);
1103 err = -ENOENT; 1119 err = -ENOENT;
1104 if (skb_to_free) 1120 if (skb_to_free)
1105 mesh_path_discard_frame(skb_to_free, sdata); 1121 mesh_path_discard_frame(sdata, skb_to_free);
1106 1122
1107endlookup: 1123endlookup:
1108 rcu_read_unlock(); 1124 rcu_read_unlock();
1109 return err; 1125 return err;
1110} 1126}
1127
1111/** 1128/**
1112 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling 1129 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
1113 * this function is considered "using" the associated mpath, so preempt a path 1130 * this function is considered "using" the associated mpath, so preempt a path
@@ -1118,8 +1135,8 @@ endlookup:
1118 * 1135 *
1119 * Returns: 0 if the next hop was found. Nonzero otherwise. 1136 * Returns: 0 if the next hop was found. Nonzero otherwise.
1120 */ 1137 */
1121int mesh_nexthop_lookup(struct sk_buff *skb, 1138int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
1122 struct ieee80211_sub_if_data *sdata) 1139 struct sk_buff *skb)
1123{ 1140{
1124 struct mesh_path *mpath; 1141 struct mesh_path *mpath;
1125 struct sta_info *next_hop; 1142 struct sta_info *next_hop;
@@ -1128,7 +1145,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
1128 int err = -ENOENT; 1145 int err = -ENOENT;
1129 1146
1130 rcu_read_lock(); 1147 rcu_read_lock();
1131 mpath = mesh_path_lookup(target_addr, sdata); 1148 mpath = mesh_path_lookup(sdata, target_addr);
1132 1149
1133 if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) 1150 if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
1134 goto endlookup; 1151 goto endlookup;
@@ -1145,6 +1162,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
1145 if (next_hop) { 1162 if (next_hop) {
1146 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); 1163 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1147 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); 1164 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1165 ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1148 err = 0; 1166 err = 0;
1149 } 1167 }
1150 1168
@@ -1186,8 +1204,7 @@ void mesh_path_timer(unsigned long data)
1186 } 1204 }
1187} 1205}
1188 1206
1189void 1207void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1190mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1191{ 1208{
1192 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1209 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1193 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; 1210 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
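
Several hunks above repeat the same lookup-or-create dance with the new sdata-first argument order. A sketch of that pattern in isolation (the wrapper name is hypothetical): mesh_path_lookup() is only valid inside an RCU read-side section, and the re-lookup can still return NULL if mesh_path_add() failed to allocate.

    static struct mesh_path *
    example_path_get_or_add(struct ieee80211_sub_if_data *sdata, const u8 *addr)
    {
    	struct mesh_path *mpath;	/* caller holds rcu_read_lock() */

    	mpath = mesh_path_lookup(sdata, addr);
    	if (!mpath) {
    		/* mesh_path_add() does not hand back the entry, so the
    		 * callers in this file all look it up again afterwards.
    		 */
    		mesh_path_add(sdata, addr);
    		mpath = mesh_path_lookup(sdata, addr);
    	}
    	return mpath;			/* may be NULL on allocation failure */
    }
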
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index aa749818860e..6b3c4e119c63 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -24,9 +24,12 @@
24/* Keep the mean chain length below this constant */ 24/* Keep the mean chain length below this constant */
25#define MEAN_CHAIN_LEN 2 25#define MEAN_CHAIN_LEN 2
26 26
27#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ 27static inline bool mpath_expired(struct mesh_path *mpath)
28 time_after(jiffies, mpath->exp_time) && \ 28{
29 !(mpath->flags & MESH_PATH_FIXED)) 29 return (mpath->flags & MESH_PATH_ACTIVE) &&
30 time_after(jiffies, mpath->exp_time) &&
31 !(mpath->flags & MESH_PATH_FIXED);
32}
30 33
31struct mpath_node { 34struct mpath_node {
32 struct hlist_node list; 35 struct hlist_node list;
@@ -181,12 +184,12 @@ errcopy:
181 return -ENOMEM; 184 return -ENOMEM;
182} 185}
183 186
184static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, 187static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
185 struct mesh_table *tbl) 188 struct mesh_table *tbl)
186{ 189{
187 /* Use last four bytes of hw addr and interface index as hash index */ 190 /* Use last four bytes of hw addr and interface index as hash index */
188 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) 191 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
189 & tbl->hash_mask; 192 tbl->hash_rnd) & tbl->hash_mask;
190} 193}
191 194
192 195
@@ -212,6 +215,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
212 hdr = (struct ieee80211_hdr *) skb->data; 215 hdr = (struct ieee80211_hdr *) skb->data;
213 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 216 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
214 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); 217 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
218 ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
215 } 219 }
216 220
217 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 221 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
@@ -325,8 +329,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
325} 329}
326 330
327 331
328static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst, 332static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
329 struct ieee80211_sub_if_data *sdata) 333 struct ieee80211_sub_if_data *sdata)
330{ 334{
331 struct mesh_path *mpath; 335 struct mesh_path *mpath;
332 struct hlist_node *n; 336 struct hlist_node *n;
@@ -338,7 +342,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
338 mpath = node->mpath; 342 mpath = node->mpath;
339 if (mpath->sdata == sdata && 343 if (mpath->sdata == sdata &&
340 ether_addr_equal(dst, mpath->dst)) { 344 ether_addr_equal(dst, mpath->dst)) {
341 if (MPATH_EXPIRED(mpath)) { 345 if (mpath_expired(mpath)) {
342 spin_lock_bh(&mpath->state_lock); 346 spin_lock_bh(&mpath->state_lock);
343 mpath->flags &= ~MESH_PATH_ACTIVE; 347 mpath->flags &= ~MESH_PATH_ACTIVE;
344 spin_unlock_bh(&mpath->state_lock); 348 spin_unlock_bh(&mpath->state_lock);
@@ -351,19 +355,21 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
351 355
352/** 356/**
353 * mesh_path_lookup - look up a path in the mesh path table 357 * mesh_path_lookup - look up a path in the mesh path table
354 * @dst: hardware address (ETH_ALEN length) of destination
355 * @sdata: local subif 358 * @sdata: local subif
359 * @dst: hardware address (ETH_ALEN length) of destination
356 * 360 *
357 * Returns: pointer to the mesh path structure, or NULL if not found 361 * Returns: pointer to the mesh path structure, or NULL if not found
358 * 362 *
359 * Locking: must be called within an RCU read section. 363 * Locking: must be called within an RCU read section.
360 */ 364 */
361struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 365struct mesh_path *
366mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
362{ 367{
363 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata); 368 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
364} 369}
365 370
366struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 371struct mesh_path *
372mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
367{ 373{
368 return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata); 374 return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
369} 375}
@@ -378,7 +384,8 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
378 * 384 *
379 * Locking: must be called within a read rcu section. 385 * Locking: must be called within a read rcu section.
380 */ 386 */
381struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 387struct mesh_path *
388mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
382{ 389{
383 struct mesh_table *tbl = rcu_dereference(mesh_paths); 390 struct mesh_table *tbl = rcu_dereference(mesh_paths);
384 struct mpath_node *node; 391 struct mpath_node *node;
@@ -390,7 +397,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
390 if (sdata && node->mpath->sdata != sdata) 397 if (sdata && node->mpath->sdata != sdata)
391 continue; 398 continue;
392 if (j++ == idx) { 399 if (j++ == idx) {
393 if (MPATH_EXPIRED(node->mpath)) { 400 if (mpath_expired(node->mpath)) {
394 spin_lock_bh(&node->mpath->state_lock); 401 spin_lock_bh(&node->mpath->state_lock);
395 node->mpath->flags &= ~MESH_PATH_ACTIVE; 402 node->mpath->flags &= ~MESH_PATH_ACTIVE;
396 spin_unlock_bh(&node->mpath->state_lock); 403 spin_unlock_bh(&node->mpath->state_lock);
@@ -434,11 +441,10 @@ int mesh_path_add_gate(struct mesh_path *mpath)
434 spin_lock_bh(&tbl->gates_lock); 441 spin_lock_bh(&tbl->gates_lock);
435 hlist_add_head_rcu(&new_gate->list, tbl->known_gates); 442 hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
436 spin_unlock_bh(&tbl->gates_lock); 443 spin_unlock_bh(&tbl->gates_lock);
437 rcu_read_unlock();
438 mpath_dbg(mpath->sdata, 444 mpath_dbg(mpath->sdata,
439 "Mesh path: Recorded new gate: %pM. %d known gates\n", 445 "Mesh path: Recorded new gate: %pM. %d known gates\n",
440 mpath->dst, mpath->sdata->u.mesh.num_gates); 446 mpath->dst, mpath->sdata->u.mesh.num_gates);
441 return 0; 447 err = 0;
442err_rcu: 448err_rcu:
443 rcu_read_unlock(); 449 rcu_read_unlock();
444 return err; 450 return err;
@@ -449,30 +455,27 @@ err_rcu:
449 * @tbl: table which holds our list of known gates 455 * @tbl: table which holds our list of known gates
450 * @mpath: gate mpath 456 * @mpath: gate mpath
451 * 457 *
452 * Returns: 0 on success
453 *
454 * Locking: must be called inside rcu_read_lock() section 458 * Locking: must be called inside rcu_read_lock() section
455 */ 459 */
456static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) 460static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
457{ 461{
458 struct mpath_node *gate; 462 struct mpath_node *gate;
459 struct hlist_node *p, *q; 463 struct hlist_node *p, *q;
460 464
461 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) 465 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
462 if (gate->mpath == mpath) { 466 if (gate->mpath != mpath)
463 spin_lock_bh(&tbl->gates_lock); 467 continue;
464 hlist_del_rcu(&gate->list); 468 spin_lock_bh(&tbl->gates_lock);
465 kfree_rcu(gate, rcu); 469 hlist_del_rcu(&gate->list);
466 spin_unlock_bh(&tbl->gates_lock); 470 kfree_rcu(gate, rcu);
467 mpath->sdata->u.mesh.num_gates--; 471 spin_unlock_bh(&tbl->gates_lock);
468 mpath->is_gate = false; 472 mpath->sdata->u.mesh.num_gates--;
469 mpath_dbg(mpath->sdata, 473 mpath->is_gate = false;
470 "Mesh path: Deleted gate: %pM. %d known gates\n", 474 mpath_dbg(mpath->sdata,
471 mpath->dst, mpath->sdata->u.mesh.num_gates); 475 "Mesh path: Deleted gate: %pM. %d known gates\n",
472 break; 476 mpath->dst, mpath->sdata->u.mesh.num_gates);
473 } 477 break;
474 478 }
475 return 0;
476} 479}
477 480
478/** 481/**
@@ -486,14 +489,14 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
486 489
487/** 490/**
488 * mesh_path_add - allocate and add a new path to the mesh path table 491 * mesh_path_add - allocate and add a new path to the mesh path table
489 * @addr: destination address of the path (ETH_ALEN length) 492 * @dst: destination address of the path (ETH_ALEN length)
490 * @sdata: local subif 493 * @sdata: local subif
491 * 494 *
492 * Returns: 0 on success 495 * Returns: 0 on success
493 * 496 *
494 * State: the initial state of the new path is set to 0 497 * State: the initial state of the new path is set to 0
495 */ 498 */
496int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) 499int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
497{ 500{
498 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 501 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
499 struct ieee80211_local *local = sdata->local; 502 struct ieee80211_local *local = sdata->local;
@@ -628,7 +631,8 @@ void mesh_mpp_table_grow(void)
628 write_unlock_bh(&pathtbl_resize_lock); 631 write_unlock_bh(&pathtbl_resize_lock);
629} 632}
630 633
631int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 634int mpp_path_add(struct ieee80211_sub_if_data *sdata,
635 const u8 *dst, const u8 *mpp)
632{ 636{
633 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 637 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
634 struct ieee80211_local *local = sdata->local; 638 struct ieee80211_local *local = sdata->local;
@@ -737,9 +741,10 @@ void mesh_plink_broken(struct sta_info *sta)
737 mpath->flags &= ~MESH_PATH_ACTIVE; 741 mpath->flags &= ~MESH_PATH_ACTIVE;
738 ++mpath->sn; 742 ++mpath->sn;
739 spin_unlock_bh(&mpath->state_lock); 743 spin_unlock_bh(&mpath->state_lock);
740 mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, 744 mesh_path_error_tx(sdata,
741 mpath->dst, cpu_to_le32(mpath->sn), 745 sdata->u.mesh.mshcfg.element_ttl,
742 reason, bcast, sdata); 746 mpath->dst, cpu_to_le32(mpath->sn),
747 reason, bcast);
743 } 748 }
744 } 749 }
745 rcu_read_unlock(); 750 rcu_read_unlock();
@@ -854,7 +859,7 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
854 * 859 *
855 * Returns: 0 if successful 860 * Returns: 0 if successful
856 */ 861 */
857int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 862int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
858{ 863{
859 struct mesh_table *tbl; 864 struct mesh_table *tbl;
860 struct mesh_path *mpath; 865 struct mesh_path *mpath;
@@ -963,8 +968,8 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
963 * 968 *
964 * Locking: the function must be called within a rcu_read_lock region 969 * Locking: the function must be called within a rcu_read_lock region
965 */ 970 */
966void mesh_path_discard_frame(struct sk_buff *skb, 971void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
967 struct ieee80211_sub_if_data *sdata) 972 struct sk_buff *skb)
968{ 973{
969 kfree_skb(skb); 974 kfree_skb(skb);
970 sdata->u.mesh.mshstats.dropped_frames_no_route++; 975 sdata->u.mesh.mshstats.dropped_frames_no_route++;
@@ -982,7 +987,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
982 struct sk_buff *skb; 987 struct sk_buff *skb;
983 988
984 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) 989 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
985 mesh_path_discard_frame(skb, mpath->sdata); 990 mesh_path_discard_frame(mpath->sdata, skb);
986} 991}
987 992
988/** 993/**
@@ -1103,7 +1108,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
1103 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 1108 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
1104 (!(mpath->flags & MESH_PATH_FIXED)) && 1109 (!(mpath->flags & MESH_PATH_FIXED)) &&
1105 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) 1110 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
1106 mesh_path_del(mpath->dst, mpath->sdata); 1111 mesh_path_del(mpath->sdata, mpath->dst);
1107 } 1112 }
1108 rcu_read_unlock(); 1113 rcu_read_unlock();
1109} 1114}
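
mesh_gate_del() above follows the standard RCU list-removal discipline: unlink under the writer lock, then defer the actual free past the grace period so concurrent readers traversing the hlist stay safe. Reduced to its skeleton, with variable names as in the function:

    spin_lock_bh(&tbl->gates_lock);
    hlist_del_rcu(&gate->list);	/* readers may still be traversing the node */
    kfree_rcu(gate, rcu);	/* kfree() happens only after all readers drop out */
    spin_unlock_bh(&tbl->gates_lock);
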
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 4b274e9c91a5..07d396d57079 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -37,23 +37,31 @@ enum plink_event {
37 CLS_IGNR 37 CLS_IGNR
38}; 38};
39 39
40static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 40static const char * const mplstates[] = {
41 enum ieee80211_self_protected_actioncode action, 41 [NL80211_PLINK_LISTEN] = "LISTEN",
42 u8 *da, __le16 llid, __le16 plid, __le16 reason); 42 [NL80211_PLINK_OPN_SNT] = "OPN-SNT",
43 [NL80211_PLINK_OPN_RCVD] = "OPN-RCVD",
44 [NL80211_PLINK_CNF_RCVD] = "CNF_RCVD",
45 [NL80211_PLINK_ESTAB] = "ESTAB",
46 [NL80211_PLINK_HOLDING] = "HOLDING",
47 [NL80211_PLINK_BLOCKED] = "BLOCKED"
48};
43 49
44static inline 50static const char * const mplevents[] = {
45u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 51 [PLINK_UNDEFINED] = "NONE",
46{ 52 [OPN_ACPT] = "OPN_ACPT",
47 atomic_inc(&sdata->u.mesh.estab_plinks); 53 [OPN_RJCT] = "OPN_RJCT",
48 return mesh_accept_plinks_update(sdata); 54 [OPN_IGNR] = "OPN_IGNR",
49} 55 [CNF_ACPT] = "CNF_ACPT",
56 [CNF_RJCT] = "CNF_RJCT",
57 [CNF_IGNR] = "CNF_IGNR",
58 [CLS_ACPT] = "CLS_ACPT",
59 [CLS_IGNR] = "CLS_IGNR"
60};
50 61
51static inline 62static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
52u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) 63 enum ieee80211_self_protected_actioncode action,
53{ 64 u8 *da, __le16 llid, __le16 plid, __le16 reason);
54 atomic_dec(&sdata->u.mesh.estab_plinks);
55 return mesh_accept_plinks_update(sdata);
56}
57 65
58/** 66/**
59 * mesh_plink_fsm_restart - restart a mesh peer link finite state machine 67 * mesh_plink_fsm_restart - restart a mesh peer link finite state machine
@@ -70,27 +78,63 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
70} 78}
71 79
72/* 80/*
73 * Allocate mesh sta entry and insert into station table 81 * mesh_set_short_slot_time - enable / disable ERP short slot time.
82 *
 83 * The standard indirectly mandates that mesh STAs turn off short slot time by
84 * disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we
85 * can't be sneaky about it. Enable short slot time if all mesh STAs in the
86 * MBSS support ERP rates.
87 *
88 * Returns BSS_CHANGED_ERP_SLOT or 0 for no change.
74 */ 89 */
75static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, 90static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
76 u8 *hw_addr)
77{ 91{
92 struct ieee80211_local *local = sdata->local;
93 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
94 struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
78 struct sta_info *sta; 95 struct sta_info *sta;
96 u32 erp_rates = 0, changed = 0;
97 int i;
98 bool short_slot = false;
79 99
80 if (sdata->local->num_sta >= MESH_MAX_PLINKS) 100 if (band == IEEE80211_BAND_5GHZ) {
81 return NULL; 101 /* (IEEE 802.11-2012 19.4.5) */
102 short_slot = true;
103 goto out;
104 } else if (band != IEEE80211_BAND_2GHZ ||
105 (band == IEEE80211_BAND_2GHZ &&
106 local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
107 goto out;
82 108
83 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); 109 for (i = 0; i < sband->n_bitrates; i++)
84 if (!sta) 110 if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
85 return NULL; 111 erp_rates |= BIT(i);
86 112
87 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 113 if (!erp_rates)
88 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 114 goto out;
89 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
90 115
91 set_sta_flag(sta, WLAN_STA_WME); 116 rcu_read_lock();
117 list_for_each_entry_rcu(sta, &local->sta_list, list) {
118 if (sdata != sta->sdata ||
119 sta->plink_state != NL80211_PLINK_ESTAB)
120 continue;
92 121
93 return sta; 122 short_slot = false;
123 if (erp_rates & sta->sta.supp_rates[band])
124 short_slot = true;
125 else
126 break;
127 }
128 rcu_read_unlock();
129
130out:
131 if (sdata->vif.bss_conf.use_short_slot != short_slot) {
132 sdata->vif.bss_conf.use_short_slot = short_slot;
133 changed = BSS_CHANGED_ERP_SLOT;
134 mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n",
135 sdata->vif.addr, short_slot);
136 }
137 return changed;
94} 138}
95 139
96/** 140/**
@@ -107,7 +151,6 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
107{ 151{
108 struct ieee80211_local *local = sdata->local; 152 struct ieee80211_local *local = sdata->local;
109 struct sta_info *sta; 153 struct sta_info *sta;
110 u32 changed = 0;
111 u16 ht_opmode; 154 u16 ht_opmode;
112 bool non_ht_sta = false, ht20_sta = false; 155 bool non_ht_sta = false, ht20_sta = false;
113 156
@@ -120,23 +163,19 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
120 sta->plink_state != NL80211_PLINK_ESTAB) 163 sta->plink_state != NL80211_PLINK_ESTAB)
121 continue; 164 continue;
122 165
123 switch (sta->ch_width) { 166 if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
124 case NL80211_CHAN_WIDTH_20_NOHT: 167 continue;
125 mpl_dbg(sdata, 168
126 "mesh_plink %pM: nonHT sta (%pM) is present\n", 169 if (!sta->sta.ht_cap.ht_supported) {
127 sdata->vif.addr, sta->sta.addr); 170 mpl_dbg(sdata, "nonHT sta (%pM) is present\n",
171 sta->sta.addr);
128 non_ht_sta = true; 172 non_ht_sta = true;
129 goto out;
130 case NL80211_CHAN_WIDTH_20:
131 mpl_dbg(sdata,
132 "mesh_plink %pM: HT20 sta (%pM) is present\n",
133 sdata->vif.addr, sta->sta.addr);
134 ht20_sta = true;
135 default:
136 break; 173 break;
137 } 174 }
175
176 mpl_dbg(sdata, "HT20 sta (%pM) is present\n", sta->sta.addr);
177 ht20_sta = true;
138 } 178 }
139out:
140 rcu_read_unlock(); 179 rcu_read_unlock();
141 180
142 if (non_ht_sta) 181 if (non_ht_sta)
@@ -147,16 +186,13 @@ out:
147 else 186 else
148 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 187 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
149 188
150 if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { 189 if (sdata->vif.bss_conf.ht_operation_mode == ht_opmode)
151 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 190 return 0;
152 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
153 changed = BSS_CHANGED_HT;
154 mpl_dbg(sdata,
155 "mesh_plink %pM: protection mode changed to %d\n",
156 sdata->vif.addr, ht_opmode);
157 }
158 191
159 return changed; 192 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
193 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
194 mpl_dbg(sdata, "selected new HT protection mode %d\n", ht_opmode);
195 return BSS_CHANGED_HT;
160} 196}
161 197
162/** 198/**
@@ -179,6 +215,9 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
179 sta->plink_state = NL80211_PLINK_BLOCKED; 215 sta->plink_state = NL80211_PLINK_BLOCKED;
180 mesh_path_flush_by_nexthop(sta); 216 mesh_path_flush_by_nexthop(sta);
181 217
218 ieee80211_mps_sta_status_update(sta);
219 changed |= ieee80211_mps_local_status_update(sdata);
220
182 return changed; 221 return changed;
183} 222}
184 223
@@ -189,7 +228,7 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
189 * 228 *
190 * All mesh paths with this peer as next hop will be flushed 229 * All mesh paths with this peer as next hop will be flushed
191 */ 230 */
192void mesh_plink_deactivate(struct sta_info *sta) 231u32 mesh_plink_deactivate(struct sta_info *sta)
193{ 232{
194 struct ieee80211_sub_if_data *sdata = sta->sdata; 233 struct ieee80211_sub_if_data *sdata = sta->sdata;
195 u32 changed; 234 u32 changed;
@@ -202,12 +241,13 @@ void mesh_plink_deactivate(struct sta_info *sta)
202 sta->reason); 241 sta->reason);
203 spin_unlock_bh(&sta->lock); 242 spin_unlock_bh(&sta->lock);
204 243
205 ieee80211_bss_info_change_notify(sdata, changed); 244 return changed;
206} 245}
207 246
208static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 247static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
209 enum ieee80211_self_protected_actioncode action, 248 enum ieee80211_self_protected_actioncode action,
210 u8 *da, __le16 llid, __le16 plid, __le16 reason) { 249 u8 *da, __le16 llid, __le16 plid, __le16 reason)
250{
211 struct ieee80211_local *local = sdata->local; 251 struct ieee80211_local *local = sdata->local;
212 struct sk_buff *skb; 252 struct sk_buff *skb;
213 struct ieee80211_tx_info *info; 253 struct ieee80211_tx_info *info;
@@ -258,13 +298,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
258 } 298 }
259 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 299 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
260 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 300 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
261 mesh_add_rsn_ie(skb, sdata) || 301 mesh_add_rsn_ie(sdata, skb) ||
262 mesh_add_meshid_ie(skb, sdata) || 302 mesh_add_meshid_ie(sdata, skb) ||
263 mesh_add_meshconf_ie(skb, sdata)) 303 mesh_add_meshconf_ie(sdata, skb))
264 goto free; 304 goto free;
265 } else { /* WLAN_SP_MESH_PEERING_CLOSE */ 305 } else { /* WLAN_SP_MESH_PEERING_CLOSE */
266 info->flags |= IEEE80211_TX_CTL_NO_ACK; 306 info->flags |= IEEE80211_TX_CTL_NO_ACK;
267 if (mesh_add_meshid_ie(skb, sdata)) 307 if (mesh_add_meshid_ie(sdata, skb))
268 goto free; 308 goto free;
269 } 309 }
270 310
@@ -308,12 +348,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
308 } 348 }
309 349
310 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 350 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
311 if (mesh_add_ht_cap_ie(skb, sdata) || 351 if (mesh_add_ht_cap_ie(sdata, skb) ||
312 mesh_add_ht_oper_ie(skb, sdata)) 352 mesh_add_ht_oper_ie(sdata, skb))
313 goto free; 353 goto free;
314 } 354 }
315 355
316 if (mesh_add_vendor_ies(skb, sdata)) 356 if (mesh_add_vendor_ies(sdata, skb))
317 goto free; 357 goto free;
318 358
319 ieee80211_tx_skb(sdata, skb); 359 ieee80211_tx_skb(sdata, skb);
@@ -323,92 +363,147 @@ free:
323 return err; 363 return err;
324} 364}
325 365
326/** 366static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
327 * mesh_peer_init - initialize new mesh peer and return resulting sta_info 367 struct sta_info *sta,
328 * 368 struct ieee802_11_elems *elems, bool insert)
329 * @sdata: local meshif
330 * @addr: peer's address
331 * @elems: IEs from beacon or mesh peering frame
332 *
333 * call under RCU
334 */
335static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
336 u8 *addr,
337 struct ieee802_11_elems *elems)
338{ 369{
339 struct ieee80211_local *local = sdata->local; 370 struct ieee80211_local *local = sdata->local;
340 enum ieee80211_band band = ieee80211_get_sdata_band(sdata); 371 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
341 struct ieee80211_supported_band *sband; 372 struct ieee80211_supported_band *sband;
342 u32 rates, basic_rates = 0; 373 u32 rates, basic_rates = 0, changed = 0;
343 struct sta_info *sta;
344 bool insert = false;
345 374
346 sband = local->hw.wiphy->bands[band]; 375 sband = local->hw.wiphy->bands[band];
347 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); 376 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
348 377
349 sta = sta_info_get(sdata, addr);
350 if (!sta) {
351 /* Userspace handles peer allocation when security is enabled */
352 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
353 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
354 elems->ie_start,
355 elems->total_len,
356 GFP_ATOMIC);
357 return NULL;
358 }
359
360 sta = mesh_plink_alloc(sdata, addr);
361 if (!sta)
362 return NULL;
363 insert = true;
364 }
365
366 spin_lock_bh(&sta->lock); 378 spin_lock_bh(&sta->lock);
367 sta->last_rx = jiffies; 379 sta->last_rx = jiffies;
368 if (sta->plink_state == NL80211_PLINK_ESTAB) {
369 spin_unlock_bh(&sta->lock);
370 return sta;
371 }
372 380
381 /* rates and capabilities don't change during peering */
382 if (sta->plink_state == NL80211_PLINK_ESTAB)
383 goto out;
384
385 if (sta->sta.supp_rates[band] != rates)
386 changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
373 sta->sta.supp_rates[band] = rates; 387 sta->sta.supp_rates[band] = rates;
374 if (elems->ht_cap_elem && 388
375 sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) 389 if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
376 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 390 elems->ht_cap_elem, sta))
377 elems->ht_cap_elem, 391 changed |= IEEE80211_RC_BW_CHANGED;
378 &sta->sta.ht_cap); 392
379 else 393 /* HT peer is operating 20MHz-only */
380 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); 394 if (elems->ht_operation &&
381 395 !(elems->ht_operation->ht_param &
382 if (elems->ht_operation) { 396 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
383 struct cfg80211_chan_def chandef; 397 if (sta->sta.bandwidth != IEEE80211_STA_RX_BW_20)
384 398 changed |= IEEE80211_RC_BW_CHANGED;
385 if (!(elems->ht_operation->ht_param & 399 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
386 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
387 sta->sta.ht_cap.cap &=
388 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
389 ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
390 elems->ht_operation, &chandef);
391 sta->ch_width = chandef.width;
392 } 400 }
393 401
394 if (insert) 402 if (insert)
395 rate_control_rate_init(sta); 403 rate_control_rate_init(sta);
404 else
405 rate_control_rate_update(local, sband, sta, changed);
406out:
396 spin_unlock_bh(&sta->lock); 407 spin_unlock_bh(&sta->lock);
408}
409
410static struct sta_info *
411__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
412{
413 struct sta_info *sta;
397 414
398 if (insert && sta_info_insert(sta)) 415 if (sdata->local->num_sta >= MESH_MAX_PLINKS)
416 return NULL;
417
418 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
419 if (!sta)
399 return NULL; 420 return NULL;
400 421
422 sta->plink_state = NL80211_PLINK_LISTEN;
423 init_timer(&sta->plink_timer);
424
425 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
426 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
427 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
428
429 set_sta_flag(sta, WLAN_STA_WME);
430
431 return sta;
432}
433
434static struct sta_info *
435mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
436 struct ieee802_11_elems *elems)
437{
438 struct sta_info *sta = NULL;
439
440 /* Userspace handles peer allocation when security is enabled */
441 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
442 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
443 elems->ie_start,
444 elems->total_len,
445 GFP_KERNEL);
446 else
447 sta = __mesh_sta_info_alloc(sdata, addr);
448
449 return sta;
450}
451
452/*
453 * mesh_sta_info_get - return mesh sta info entry for @addr.
454 *
455 * @sdata: local meshif
456 * @addr: peer's address
457 * @elems: IEs from beacon or mesh peering frame.
458 *
459 * Return existing or newly allocated sta_info under RCU read lock.
460 * (re)initialize with given IEs.
461 */
462static struct sta_info *
463mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
464 u8 *addr, struct ieee802_11_elems *elems) __acquires(RCU)
465{
466 struct sta_info *sta = NULL;
467
468 rcu_read_lock();
469 sta = sta_info_get(sdata, addr);
470 if (sta) {
471 mesh_sta_info_init(sdata, sta, elems, false);
472 } else {
473 rcu_read_unlock();
474 /* can't run atomic */
475 sta = mesh_sta_info_alloc(sdata, addr, elems);
476 if (!sta) {
477 rcu_read_lock();
478 return NULL;
479 }
480
481 mesh_sta_info_init(sdata, sta, elems, true);
482
483 if (sta_info_insert_rcu(sta))
484 return NULL;
485 }
486
401 return sta; 487 return sta;
402} 488}
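mesh_sta_info_get() has an unusual locking contract: it takes the RCU read lock itself, may drop and re-take it to allocate, and returns with it held even on failure, so the caller owns the final unlock. A hedged sketch of the expected caller shape, mirroring mesh_neighbour_update() below (illustrative only, not a real mac80211 call site):

	static void example_user(struct ieee80211_sub_if_data *sdata, u8 *addr,
				 struct ieee802_11_elems *elems)
	{
		struct sta_info *sta;

		sta = mesh_sta_info_get(sdata, addr, elems); /* returns with RCU held */
		if (sta) {
			/* use sta; the pointer is only valid inside the RCU section */
		}
		rcu_read_unlock(); /* caller releases, even when sta is NULL */
	}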
 
+/*
+ * mesh_neighbour_update - update or initialize new mesh neighbor.
+ *
+ * @sdata: local meshif
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame
+ *
+ * Initiates peering if appropriate.
+ */
 void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
 			   u8 *hw_addr,
 			   struct ieee802_11_elems *elems)
 {
 	struct sta_info *sta;
+	u32 changed = 0;
 
-	rcu_read_lock();
-	sta = mesh_peer_init(sdata, hw_addr, elems);
+	sta = mesh_sta_info_get(sdata, hw_addr, elems);
 	if (!sta)
 		goto out;
 
@@ -417,10 +512,12 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
 	    sdata->u.mesh.accepting_plinks &&
 	    sdata->u.mesh.mshcfg.auto_open_plinks &&
 	    rssi_threshold_check(sta, sdata))
-		mesh_plink_open(sta);
+		changed = mesh_plink_open(sta);
 
+	ieee80211_mps_frame_release(sta, elems);
 out:
 	rcu_read_unlock();
+	ieee80211_mbss_info_change_notify(sdata, changed);
 }
 
 static void mesh_plink_timer(unsigned long data)
@@ -504,6 +601,13 @@ static void mesh_plink_timer(unsigned long data)
 #ifdef CONFIG_PM
 void mesh_plink_quiesce(struct sta_info *sta)
 {
+	if (!ieee80211_vif_is_mesh(&sta->sdata->vif))
+		return;
+
+	/* no kernel mesh sta timers have been initialized */
+	if (sta->sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+		return;
+
 	if (del_timer_sync(&sta->plink_timer))
 		sta->plink_timer_was_running = true;
 }
@@ -526,13 +630,14 @@ static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
 	add_timer(&sta->plink_timer);
 }
 
-int mesh_plink_open(struct sta_info *sta)
+u32 mesh_plink_open(struct sta_info *sta)
 {
 	__le16 llid;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed;
 
 	if (!test_sta_flag(sta, WLAN_STA_AUTH))
-		return -EPERM;
+		return 0;
 
 	spin_lock_bh(&sta->lock);
 	get_random_bytes(&llid, 2);
@@ -540,7 +645,7 @@ int mesh_plink_open(struct sta_info *sta)
 	if (sta->plink_state != NL80211_PLINK_LISTEN &&
 	    sta->plink_state != NL80211_PLINK_BLOCKED) {
 		spin_unlock_bh(&sta->lock);
-		return -EBUSY;
+		return 0;
 	}
 	sta->plink_state = NL80211_PLINK_OPN_SNT;
 	mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
@@ -549,13 +654,16 @@ int mesh_plink_open(struct sta_info *sta)
 		"Mesh plink: starting establishment with %pM\n",
 		sta->sta.addr);
 
-	return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-				   sta->sta.addr, llid, 0, 0);
+	/* set the non-peer mode to active during peering */
+	changed = ieee80211_mps_local_status_update(sdata);
+
+	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
+			    sta->sta.addr, llid, 0, 0);
+	return changed;
 }
 
-void mesh_plink_block(struct sta_info *sta)
+u32 mesh_plink_block(struct sta_info *sta)
 {
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	u32 changed;
 
 	spin_lock_bh(&sta->lock);
@@ -563,12 +671,13 @@ void mesh_plink_block(struct sta_info *sta)
 	sta->plink_state = NL80211_PLINK_BLOCKED;
 	spin_unlock_bh(&sta->lock);
 
-	ieee80211_bss_info_change_notify(sdata, changed);
+	return changed;
 }
 
 
-void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
-			 size_t len, struct ieee80211_rx_status *rx_status)
+void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
			 struct ieee80211_mgmt *mgmt, size_t len,
+			 struct ieee80211_rx_status *rx_status)
 {
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	struct ieee802_11_elems elems;
@@ -581,15 +690,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 	u8 *baseaddr;
 	u32 changed = 0;
 	__le16 plid, llid, reason;
-	static const char *mplstates[] = {
-		[NL80211_PLINK_LISTEN] = "LISTEN",
-		[NL80211_PLINK_OPN_SNT] = "OPN-SNT",
-		[NL80211_PLINK_OPN_RCVD] = "OPN-RCVD",
-		[NL80211_PLINK_CNF_RCVD] = "CNF_RCVD",
-		[NL80211_PLINK_ESTAB] = "ESTAB",
-		[NL80211_PLINK_HOLDING] = "HOLDING",
-		[NL80211_PLINK_BLOCKED] = "BLOCKED"
-	};
 
 	/* need action_code, aux */
 	if (len < IEEE80211_MIN_ACTION_SIZE + 3)
@@ -609,13 +709,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 		baselen += 4;
 	}
 	ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
+
 	if (!elems.peering) {
 		mpl_dbg(sdata,
 			"Mesh plink: missing necessary peer link ie\n");
 		return;
 	}
+
 	if (elems.rsn_len &&
 	    sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
 		mpl_dbg(sdata,
 			"Mesh plink: can't establish link with secure peer\n");
 		return;
@@ -634,7 +736,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 	}
 
 	if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
 	    (!elems.mesh_id || !elems.mesh_config)) {
 		mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
 		return;
 	}
@@ -646,6 +748,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 	    (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
 		memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
 
+	/* WARNING: Only for sta pointer, is dropped & re-acquired */
 	rcu_read_lock();
 
 	sta = sta_info_get(sdata, mgmt->sa);
@@ -749,8 +852,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 	}
 
 	if (event == OPN_ACPT) {
+		rcu_read_unlock();
 		/* allocate sta entry if necessary and update info */
-		sta = mesh_peer_init(sdata, mgmt->sa, &elems);
+		sta = mesh_sta_info_get(sdata, mgmt->sa, &elems);
 		if (!sta) {
 			mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
 			rcu_read_unlock();
@@ -758,11 +862,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 		}
 	}
 
-	mpl_dbg(sdata,
-		"Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
-		mgmt->sa, mplstates[sta->plink_state],
-		le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
-		event);
+	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", mgmt->sa,
+		mplstates[sta->plink_state], mplevents[event]);
 	reason = 0;
 	spin_lock_bh(&sta->lock);
 	switch (sta->plink_state) {
@@ -780,6 +881,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 			sta->llid = llid;
 			mesh_plink_timer_set(sta,
 					     mshcfg->dot11MeshRetryTimeout);
+
+			/* set the non-peer mode to active during peering */
+			changed |= ieee80211_mps_local_status_update(sdata);
+
 			spin_unlock_bh(&sta->lock);
 			mesh_plink_frame_tx(sdata,
 					    WLAN_SP_MESH_PEERING_OPEN,
@@ -870,8 +975,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 			spin_unlock_bh(&sta->lock);
 			changed |= mesh_plink_inc_estab_count(sdata);
 			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= mesh_set_short_slot_time(sdata);
 			mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+								  mshcfg->power_mode);
 			break;
 		default:
 			spin_unlock_bh(&sta->lock);
@@ -905,11 +1014,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 			spin_unlock_bh(&sta->lock);
 			changed |= mesh_plink_inc_estab_count(sdata);
 			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= mesh_set_short_slot_time(sdata);
 			mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			mesh_plink_frame_tx(sdata,
 					    WLAN_SP_MESH_PEERING_CONFIRM,
 					    sta->sta.addr, llid, plid, 0);
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+								  mshcfg->power_mode);
 			break;
 		default:
 			spin_unlock_bh(&sta->lock);
@@ -928,6 +1041,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 			mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 			spin_unlock_bh(&sta->lock);
 			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= mesh_set_short_slot_time(sdata);
 			mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 					    sta->sta.addr, llid, plid, reason);
 			break;
@@ -976,5 +1090,5 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
 	rcu_read_unlock();
 
 	if (changed)
-		ieee80211_bss_info_change_notify(sdata, changed);
+		ieee80211_mbss_info_change_notify(sdata, changed);
 }
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
new file mode 100644
index 000000000000..3b7bfc01ee36
--- /dev/null
+++ b/net/mac80211/mesh_ps.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
+ * Copyright 2012-2013, cozybit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "mesh.h"
+#include "wme.h"
+
+
+/* mesh PS management */
+
+/**
+ * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
+ */
+static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_hdr *nullfunc; /* use 4addr header */
+	struct sk_buff *skb;
+	int size = sizeof(*nullfunc);
+	__le16 fc;
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2);
+	if (!skb)
+		return NULL;
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+
+	nullfunc = (struct ieee80211_hdr *) skb_put(skb, size);
+	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+	ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr,
+				      sdata->vif.addr);
+	nullfunc->frame_control = fc;
+	nullfunc->duration_id = 0;
+	/* no address resolution for this frame -> set addr 1 immediately */
+	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+	memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
+	ieee80211_mps_set_frame_flags(sdata, sta, nullfunc);
+
+	return skb;
+}
+
+/**
+ * mps_qos_null_tx - send a QoS Null to indicate link-specific power mode
+ */
+static void mps_qos_null_tx(struct sta_info *sta)
+{
+	struct sk_buff *skb;
+
+	skb = mps_qos_null_get(sta);
+	if (!skb)
+		return;
+
+	mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n",
+		sta->sta.addr);
+
+	/* don't unintentionally start a MPSP */
+	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+		u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
+
+		qc[0] |= IEEE80211_QOS_CTL_EOSP;
+	}
+
+	ieee80211_tx_skb(sta->sdata, skb);
+}
+
+/**
+ * ieee80211_mps_local_status_update - track status of local link-specific PMs
+ *
+ * @sdata: local mesh subif
+ *
+ * sets the non-peer power mode and triggers the driver PS (re-)configuration
+ * Return BSS_CHANGED_BEACON if a beacon update is necessary.
+ */
+u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct sta_info *sta;
+	bool peering = false;
+	int light_sleep_cnt = 0;
+	int deep_sleep_cnt = 0;
+	u32 changed = 0;
+	enum nl80211_mesh_power_mode nonpeer_pm;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (sdata != sta->sdata)
+			continue;
+
+		switch (sta->plink_state) {
+		case NL80211_PLINK_OPN_SNT:
+		case NL80211_PLINK_OPN_RCVD:
+		case NL80211_PLINK_CNF_RCVD:
+			peering = true;
+			break;
+		case NL80211_PLINK_ESTAB:
+			if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+				light_sleep_cnt++;
+			else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+				deep_sleep_cnt++;
+			break;
+		default:
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	/*
+	 * Set non-peer mode to active during peering/scanning/authentication
+	 * (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is
+	 * deep sleep if the local STA is in light or deep sleep towards at
+	 * least one mesh peer (see 13.14.3.1). Otherwise, set it to the
+	 * user-configured default value.
+	 */
+	if (peering) {
+		mps_dbg(sdata, "setting non-peer PM to active for peering\n");
+		nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+	} else if (light_sleep_cnt || deep_sleep_cnt) {
+		mps_dbg(sdata, "setting non-peer PM to deep sleep\n");
+		nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP;
+	} else {
+		mps_dbg(sdata, "setting non-peer PM to user value\n");
+		nonpeer_pm = ifmsh->mshcfg.power_mode;
+	}
+
+	/* need update if sleep counts move between 0 and non-zero */
+	if (ifmsh->nonpeer_pm != nonpeer_pm ||
+	    !ifmsh->ps_peers_light_sleep != !light_sleep_cnt ||
+	    !ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt)
+		changed = BSS_CHANGED_BEACON;
+
+	ifmsh->nonpeer_pm = nonpeer_pm;
+	ifmsh->ps_peers_light_sleep = light_sleep_cnt;
+	ifmsh->ps_peers_deep_sleep = deep_sleep_cnt;
+
+	return changed;
+}
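The "need update if sleep counts move between 0 and non-zero" test above leans on the `!a != !b` idiom: negation collapses each counter to 0 or 1, so the comparison fires only when exactly one side is zero. A tiny self-contained demonstration:

	#include <assert.h>

	int main(void)
	{
		/* counts 0 -> 3: truthiness flips, beacon must be updated */
		assert((!0 != !3) == 1);
		/* counts 2 -> 5: both non-zero, no beacon change needed */
		assert((!2 != !5) == 0);
		return 0;
	}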
+
+/**
+ * ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA
+ *
+ * @sta: mesh STA
+ * @pm: the power mode to set
+ * Return BSS_CHANGED_BEACON if a beacon update is in order.
+ */
+u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
+				   enum nl80211_mesh_power_mode pm)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+	mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
+		pm, sta->sta.addr);
+
+	sta->local_pm = pm;
+
+	/*
+	 * announce peer-specific power mode transition
+	 * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
+	 */
+	if (sta->plink_state == NL80211_PLINK_ESTAB)
+		mps_qos_null_tx(sta);
+
+	return ieee80211_mps_local_status_update(sdata);
+}
+
+/**
+ * ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control)
+ *
+ * @sdata: local mesh subif
+ * @sta: mesh STA
+ * @hdr: 802.11 frame header
+ *
+ * see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11
+ *
+ * NOTE: sta must be given when an individually-addressed QoS frame header
+ * is handled, for group-addressed and management frames it is not used
+ */
+void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
+				   struct sta_info *sta,
+				   struct ieee80211_hdr *hdr)
+{
+	enum nl80211_mesh_power_mode pm;
+	u8 *qc;
+
+	if (WARN_ON(is_unicast_ether_addr(hdr->addr1) &&
+		    ieee80211_is_data_qos(hdr->frame_control) &&
+		    !sta))
+		return;
+
+	if (is_unicast_ether_addr(hdr->addr1) &&
+	    ieee80211_is_data_qos(hdr->frame_control) &&
+	    sta->plink_state == NL80211_PLINK_ESTAB)
+		pm = sta->local_pm;
+	else
+		pm = sdata->u.mesh.nonpeer_pm;
+
+	if (pm == NL80211_MESH_POWER_ACTIVE)
+		hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM);
+	else
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return;
+
+	qc = ieee80211_get_qos_ctl(hdr);
+
+	if ((is_unicast_ether_addr(hdr->addr1) &&
+	     pm == NL80211_MESH_POWER_DEEP_SLEEP) ||
+	    (is_multicast_ether_addr(hdr->addr1) &&
+	     sdata->u.mesh.ps_peers_deep_sleep > 0))
+		qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
+	else
+		qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
+}
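The PM-bit updates above follow the common kernel idiom for flag work on little-endian wire fields: convert the host-order mask once with cpu_to_le16() and AND/OR it into the __le16 field directly, so the field itself never needs byte swapping. A reduced user-space illustration; the mask value matches mainline's IEEE80211_FCTL_PM, and the helper is a stand-in for the kernel's:

	#include <stdint.h>

	#define FCTL_PM 0x1000	/* IEEE80211_FCTL_PM, host byte order */

	/* stand-in for cpu_to_le16(); swaps bytes only on big-endian hosts */
	static uint16_t cpu_to_le16_demo(uint16_t x)
	{
		const union { uint16_t v; uint8_t b[2]; } probe = { .v = 1 };
		return probe.b[0] ? x : (uint16_t)((x << 8) | (x >> 8));
	}

	static void set_pm(uint16_t *frame_control, int powersave)
	{
		if (powersave)
			*frame_control |= cpu_to_le16_demo(FCTL_PM);
		else
			*frame_control &= (uint16_t)~cpu_to_le16_demo(FCTL_PM);
	}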
+
+/**
+ * ieee80211_mps_sta_status_update - update buffering status of neighbor STA
+ *
+ * @sta: mesh STA
+ *
+ * called after change of peering status or non-peer/peer-specific power mode
+ */
+void ieee80211_mps_sta_status_update(struct sta_info *sta)
+{
+	enum nl80211_mesh_power_mode pm;
+	bool do_buffer;
+
+	/*
+	 * use peer-specific power mode if peering is established and the
+	 * peer's power mode is known
+	 */
+	if (sta->plink_state == NL80211_PLINK_ESTAB &&
+	    sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+		pm = sta->peer_pm;
+	else
+		pm = sta->nonpeer_pm;
+
+	do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
+
+	/* Don't let the same PS state be set twice */
+	if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer)
+		return;
+
+	if (do_buffer) {
+		set_sta_flag(sta, WLAN_STA_PS_STA);
+		atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps);
+		mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n",
+			sta->sta.addr);
+	} else {
+		ieee80211_sta_ps_deliver_wakeup(sta);
+	}
+
+	/* clear the MPSP flags for non-peers or active STA */
+	if (sta->plink_state != NL80211_PLINK_ESTAB) {
+		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+		clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+	} else if (!do_buffer) {
+		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+	}
+}
+
+static void mps_set_sta_peer_pm(struct sta_info *sta,
+				struct ieee80211_hdr *hdr)
+{
+	enum nl80211_mesh_power_mode pm;
+	u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+	/*
+	 * Test Power Management field of frame control (PW) and
+	 * mesh power save level subfield of QoS control field (PSL)
+	 *
+	 * | PM | PSL| Mesh PM |
+	 * +----+----+---------+
+	 * | 0  |Rsrv|  Active |
+	 * | 1  | 0  |  Light  |
+	 * | 1  | 1  |  Deep   |
+	 */
+	if (ieee80211_has_pm(hdr->frame_control)) {
+		if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8))
+			pm = NL80211_MESH_POWER_DEEP_SLEEP;
+		else
+			pm = NL80211_MESH_POWER_LIGHT_SLEEP;
+	} else {
+		pm = NL80211_MESH_POWER_ACTIVE;
+	}
+
+	if (sta->peer_pm == pm)
+		return;
+
+	mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
+		sta->sta.addr, pm);
+
+	sta->peer_pm = pm;
+
+	ieee80211_mps_sta_status_update(sta);
+}
+
+static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
+				   struct ieee80211_hdr *hdr)
+{
+	enum nl80211_mesh_power_mode pm;
+
+	if (ieee80211_has_pm(hdr->frame_control))
+		pm = NL80211_MESH_POWER_DEEP_SLEEP;
+	else
+		pm = NL80211_MESH_POWER_ACTIVE;
+
+	if (sta->nonpeer_pm == pm)
+		return;
+
+	mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
+		sta->sta.addr, pm);
+
+	sta->nonpeer_pm = pm;
+
+	ieee80211_mps_sta_status_update(sta);
+}
+
+/**
+ * ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave
+ *
+ * @sta: STA info that transmitted the frame
+ * @hdr: IEEE 802.11 (QoS) Header
+ */
+void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
+				    struct ieee80211_hdr *hdr)
+{
+	if (is_unicast_ether_addr(hdr->addr1) &&
+	    ieee80211_is_data_qos(hdr->frame_control)) {
+		/*
+		 * individually addressed QoS Data/Null frames contain
+		 * peer link-specific PS mode towards the local STA
+		 */
+		mps_set_sta_peer_pm(sta, hdr);
+
+		/* check for mesh Peer Service Period trigger frames */
+		ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr),
+					       sta, false, false);
+	} else {
+		/*
+		 * can only determine non-peer PS mode
+		 * (see IEEE802.11-2012 8.2.4.1.7)
+		 */
+		mps_set_sta_nonpeer_pm(sta, hdr);
+	}
+}
+
+
+/* mesh PS frame release */
+
+static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct sk_buff *skb;
+	struct ieee80211_hdr *nullfunc;
+	struct ieee80211_tx_info *info;
+	u8 *qc;
+
+	skb = mps_qos_null_get(sta);
+	if (!skb)
+		return;
+
+	nullfunc = (struct ieee80211_hdr *) skb->data;
+	if (!eosp)
+		nullfunc->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	/*
+	 * | RSPI | EOSP |  MPSP triggering   |
+	 * +------+------+--------------------+
+	 * |  0   |  0   | local STA is owner |
+	 * |  0   |  1   | no MPSP (MPSP end) |
+	 * |  1   |  0   | both STA are owner |
+	 * |  1   |  1   | peer STA is owner  | see IEEE802.11-2012 13.14.9.2
+	 */
+	qc = ieee80211_get_qos_ctl(nullfunc);
+	if (rspi)
+		qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8);
+	if (eosp)
+		qc[0] |= IEEE80211_QOS_CTL_EOSP;
+
+	info = IEEE80211_SKB_CB(skb);
+
+	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
+		       IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+	mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n",
+		rspi ? " RSPI" : "", eosp ? " EOSP" : "", sta->sta.addr);
+
+	ieee80211_tx_skb(sdata, skb);
+}
+
+/**
+ * mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed
+ *
+ * To properly end a mesh MPSP the last transmitted frame has to set the EOSP
+ * flag in the QoS Control field. In case the current tailing frame is not a
+ * QoS Data frame, append a QoS Null to carry the flag.
+ */
+static void mpsp_qos_null_append(struct sta_info *sta,
+				 struct sk_buff_head *frames)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct sk_buff *new_skb, *skb = skb_peek_tail(frames);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_tx_info *info;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		return;
+
+	new_skb = mps_qos_null_get(sta);
+	if (!new_skb)
+		return;
+
+	mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n",
+		sta->sta.addr);
+	/*
+	 * This frame has to be transmitted last. Assign lowest priority to
+	 * make sure it cannot pass other frames when releasing multiple ACs.
+	 */
+	new_skb->priority = 1;
+	skb_set_queue_mapping(new_skb, IEEE80211_AC_BK);
+	ieee80211_set_qos_hdr(sdata, new_skb);
+
+	info = IEEE80211_SKB_CB(new_skb);
+	info->control.vif = &sdata->vif;
+	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
+	__skb_queue_tail(frames, new_skb);
+}
+
+/**
+ * mps_frame_deliver - transmit frames during mesh powersave
+ *
+ * @sta: STA info to transmit to
+ * @n_frames: number of frames to transmit. -1 for all
+ */
+static void mps_frame_deliver(struct sta_info *sta, int n_frames)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+	int ac;
+	struct sk_buff_head frames;
+	struct sk_buff *skb;
+	bool more_data = false;
+
+	skb_queue_head_init(&frames);
+
+	/* collect frame(s) from buffers */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		while (n_frames != 0) {
+			skb = skb_dequeue(&sta->tx_filtered[ac]);
+			if (!skb) {
+				skb = skb_dequeue(
+					&sta->ps_tx_buf[ac]);
+				if (skb)
+					local->total_ps_buffered--;
+			}
+			if (!skb)
+				break;
+			n_frames--;
+			__skb_queue_tail(&frames, skb);
+		}
+
+		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
+			more_data = true;
+	}
+
+	/* nothing to send? -> EOSP */
+	if (skb_queue_empty(&frames)) {
+		mpsp_trigger_send(sta, false, true);
+		return;
+	}
+
+	/* in a MPSP make sure the last skb is a QoS Data frame */
+	if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+		mpsp_qos_null_append(sta, &frames);
+
+	mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n",
+		skb_queue_len(&frames), sta->sta.addr);
+
+	/* prepare collected frames for transmission */
+	skb_queue_walk(&frames, skb) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_hdr *hdr = (void *) skb->data;
+
+		/*
+		 * Tell TX path to send this frame even though the
+		 * STA may still remain in PS mode after this frame
+		 * exchange.
+		 */
+		info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+
+		if (more_data || !skb_queue_is_last(&frames, skb))
+			hdr->frame_control |=
+				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+		else
+			hdr->frame_control &=
+				cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+
+		if (skb_queue_is_last(&frames, skb) &&
+		    ieee80211_is_data_qos(hdr->frame_control)) {
+			u8 *qoshdr = ieee80211_get_qos_ctl(hdr);
+
+			/* MPSP trigger frame ends service period */
+			*qoshdr |= IEEE80211_QOS_CTL_EOSP;
+			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+		}
+	}
+
+	ieee80211_add_pending_skbs(local, &frames);
+	sta_info_recalc_tim(sta);
+}
+
+/**
+ * ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods
+ *
+ * @qc: QoS Control field
+ * @sta: peer to start a MPSP with
+ * @tx: frame was transmitted by the local STA
+ * @acked: frame has been transmitted successfully
+ *
+ * NOTE: active mode STA may only serve as MPSP owner
+ */
+void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
+				    bool tx, bool acked)
+{
+	u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8);
+	u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP;
+
+	if (tx) {
+		if (rspi && acked)
+			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+
+		if (eosp)
+			clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
+		else if (acked &&
+			 test_sta_flag(sta, WLAN_STA_PS_STA) &&
+			 !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+			mps_frame_deliver(sta, -1);
+	} else {
+		if (eosp)
+			clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+		else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
+
+		if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+			mps_frame_deliver(sta, -1);
+	}
+}
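Throughout this file the 16-bit QoS Control field is handled as two bytes: masks defined on the whole field keep their 802.11 bit positions, so testing or setting a flag that lives in the upper byte means shifting the mask right by 8 and using qc[1]. A standalone illustration, assuming the mainline mask values:

	#include <stdint.h>
	#include <stdio.h>

	#define QOS_CTL_EOSP		0x0010	/* bit 4 -> low byte, qc[0] */
	#define QOS_CTL_MESH_PS_LEVEL	0x0200	/* bit 9 -> high byte, qc[1] */
	#define QOS_CTL_RSPI		0x0400	/* bit 10 -> high byte, qc[1] */

	int main(void)
	{
		uint8_t qc[2] = { 0, 0 };

		qc[0] |= QOS_CTL_EOSP;		/* low-byte flag, no shift */
		qc[1] |= QOS_CTL_RSPI >> 8;	/* high-byte flag, shift by 8 */

		printf("EOSP=%d RSPI=%d PSL=%d\n",
		       !!(qc[0] & QOS_CTL_EOSP),
		       !!(qc[1] & (QOS_CTL_RSPI >> 8)),
		       !!(qc[1] & (QOS_CTL_MESH_PS_LEVEL >> 8)));
		return 0;
	}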
+
+/**
+ * ieee80211_mps_frame_release - release buffered frames in response to beacon
+ *
+ * @sta: mesh STA
+ * @elems: beacon IEs
+ *
+ * For peers if we have individually-addressed frames buffered or the peer
+ * indicates buffered frames, send a corresponding MPSP trigger frame. Since
+ * we do not evaluate the awake window duration, QoS Nulls are used as MPSP
+ * trigger frames. If the neighbour STA is not a peer, only send single frames.
+ */
+void ieee80211_mps_frame_release(struct sta_info *sta,
+				 struct ieee802_11_elems *elems)
+{
+	int ac, buffer_local = 0;
+	bool has_buffered = false;
+
+	/* TIM map only for LLID <= IEEE80211_MAX_AID */
+	if (sta->plink_state == NL80211_PLINK_ESTAB)
+		has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
+				le16_to_cpu(sta->llid) % IEEE80211_MAX_AID);
+
+	if (has_buffered)
+		mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
+			sta->sta.addr);
+
+	/* only transmit to PS STA with announced, non-zero awake window */
+	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+	    (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+		return;
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
+				skb_queue_len(&sta->tx_filtered[ac]);
+
+	if (!has_buffered && !buffer_local)
+		return;
+
+	if (sta->plink_state == NL80211_PLINK_ESTAB)
+		mpsp_trigger_send(sta, has_buffered, !buffer_local);
+	else
+		mps_frame_deliver(sta, 1);
+}
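Taken together with the mesh_plink.c hunks above, the new entry points are wired up roughly as follows; only call sites visible in this patch are listed, and the RX-path hook for ieee80211_mps_rx_h_sta_process() is added elsewhere in the series, so it is an assumption here:

	/*
	 * peer link ESTAB:       ieee80211_mps_sta_status_update(sta);
	 *                        ieee80211_mps_set_sta_local_pm(sta,
	 *                                        mshcfg->power_mode);
	 * plink open/deactivate: ieee80211_mps_local_status_update(sdata);
	 * beacon from neighbor:  ieee80211_mps_frame_release(sta, elems);
	 * outgoing mesh frames:  ieee80211_mps_set_frame_flags(sdata, sta, hdr)
	 *                        (used above by mps_qos_null_get());
	 * incoming QoS frames:   ieee80211_mps_rx_h_sta_process(sta, hdr)
	 *                        (assumed RX-handler hook, not in this hunk).
	 */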
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index aa8d1e437385..05a256b38e24 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -43,7 +43,7 @@ struct sync_method {
 static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
 {
 	return (ie->mesh_config->meshconf_cap &
-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
+		IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
 }
 
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
@@ -112,7 +112,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 
 	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
 		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
-		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr);
+		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
+			  sta->sta.addr);
 		goto no_sync;
 	}
 
@@ -129,18 +130,15 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 	sta->t_offset = t_t - t_r;
 
 	if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-		s64 t_clockdrift = sta->t_offset_setpoint
-				   - sta->t_offset;
+		s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
 		msync_dbg(sdata,
 			  "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
-			  sta->sta.addr,
-			  (long long) sta->t_offset,
-			  (long long)
-			  sta->t_offset_setpoint,
+			  sta->sta.addr, (long long) sta->t_offset,
+			  (long long) sta->t_offset_setpoint,
 			  (long long) t_clockdrift);
 
 		if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
 		    t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
 			msync_dbg(sdata,
 				  "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
 				  sta->sta.addr,
@@ -149,15 +147,10 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 			goto no_sync;
 		}
 
-		rcu_read_unlock();
-
 		spin_lock_bh(&ifmsh->sync_offset_lock);
-		if (t_clockdrift >
-		    ifmsh->sync_offset_clockdrift_max)
-			ifmsh->sync_offset_clockdrift_max
-				= t_clockdrift;
+		if (t_clockdrift > ifmsh->sync_offset_clockdrift_max)
+			ifmsh->sync_offset_clockdrift_max = t_clockdrift;
 		spin_unlock_bh(&ifmsh->sync_offset_lock);
-
 	} else {
 		sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
 		set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
@@ -165,9 +158,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 			  "STA %pM : offset was invalid, sta->t_offset=%lld\n",
 			  sta->sta.addr,
 			  (long long) sta->t_offset);
-		rcu_read_unlock();
 	}
-	return;
 
 no_sync:
 	rcu_read_unlock();
@@ -177,14 +168,12 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	WARN_ON(ifmsh->mesh_sp_id
-		!= IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
 	BUG_ON(!rcu_read_lock_held());
 
 	spin_lock_bh(&ifmsh->sync_offset_lock);
 
-	if (ifmsh->sync_offset_clockdrift_max >
-	    TOFFSET_MINIMUM_ADJUSTMENT) {
+	if (ifmsh->sync_offset_clockdrift_max > TOFFSET_MINIMUM_ADJUSTMENT) {
 		/* Since adjusting the tsf here would
 		 * require a possibly blocking call
 		 * to the driver tsf setter, we punt
@@ -193,8 +182,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
 		msync_dbg(sdata,
 			  "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
 			  ifmsh->sync_offset_clockdrift_max);
-		set_bit(MESH_WORK_DRIFT_ADJUST,
-			&ifmsh->wrkq_flags);
+		set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
 
 		ifmsh->adjusting_tbtt = true;
 	} else {
@@ -220,14 +208,11 @@ static const struct sync_method sync_methods[] = {
 
 const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
 {
-	const struct ieee80211_mesh_sync_ops *ops = NULL;
-	u8 i;
+	int i;
 
 	for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
-		if (sync_methods[i].method == method) {
-			ops = &sync_methods[i].ops;
-			break;
-		}
+		if (sync_methods[i].method == method)
+			return &sync_methods[i].ops;
 	}
-	return ops;
+	return NULL;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7753a9ca98a6..9f6464f3e05f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,11 +30,13 @@
 #include "rate.h"
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
-#define IEEE80211_AUTH_MAX_TRIES	3
-#define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
-#define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
-#define IEEE80211_ASSOC_MAX_TRIES	3
+#define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
+#define IEEE80211_AUTH_MAX_TRIES	3
+#define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
+#define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
+#define IEEE80211_ASSOC_MAX_TRIES	3
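For reference, these jiffy expressions are tick-rate independent in wall-clock terms: HZ/5 is 200 ms and HZ/10 is 100 ms whatever CONFIG_HZ is set to (with CONFIG_HZ=250, HZ/5 = 50 jiffies x 4 ms = 200 ms; with CONFIG_HZ=1000, 200 jiffies x 1 ms = 200 ms), so the new _SHORT variants simply halve the normal auth/assoc timeouts to 100 ms.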
 
 static int max_nullfunc_tries = 2;
 module_param(max_nullfunc_tries, int, 0644);
@@ -112,6 +114,9 @@ enum rx_mgmt_action {
 
 	/* caller must call cfg80211_send_assoc_timeout() */
 	RX_MGMT_CFG80211_ASSOC_TIMEOUT,
+
+	/* used when a processed beacon causes a deauth */
+	RX_MGMT_CFG80211_TX_DEAUTH,
 };
 
 /* utils */
@@ -172,79 +177,331 @@ static int ecw2cw(int ecw)
 	return (1 << ecw) - 1;
 }
 
-static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
-				  struct ieee80211_ht_operation *ht_oper,
-				  const u8 *bssid, bool reconfig)
+static u32 chandef_downgrade(struct cfg80211_chan_def *c)
+{
+	u32 ret;
+	int tmp;
+
+	switch (c->width) {
+	case NL80211_CHAN_WIDTH_20:
+		c->width = NL80211_CHAN_WIDTH_20_NOHT;
+		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		c->width = NL80211_CHAN_WIDTH_20;
+		c->center_freq1 = c->chan->center_freq;
+		ret = IEEE80211_STA_DISABLE_40MHZ |
+		      IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P40 */
+		tmp /= 2;
+		/* freq_P40 */
+		c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
+		c->width = NL80211_CHAN_WIDTH_40;
+		ret = IEEE80211_STA_DISABLE_VHT;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		c->center_freq2 = 0;
+		c->width = NL80211_CHAN_WIDTH_80;
+		ret = IEEE80211_STA_DISABLE_80P80MHZ |
+		      IEEE80211_STA_DISABLE_160MHZ;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		/* n_P20 */
+		tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+		/* n_P80 */
+		tmp /= 4;
+		c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
+		c->width = NL80211_CHAN_WIDTH_80;
+		ret = IEEE80211_STA_DISABLE_80P80MHZ |
+		      IEEE80211_STA_DISABLE_160MHZ;
+		break;
+	default:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		WARN_ON_ONCE(1);
+		c->width = NL80211_CHAN_WIDTH_20_NOHT;
+		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+		break;
+	}
+
+	WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+
+	return ret;
+}
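The integer arithmetic in the 80 MHz branch deserves a worked example: with the primary channel at 5180 MHz inside an 80 MHz segment centered on 5210 MHz, tmp = (30 + 5180 - 5210)/20 = 0 P20 units, halving gives 40 MHz block index 0, and the new center becomes 5210 - 20 + 40*0 = 5190 MHz, whose 40 MHz span (5170 to 5210) still contains the primary channel. A small stand-alone mirror of that computation:

	#include <stdio.h>

	/* mirrors the 80 -> 40 MHz downgrade arithmetic above (illustrative) */
	static int p40_center(int primary_freq, int center80)
	{
		int tmp = (30 + primary_freq - center80) / 20;	/* n_P20 */

		tmp /= 2;					/* n_P40 */
		return center80 - 20 + 40 * tmp;		/* freq_P40 */
	}

	int main(void)
	{
		printf("%d\n", p40_center(5180, 5210));	/* -> 5190 */
		printf("%d\n", p40_center(5220, 5210));	/* -> 5230 */
		return 0;
	}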
233
234static u32
235ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
236 struct ieee80211_supported_band *sband,
237 struct ieee80211_channel *channel,
238 const struct ieee80211_ht_operation *ht_oper,
239 const struct ieee80211_vht_operation *vht_oper,
240 struct cfg80211_chan_def *chandef, bool verbose)
241{
242 struct cfg80211_chan_def vht_chandef;
243 u32 ht_cfreq, ret;
244
245 chandef->chan = channel;
246 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
247 chandef->center_freq1 = channel->center_freq;
248 chandef->center_freq2 = 0;
249
250 if (!ht_oper || !sband->ht_cap.ht_supported) {
251 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
252 goto out;
253 }
254
255 chandef->width = NL80211_CHAN_WIDTH_20;
256
257 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
258 channel->band);
259 /* check that channel matches the right operating channel */
260 if (channel->center_freq != ht_cfreq) {
261 /*
262 * It's possible that some APs are confused here;
263 * Netgear WNDR3700 sometimes reports 4 higher than
264 * the actual channel in association responses, but
265 * since we look at probe response/beacon data here
266 * it should be OK.
267 */
268 if (verbose)
269 sdata_info(sdata,
270 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
271 channel->center_freq, ht_cfreq,
272 ht_oper->primary_chan, channel->band);
273 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
274 goto out;
275 }
276
277 /* check 40 MHz support, if we have it */
278 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
279 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
280 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
281 chandef->width = NL80211_CHAN_WIDTH_40;
282 chandef->center_freq1 += 10;
283 break;
284 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
285 chandef->width = NL80211_CHAN_WIDTH_40;
286 chandef->center_freq1 -= 10;
287 break;
288 }
289 } else {
290 /* 40 MHz (and 80 MHz) must be supported for VHT */
291 ret = IEEE80211_STA_DISABLE_VHT;
292 goto out;
293 }
294
295 if (!vht_oper || !sband->vht_cap.vht_supported) {
296 ret = IEEE80211_STA_DISABLE_VHT;
297 goto out;
298 }
299
300 vht_chandef.chan = channel;
301 vht_chandef.center_freq1 =
302 ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
303 channel->band);
304 vht_chandef.center_freq2 = 0;
305
306 if (vht_oper->center_freq_seg2_idx)
307 vht_chandef.center_freq2 =
308 ieee80211_channel_to_frequency(
309 vht_oper->center_freq_seg2_idx,
310 channel->band);
311
312 switch (vht_oper->chan_width) {
313 case IEEE80211_VHT_CHANWIDTH_USE_HT:
314 vht_chandef.width = chandef->width;
315 break;
316 case IEEE80211_VHT_CHANWIDTH_80MHZ:
317 vht_chandef.width = NL80211_CHAN_WIDTH_80;
318 break;
319 case IEEE80211_VHT_CHANWIDTH_160MHZ:
320 vht_chandef.width = NL80211_CHAN_WIDTH_160;
321 break;
322 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
323 vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
324 break;
325 default:
326 if (verbose)
327 sdata_info(sdata,
328 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
329 vht_oper->chan_width);
330 ret = IEEE80211_STA_DISABLE_VHT;
331 goto out;
332 }
333
334 if (!cfg80211_chandef_valid(&vht_chandef)) {
335 if (verbose)
336 sdata_info(sdata,
337 "AP VHT information is invalid, disable VHT\n");
338 ret = IEEE80211_STA_DISABLE_VHT;
339 goto out;
340 }
341
342 if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
343 ret = 0;
344 goto out;
345 }
346
347 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
348 if (verbose)
349 sdata_info(sdata,
350 "AP VHT information doesn't match HT, disable VHT\n");
351 ret = IEEE80211_STA_DISABLE_VHT;
352 goto out;
353 }
354
355 *chandef = vht_chandef;
356
357 ret = 0;
358
359out:
360 /* don't print the message below for VHT mismatch if VHT is disabled */
361 if (ret & IEEE80211_STA_DISABLE_VHT)
362 vht_chandef = *chandef;
363
364 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
365 IEEE80211_CHAN_DISABLED)) {
366 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
367 ret = IEEE80211_STA_DISABLE_HT |
368 IEEE80211_STA_DISABLE_VHT;
369 goto out;
370 }
371
372 ret |= chandef_downgrade(chandef);
373 }
374
375 if (chandef->width != vht_chandef.width && verbose)
376 sdata_info(sdata,
377 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
378
379 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
380 return ret;
381}
382
383static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
384 struct sta_info *sta,
385 const struct ieee80211_ht_operation *ht_oper,
386 const struct ieee80211_vht_operation *vht_oper,
387 const u8 *bssid, u32 *changed)
178{ 388{
179 struct ieee80211_local *local = sdata->local; 389 struct ieee80211_local *local = sdata->local;
390 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
180 struct ieee80211_supported_band *sband; 391 struct ieee80211_supported_band *sband;
181 struct ieee80211_chanctx_conf *chanctx_conf;
182 struct ieee80211_channel *chan; 392 struct ieee80211_channel *chan;
183 struct sta_info *sta; 393 struct cfg80211_chan_def chandef;
184 u32 changed = 0;
185 u16 ht_opmode; 394 u16 ht_opmode;
186 bool disable_40 = false; 395 u32 flags;
396 enum ieee80211_sta_rx_bandwidth new_sta_bw;
397 int ret;
187 398
188 rcu_read_lock(); 399 /* if HT was/is disabled, don't track any bandwidth changes */
189 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 400 if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || !ht_oper)
190 if (WARN_ON(!chanctx_conf)) {
191 rcu_read_unlock();
192 return 0; 401 return 0;
193 } 402
194 chan = chanctx_conf->def.chan; 403 /* don't check VHT if we associated as non-VHT station */
195 rcu_read_unlock(); 404 if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
405 vht_oper = NULL;
406
407 if (WARN_ON_ONCE(!sta))
408 return -EINVAL;
409
410 chan = sdata->vif.bss_conf.chandef.chan;
196 sband = local->hw.wiphy->bands[chan->band]; 411 sband = local->hw.wiphy->bands[chan->band];
197 412
198 switch (sdata->vif.bss_conf.chandef.width) { 413 /* calculate new channel (type) based on HT/VHT operation IEs */
414 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
415 vht_oper, &chandef, false);
416
417 /*
418 * Downgrade the new channel if we associated with restricted
419 * capabilities. For example, if we associated as a 20 MHz STA
420 * to a 40 MHz AP (due to regulatory, capabilities or config
421 * reasons) then switching to a 40 MHz channel now won't do us
422 * any good -- we couldn't use it with the AP.
423 */
424 if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
425 chandef.width == NL80211_CHAN_WIDTH_80P80)
426 flags |= chandef_downgrade(&chandef);
427 if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
428 chandef.width == NL80211_CHAN_WIDTH_160)
429 flags |= chandef_downgrade(&chandef);
430 if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
431 chandef.width > NL80211_CHAN_WIDTH_20)
432 flags |= chandef_downgrade(&chandef);
433
434 if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
435 return 0;
436
437 sdata_info(sdata,
438 "AP %pM changed bandwidth, new config is %d MHz, width %d (%d/%d MHz)\n",
439 ifmgd->bssid, chandef.chan->center_freq, chandef.width,
440 chandef.center_freq1, chandef.center_freq2);
441
442 if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
443 IEEE80211_STA_DISABLE_VHT |
444 IEEE80211_STA_DISABLE_40MHZ |
445 IEEE80211_STA_DISABLE_80P80MHZ |
446 IEEE80211_STA_DISABLE_160MHZ)) ||
447 !cfg80211_chandef_valid(&chandef)) {
448 sdata_info(sdata,
449 "AP %pM changed bandwidth in a way we can't support - disconnect\n",
450 ifmgd->bssid);
451 return -EINVAL;
452 }
453
454 switch (chandef.width) {
455 case NL80211_CHAN_WIDTH_20_NOHT:
456 case NL80211_CHAN_WIDTH_20:
457 new_sta_bw = IEEE80211_STA_RX_BW_20;
458 break;
199 case NL80211_CHAN_WIDTH_40: 459 case NL80211_CHAN_WIDTH_40:
200 if (sdata->vif.bss_conf.chandef.chan->center_freq > 460 new_sta_bw = IEEE80211_STA_RX_BW_40;
201 sdata->vif.bss_conf.chandef.center_freq1 &&
202 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
203 disable_40 = true;
204 if (sdata->vif.bss_conf.chandef.chan->center_freq <
205 sdata->vif.bss_conf.chandef.center_freq1 &&
206 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
207 disable_40 = true;
208 break; 461 break;
209 default: 462 case NL80211_CHAN_WIDTH_80:
463 new_sta_bw = IEEE80211_STA_RX_BW_80;
210 break; 464 break;
465 case NL80211_CHAN_WIDTH_80P80:
466 case NL80211_CHAN_WIDTH_160:
467 new_sta_bw = IEEE80211_STA_RX_BW_160;
468 break;
469 default:
470 return -EINVAL;
211 } 471 }
212 472
213 /* This can change during the lifetime of the BSS */ 473 if (new_sta_bw > sta->cur_max_bandwidth)
214 if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) 474 new_sta_bw = sta->cur_max_bandwidth;
215 disable_40 = true;
216 475
217 mutex_lock(&local->sta_mtx); 476 if (new_sta_bw < sta->sta.bandwidth) {
218 sta = sta_info_get(sdata, bssid); 477 sta->sta.bandwidth = new_sta_bw;
219 478 rate_control_rate_update(local, sband, sta,
220 WARN_ON_ONCE(!sta); 479 IEEE80211_RC_BW_CHANGED);
221 480 }
222 if (sta && !sta->supports_40mhz)
223 disable_40 = true;
224
225 if (sta && (!reconfig ||
226 (disable_40 != !(sta->sta.ht_cap.cap &
227 IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
228 481
229 if (disable_40) 482 ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
230 sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 483 if (ret) {
231 else 484 sdata_info(sdata,
232 sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 485 "AP %pM changed bandwidth to incompatible one - disconnect\n",
486 ifmgd->bssid);
487 return ret;
488 }
233 489
490 if (new_sta_bw > sta->sta.bandwidth) {
491 sta->sta.bandwidth = new_sta_bw;
234 rate_control_rate_update(local, sband, sta, 492 rate_control_rate_update(local, sband, sta,
235 IEEE80211_RC_BW_CHANGED); 493 IEEE80211_RC_BW_CHANGED);
236 } 494 }
237 mutex_unlock(&local->sta_mtx);
238 495
239 ht_opmode = le16_to_cpu(ht_oper->operation_mode); 496 ht_opmode = le16_to_cpu(ht_oper->operation_mode);
240 497
241 /* if bss configuration changed store the new one */ 498 /* if bss configuration changed store the new one */
242 if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) { 499 if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
243 changed |= BSS_CHANGED_HT; 500 *changed |= BSS_CHANGED_HT;
244 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 501 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
245 } 502 }
246 503
247 return changed; 504 return 0;
248} 505}
249 506
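One detail worth calling out in the rewritten ieee80211_config_bw() above: the station's bandwidth is reduced before ieee80211_vif_change_bandwidth() and only increased after it succeeds, so rate control never assumes more bandwidth than both the old and the new channel can actually carry. A sketch of that shrink-then-grow ordering, under hypothetical helpers (rate_update(), switch_channel()):

#include <stdio.h>

static int cur_bw = 80;	/* MHz that rate control currently assumes */

static void rate_update(void)    { printf("rate update: %d MHz\n", cur_bw); }
static int  switch_channel(void) { return 0; /* 0 = success */ }

static int change_bw(int new_bw)
{
	if (new_bw < cur_bw) {		/* shrink first: safe on old channel */
		cur_bw = new_bw;
		rate_update();
	}
	if (switch_channel())		/* may fail; cur_bw is still valid */
		return -1;
	if (new_bw > cur_bw) {		/* grow only once the new channel is live */
		cur_bw = new_bw;
		rate_update();
	}
	return 0;
}

int main(void) { return change_bw(40); }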
250/* frame sending functions */ 507/* frame sending functions */
@@ -341,11 +598,13 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
341 598
342static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, 599static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
343 struct sk_buff *skb, 600 struct sk_buff *skb,
344 struct ieee80211_supported_band *sband) 601 struct ieee80211_supported_band *sband,
602 struct ieee80211_vht_cap *ap_vht_cap)
345{ 603{
346 u8 *pos; 604 u8 *pos;
347 u32 cap; 605 u32 cap;
348 struct ieee80211_sta_vht_cap vht_cap; 606 struct ieee80211_sta_vht_cap vht_cap;
607 int i;
349 608
350 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap)); 609 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
351 610
@@ -364,6 +623,42 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
364 cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 623 cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
365 } 624 }
366 625
626 /*
627 * Some APs apparently get confused if our capabilities are better
628 * than theirs, so restrict what we advertise in the assoc request.
629 */
630 if (!(ap_vht_cap->vht_cap_info &
631 cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
632 cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
633
634 if (!(ap_vht_cap->vht_cap_info &
635 cpu_to_le32(IEEE80211_VHT_CAP_TXSTBC)))
636 cap &= ~(IEEE80211_VHT_CAP_RXSTBC_1 |
637 IEEE80211_VHT_CAP_RXSTBC_3 |
638 IEEE80211_VHT_CAP_RXSTBC_4);
639
640 for (i = 0; i < 8; i++) {
641 int shift = i * 2;
642 u16 mask = IEEE80211_VHT_MCS_NOT_SUPPORTED << shift;
643 u16 ap_mcs, our_mcs;
644
645 ap_mcs = (le16_to_cpu(ap_vht_cap->supp_mcs.tx_mcs_map) &
646 mask) >> shift;
647 our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
648 mask) >> shift;
649
650 switch (ap_mcs) {
651 default:
652 if (our_mcs <= ap_mcs)
653 break;
654 /* fall through */
655 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
656 vht_cap.vht_mcs.rx_mcs_map &= cpu_to_le16(~mask);
657 vht_cap.vht_mcs.rx_mcs_map |=
658 cpu_to_le16(ap_mcs << shift);
659 }
660 }
661
367 /* reserve and fill IE */ 662 /* reserve and fill IE */
368 pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); 663 pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
369 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); 664 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
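The new loop above edits the VHT MCS map, which packs one 2-bit "highest supported MCS" code per spatial stream -- eight streams in 16 bits, with 3 meaning "not supported" -- and clamps our advertised RX map down to the AP's TX map. A self-contained sketch of that per-stream clamp, using plain uint16_t values rather than the little-endian IE fields:

#include <stdint.h>
#include <stdio.h>

#define MCS_NOT_SUPPORTED 3	/* 2-bit code, as in the VHT MCS map */

/* clamp each 2-bit per-stream field of "ours" down to "theirs" */
static uint16_t clamp_mcs_map(uint16_t ours, uint16_t theirs)
{
	int i;

	for (i = 0; i < 8; i++) {
		int shift = i * 2;
		uint16_t mask = MCS_NOT_SUPPORTED << shift;
		uint16_t ap = (theirs & mask) >> shift;
		uint16_t us = (ours & mask) >> shift;

		if (ap == MCS_NOT_SUPPORTED || us > ap)
			ours = (ours & ~mask) | (ap << shift);
	}
	return ours;
}

int main(void)
{
	/* stream 0: we claim MCS 0-9 (code 2), the AP only MCS 0-7 (code 0) */
	printf("%#x\n", clamp_mcs_map(0xfffe, 0xfffc));	/* prints 0xfffc */
	return 0;
}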
@@ -562,7 +857,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
562 sband, chan, sdata->smps_mode); 857 sband, chan, sdata->smps_mode);
563 858
564 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 859 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
565 ieee80211_add_vht_ie(sdata, skb, sband); 860 ieee80211_add_vht_ie(sdata, skb, sband,
861 &assoc_data->ap_vht_cap);
566 862
567 /* if present, add any custom non-vendor IEs that go after HT */ 863 /* if present, add any custom non-vendor IEs that go after HT */
568 if (assoc_data->ie_len && assoc_data->ie) { 864 if (assoc_data->ie_len && assoc_data->ie) {
@@ -605,6 +901,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
605 drv_mgd_prepare_tx(local, sdata); 901 drv_mgd_prepare_tx(local, sdata);
606 902
607 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 903 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
904 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
905 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
906 IEEE80211_TX_INTFL_MLME_CONN_TX;
608 ieee80211_tx_skb(sdata, skb); 907 ieee80211_tx_skb(sdata, skb);
609} 908}
610 909
@@ -641,7 +940,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
641 if (powersave) 940 if (powersave)
642 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 941 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
643 942
644 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 943 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
944 IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
645 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 945 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
646 IEEE80211_STA_CONNECTION_POLL)) 946 IEEE80211_STA_CONNECTION_POLL))
647 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; 947 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
@@ -745,10 +1045,10 @@ static void ieee80211_chswitch_timer(unsigned long data)
745 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); 1045 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
746} 1046}
747 1047
748void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1048void
749 struct ieee80211_channel_sw_ie *sw_elem, 1049ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
750 struct ieee80211_bss *bss, 1050 const struct ieee80211_channel_sw_ie *sw_elem,
751 u64 timestamp) 1051 struct ieee80211_bss *bss, u64 timestamp)
752{ 1052{
753 struct cfg80211_bss *cbss = 1053 struct cfg80211_bss *cbss =
754 container_of((void *)bss, struct cfg80211_bss, priv); 1054 container_of((void *)bss, struct cfg80211_bss, priv);
@@ -907,39 +1207,6 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
907 return 0; 1207 return 0;
908} 1208}
909 1209
910void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
911{
912 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
913 struct ieee80211_local *local = sdata->local;
914 struct ieee80211_conf *conf = &local->hw.conf;
915
916 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
917 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
918 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
919
920 local->disable_dynamic_ps = false;
921 conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
922}
923EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
924
925void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
926{
927 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
928 struct ieee80211_local *local = sdata->local;
929 struct ieee80211_conf *conf = &local->hw.conf;
930
931 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
932 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
933 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
934
935 local->disable_dynamic_ps = true;
936 conf->dynamic_ps_timeout = 0;
937 del_timer_sync(&local->dynamic_ps_timer);
938 ieee80211_queue_work(&local->hw,
939 &local->dynamic_ps_enable_work);
940}
941EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
942
943/* powersave */ 1210/* powersave */
944static void ieee80211_enable_ps(struct ieee80211_local *local, 1211static void ieee80211_enable_ps(struct ieee80211_local *local,
945 struct ieee80211_sub_if_data *sdata) 1212 struct ieee80211_sub_if_data *sdata)
@@ -1042,7 +1309,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1042 } 1309 }
1043 1310
1044 if (count == 1 && ieee80211_powersave_allowed(found)) { 1311 if (count == 1 && ieee80211_powersave_allowed(found)) {
1045 struct ieee80211_conf *conf = &local->hw.conf;
1046 s32 beaconint_us; 1312 s32 beaconint_us;
1047 1313
1048 if (latency < 0) 1314 if (latency < 0)
@@ -1066,20 +1332,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1066 else 1332 else
1067 timeout = 100; 1333 timeout = 100;
1068 } 1334 }
1069 local->dynamic_ps_user_timeout = timeout; 1335 local->hw.conf.dynamic_ps_timeout = timeout;
1070 if (!local->disable_dynamic_ps)
1071 conf->dynamic_ps_timeout =
1072 local->dynamic_ps_user_timeout;
1073 1336
1074 if (beaconint_us > latency) { 1337 if (beaconint_us > latency) {
1075 local->ps_sdata = NULL; 1338 local->ps_sdata = NULL;
1076 } else { 1339 } else {
1077 struct ieee80211_bss *bss;
1078 int maxslp = 1; 1340 int maxslp = 1;
1079 u8 dtimper; 1341 u8 dtimper = found->u.mgd.dtim_period;
1080
1081 bss = (void *)found->u.mgd.associated->priv;
1082 dtimper = bss->dtim_period;
1083 1342
1084 /* If the TIM IE is invalid, pretend the value is 1 */ 1343 /* If the TIM IE is invalid, pretend the value is 1 */
1085 if (!dtimper) 1344 if (!dtimper)
@@ -1143,8 +1402,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
1143 if (local->hw.conf.flags & IEEE80211_CONF_PS) 1402 if (local->hw.conf.flags & IEEE80211_CONF_PS)
1144 return; 1403 return;
1145 1404
1146 if (!local->disable_dynamic_ps && 1405 if (local->hw.conf.dynamic_ps_timeout > 0) {
1147 local->hw.conf.dynamic_ps_timeout > 0) {
1148 /* don't enter PS if TX frames are pending */ 1406 /* don't enter PS if TX frames are pending */
1149 if (drv_tx_frames_pending(local)) { 1407 if (drv_tx_frames_pending(local)) {
1150 mod_timer(&local->dynamic_ps_timer, jiffies + 1408 mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -1209,16 +1467,30 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
1209 ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); 1467 ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
1210} 1468}
1211 1469
1470void ieee80211_dfs_cac_timer_work(struct work_struct *work)
1471{
1472 struct delayed_work *delayed_work =
1473 container_of(work, struct delayed_work, work);
1474 struct ieee80211_sub_if_data *sdata =
1475 container_of(delayed_work, struct ieee80211_sub_if_data,
1476 dfs_cac_timer_work);
1477
1478 ieee80211_vif_release_channel(sdata);
1479
1480 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
1481}
1482
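ieee80211_dfs_cac_timer_work() above shows the standard two-step container_of() recovery: the callback is handed only a struct work_struct, which is mapped back first to the delayed_work embedding it and then to the private structure that owns the delayed_work. A userspace sketch of the same pointer arithmetic (with a simplified container_of; the kernel macro is equivalent for this purpose):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; /* + a timer, in real life */ };

struct sub_if_data {			/* stands in for ieee80211_sub_if_data */
	int id;
	struct delayed_work dfs_cac_timer_work;
};

static void cac_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork =
		container_of(work, struct delayed_work, work);
	struct sub_if_data *sdata =
		container_of(dwork, struct sub_if_data, dfs_cac_timer_work);

	printf("CAC finished on sdata %d\n", sdata->id);
}

int main(void)
{
	struct sub_if_data s = { .id = 7 };

	cac_work_fn(&s.dfs_cac_timer_work.work);
	return 0;
}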
1212/* MLME */ 1483/* MLME */
1213static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, 1484static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
1214 struct ieee80211_sub_if_data *sdata, 1485 struct ieee80211_sub_if_data *sdata,
1215 u8 *wmm_param, size_t wmm_param_len) 1486 const u8 *wmm_param, size_t wmm_param_len)
1216{ 1487{
1217 struct ieee80211_tx_queue_params params; 1488 struct ieee80211_tx_queue_params params;
1218 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1489 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1219 size_t left; 1490 size_t left;
1220 int count; 1491 int count;
1221 u8 *pos, uapsd_queues = 0; 1492 const u8 *pos;
1493 u8 uapsd_queues = 0;
1222 1494
1223 if (!local->ops->conf_tx) 1495 if (!local->ops->conf_tx)
1224 return false; 1496 return false;
@@ -1410,10 +1682,18 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1410 1682
1411 ieee80211_led_assoc(local, 1); 1683 ieee80211_led_assoc(local, 1);
1412 1684
1413 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) 1685 if (sdata->u.mgd.assoc_data->have_beacon) {
1414 bss_conf->dtim_period = bss->dtim_period; 1686 /*
1415 else 1687 * If the AP is buggy we may get here with no DTIM period
1688 * known, so assume it's 1 which is the only safe assumption
1689 * in that case, although if the TIM IE is broken powersave
1690 * probably just won't work at all.
1691 */
1692 bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
1693 bss_info_changed |= BSS_CHANGED_DTIM_PERIOD;
1694 } else {
1416 bss_conf->dtim_period = 0; 1695 bss_conf->dtim_period = 0;
1696 }
1417 1697
1418 bss_conf->assoc = 1; 1698 bss_conf->assoc = 1;
1419 1699
@@ -1423,10 +1703,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1423 bss_info_changed |= BSS_CHANGED_CQM; 1703 bss_info_changed |= BSS_CHANGED_CQM;
1424 1704
1425 /* Enable ARP filtering */ 1705 /* Enable ARP filtering */
1426 if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) { 1706 if (bss_conf->arp_addr_cnt)
1427 bss_conf->arp_filter_enabled = sdata->arp_filter_state;
1428 bss_info_changed |= BSS_CHANGED_ARP_FILTER; 1707 bss_info_changed |= BSS_CHANGED_ARP_FILTER;
1429 }
1430 1708
1431 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 1709 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
1432 1710
@@ -1447,7 +1725,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1447{ 1725{
1448 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1726 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1449 struct ieee80211_local *local = sdata->local; 1727 struct ieee80211_local *local = sdata->local;
1450 struct sta_info *sta;
1451 u32 changed = 0; 1728 u32 changed = 0;
1452 1729
1453 ASSERT_MGD_MTX(ifmgd); 1730 ASSERT_MGD_MTX(ifmgd);
@@ -1479,14 +1756,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1479 netif_tx_stop_all_queues(sdata->dev); 1756 netif_tx_stop_all_queues(sdata->dev);
1480 netif_carrier_off(sdata->dev); 1757 netif_carrier_off(sdata->dev);
1481 1758
1482 mutex_lock(&local->sta_mtx);
1483 sta = sta_info_get(sdata, ifmgd->bssid);
1484 if (sta) {
1485 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1486 ieee80211_sta_tear_down_BA_sessions(sta, false);
1487 }
1488 mutex_unlock(&local->sta_mtx);
1489
1490 /* 1759 /*
1491 * if we want to get out of ps before disassoc (why?) we have 1760 * if we want to get out of ps before disassoc (why?) we have
1492 * to do it before sending disassoc, as otherwise the null-packet 1761 * to do it before sending disassoc, as otherwise the null-packet
@@ -1518,7 +1787,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1518 memset(ifmgd->bssid, 0, ETH_ALEN); 1787 memset(ifmgd->bssid, 0, ETH_ALEN);
1519 1788
1520 /* remove AP and TDLS peers */ 1789 /* remove AP and TDLS peers */
1521 sta_info_flush(local, sdata); 1790 sta_info_flush_defer(sdata);
1522 1791
1523 /* finally reset all BSS / config parameters */ 1792 /* finally reset all BSS / config parameters */
1524 changed |= ieee80211_reset_erp_info(sdata); 1793 changed |= ieee80211_reset_erp_info(sdata);
@@ -1540,10 +1809,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1540 cancel_work_sync(&local->dynamic_ps_enable_work); 1809 cancel_work_sync(&local->dynamic_ps_enable_work);
1541 1810
1542 /* Disable ARP filtering */ 1811 /* Disable ARP filtering */
1543 if (sdata->vif.bss_conf.arp_filter_enabled) { 1812 if (sdata->vif.bss_conf.arp_addr_cnt)
1544 sdata->vif.bss_conf.arp_filter_enabled = false;
1545 changed |= BSS_CHANGED_ARP_FILTER; 1813 changed |= BSS_CHANGED_ARP_FILTER;
1546 }
1547 1814
1548 sdata->vif.bss_conf.qos = false; 1815 sdata->vif.bss_conf.qos = false;
1549 changed |= BSS_CHANGED_QOS; 1816 changed |= BSS_CHANGED_QOS;
@@ -1562,6 +1829,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1562 1829
1563 sdata->u.mgd.timers_running = 0; 1830 sdata->u.mgd.timers_running = 0;
1564 1831
1832 sdata->vif.bss_conf.dtim_period = 0;
1833
1565 ifmgd->flags = 0; 1834 ifmgd->flags = 0;
1566 ieee80211_vif_release_channel(sdata); 1835 ieee80211_vif_release_channel(sdata);
1567} 1836}
@@ -1624,17 +1893,18 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1624 if (!ieee80211_is_data(hdr->frame_control)) 1893 if (!ieee80211_is_data(hdr->frame_control))
1625 return; 1894 return;
1626 1895
1627 if (ack)
1628 ieee80211_sta_reset_conn_monitor(sdata);
1629
1630 if (ieee80211_is_nullfunc(hdr->frame_control) && 1896 if (ieee80211_is_nullfunc(hdr->frame_control) &&
1631 sdata->u.mgd.probe_send_count > 0) { 1897 sdata->u.mgd.probe_send_count > 0) {
1632 if (ack) 1898 if (ack)
1633 sdata->u.mgd.probe_send_count = 0; 1899 ieee80211_sta_reset_conn_monitor(sdata);
1634 else 1900 else
1635 sdata->u.mgd.nullfunc_failed = true; 1901 sdata->u.mgd.nullfunc_failed = true;
1636 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 1902 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1903 return;
1637 } 1904 }
1905
1906 if (ack)
1907 ieee80211_sta_reset_conn_monitor(sdata);
1638} 1908}
1639 1909
1640static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) 1910static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
@@ -1675,7 +1945,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1675 ssid_len = ssid[1]; 1945 ssid_len = ssid[1];
1676 1946
1677 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, 1947 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1678 0, (u32) -1, true, false, 1948 0, (u32) -1, true, 0,
1679 ifmgd->associated->channel, false); 1949 ifmgd->associated->channel, false);
1680 rcu_read_unlock(); 1950 rcu_read_unlock();
1681 } 1951 }
@@ -1709,7 +1979,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1709 1979
1710 if (beacon) 1980 if (beacon)
1711 mlme_dbg_ratelimited(sdata, 1981 mlme_dbg_ratelimited(sdata,
1712 "detected beacon loss from AP - sending probe request\n"); 1982 "detected beacon loss from AP - probing\n");
1713 1983
1714 ieee80211_cqm_rssi_notify(&sdata->vif, 1984 ieee80211_cqm_rssi_notify(&sdata->vif,
1715 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL); 1985 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
@@ -1790,11 +2060,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1790} 2060}
1791EXPORT_SYMBOL(ieee80211_ap_probereq_get); 2061EXPORT_SYMBOL(ieee80211_ap_probereq_get);
1792 2062
1793static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata, 2063static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1794 bool transmit_frame)
1795{ 2064{
1796 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2065 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1797 struct ieee80211_local *local = sdata->local;
1798 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 2066 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1799 2067
1800 mutex_lock(&ifmgd->mtx); 2068 mutex_lock(&ifmgd->mtx);
@@ -1805,8 +2073,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
1805 2073
1806 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 2074 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1807 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 2075 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1808 transmit_frame, frame_buf); 2076 true, frame_buf);
1809 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 2077 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
2078 ieee80211_wake_queues_by_reason(&sdata->local->hw,
2079 IEEE80211_QUEUE_STOP_REASON_CSA);
1810 mutex_unlock(&ifmgd->mtx); 2080 mutex_unlock(&ifmgd->mtx);
1811 2081
1812 /* 2082 /*
@@ -1814,10 +2084,6 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
1814 * but that's not a problem. 2084 * but that's not a problem.
1815 */ 2085 */
1816 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); 2086 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
1817
1818 mutex_lock(&local->mtx);
1819 ieee80211_recalc_idle(local);
1820 mutex_unlock(&local->mtx);
1821} 2087}
1822 2088
1823static void ieee80211_beacon_connection_loss_work(struct work_struct *work) 2089static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1836,10 +2102,10 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
1836 rcu_read_unlock(); 2102 rcu_read_unlock();
1837 } 2103 }
1838 2104
1839 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) { 2105 if (ifmgd->connection_loss) {
1840 sdata_info(sdata, "Connection to AP %pM lost\n", 2106 sdata_info(sdata, "Connection to AP %pM lost\n",
1841 ifmgd->bssid); 2107 ifmgd->bssid);
1842 __ieee80211_disconnect(sdata, false); 2108 __ieee80211_disconnect(sdata);
1843 } else { 2109 } else {
1844 ieee80211_mgd_probe_ap(sdata, true); 2110 ieee80211_mgd_probe_ap(sdata, true);
1845 } 2111 }
@@ -1851,9 +2117,7 @@ static void ieee80211_csa_connection_drop_work(struct work_struct *work)
1851 container_of(work, struct ieee80211_sub_if_data, 2117 container_of(work, struct ieee80211_sub_if_data,
1852 u.mgd.csa_connection_drop_work); 2118 u.mgd.csa_connection_drop_work);
1853 2119
1854 ieee80211_wake_queues_by_reason(&sdata->local->hw, 2120 __ieee80211_disconnect(sdata);
1855 IEEE80211_QUEUE_STOP_REASON_CSA);
1856 __ieee80211_disconnect(sdata, true);
1857} 2121}
1858 2122
1859void ieee80211_beacon_loss(struct ieee80211_vif *vif) 2123void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -1864,6 +2128,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1864 trace_api_beacon_loss(sdata); 2128 trace_api_beacon_loss(sdata);
1865 2129
1866 WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR); 2130 WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
2131 sdata->u.mgd.connection_loss = false;
1867 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); 2132 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
1868} 2133}
1869EXPORT_SYMBOL(ieee80211_beacon_loss); 2134EXPORT_SYMBOL(ieee80211_beacon_loss);
@@ -1875,7 +2140,7 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
1875 2140
1876 trace_api_connection_loss(sdata); 2141 trace_api_connection_loss(sdata);
1877 2142
1878 WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR)); 2143 sdata->u.mgd.connection_loss = true;
1879 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); 2144 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
1880} 2145}
1881EXPORT_SYMBOL(ieee80211_connection_loss); 2146EXPORT_SYMBOL(ieee80211_connection_loss);
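With these hunks the loss worker no longer keys off the IEEE80211_HW_CONNECTION_MONITOR hardware flag: ieee80211_connection_loss() sets a per-interface connection_loss boolean, while ieee80211_beacon_loss() and the beacon-monitor timer clear it, so the one shared work item can distinguish a driver-reported hard loss (disconnect) from a merely missed beacon (probe the AP first). A sketch of that dispatch, calling the worker directly instead of queueing it:

#include <stdbool.h>
#include <stdio.h>

static bool connection_loss;

static void loss_work(void)		/* one work item serves both events */
{
	if (connection_loss)
		printf("connection lost - disconnect\n");
	else
		printf("beacon lost - probe the AP first\n");
}

static void report_beacon_loss(void)     { connection_loss = false; loss_work(); }
static void report_connection_loss(void) { connection_loss = true;  loss_work(); }

int main(void)
{
	report_beacon_loss();
	report_connection_loss();
	return 0;
}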
@@ -1897,7 +2162,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1897 ieee80211_vif_release_channel(sdata); 2162 ieee80211_vif_release_channel(sdata);
1898 } 2163 }
1899 2164
1900 cfg80211_put_bss(auth_data->bss); 2165 cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
1901 kfree(auth_data); 2166 kfree(auth_data);
1902 sdata->u.mgd.auth_data = NULL; 2167 sdata->u.mgd.auth_data = NULL;
1903} 2168}
@@ -1905,9 +2170,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1905static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, 2170static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1906 struct ieee80211_mgmt *mgmt, size_t len) 2171 struct ieee80211_mgmt *mgmt, size_t len)
1907{ 2172{
2173 struct ieee80211_local *local = sdata->local;
1908 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; 2174 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1909 u8 *pos; 2175 u8 *pos;
1910 struct ieee802_11_elems elems; 2176 struct ieee802_11_elems elems;
2177 u32 tx_flags = 0;
1911 2178
1912 pos = mgmt->u.auth.variable; 2179 pos = mgmt->u.auth.variable;
1913 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 2180 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -1915,11 +2182,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1915 return; 2182 return;
1916 auth_data->expected_transaction = 4; 2183 auth_data->expected_transaction = 4;
1917 drv_mgd_prepare_tx(sdata->local, sdata); 2184 drv_mgd_prepare_tx(sdata->local, sdata);
2185 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
2186 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
2187 IEEE80211_TX_INTFL_MLME_CONN_TX;
1918 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, 2188 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
1919 elems.challenge - 2, elems.challenge_len + 2, 2189 elems.challenge - 2, elems.challenge_len + 2,
1920 auth_data->bss->bssid, auth_data->bss->bssid, 2190 auth_data->bss->bssid, auth_data->bss->bssid,
1921 auth_data->key, auth_data->key_len, 2191 auth_data->key, auth_data->key_len,
1922 auth_data->key_idx); 2192 auth_data->key_idx, tx_flags);
1923} 2193}
1924 2194
1925static enum rx_mgmt_action __must_check 2195static enum rx_mgmt_action __must_check
@@ -1986,6 +2256,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1986 sdata_info(sdata, "authenticated\n"); 2256 sdata_info(sdata, "authenticated\n");
1987 ifmgd->auth_data->done = true; 2257 ifmgd->auth_data->done = true;
1988 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; 2258 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
2259 ifmgd->auth_data->timeout_started = true;
1989 run_again(ifmgd, ifmgd->auth_data->timeout); 2260 run_again(ifmgd, ifmgd->auth_data->timeout);
1990 2261
1991 if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && 2262 if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
@@ -2044,10 +2315,6 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
2044 2315
2045 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 2316 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2046 2317
2047 mutex_lock(&sdata->local->mtx);
2048 ieee80211_recalc_idle(sdata->local);
2049 mutex_unlock(&sdata->local->mtx);
2050
2051 return RX_MGMT_CFG80211_DEAUTH; 2318 return RX_MGMT_CFG80211_DEAUTH;
2052} 2319}
2053 2320
@@ -2075,10 +2342,6 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
2075 2342
2076 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 2343 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2077 2344
2078 mutex_lock(&sdata->local->mtx);
2079 ieee80211_recalc_idle(sdata->local);
2080 mutex_unlock(&sdata->local->mtx);
2081
2082 return RX_MGMT_CFG80211_DISASSOC; 2345 return RX_MGMT_CFG80211_DISASSOC;
2083} 2346}
2084 2347
@@ -2188,6 +2451,24 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2188 2451
2189 ifmgd->aid = aid; 2452 ifmgd->aid = aid;
2190 2453
2454 /*
2455 * We previously checked these in the beacon/probe response, so
2456 * they should be present here. This is just a safety net.
2457 */
2458 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2459 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
2460 sdata_info(sdata,
2461 "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
2462 return false;
2463 }
2464
2465 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2466 (!elems.vht_cap_elem || !elems.vht_operation)) {
2467 sdata_info(sdata,
2468 "VHT AP is missing VHT capability/operation in AssocResp\n");
2469 return false;
2470 }
2471
2191 mutex_lock(&sdata->local->sta_mtx); 2472 mutex_lock(&sdata->local->sta_mtx);
2192 /* 2473 /*
2193 * station info was already allocated and inserted before 2474 * station info was already allocated and inserted before
@@ -2201,17 +2482,36 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2201 2482
2202 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; 2483 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
2203 2484
2485 /* Set up internal HT/VHT capabilities */
2204 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) 2486 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2205 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2487 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
2206 elems.ht_cap_elem, &sta->sta.ht_cap); 2488 elems.ht_cap_elem, sta);
2207
2208 sta->supports_40mhz =
2209 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
2210 2489
2211 if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 2490 if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
2212 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 2491 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
2213 elems.vht_cap_elem, 2492 elems.vht_cap_elem, sta);
2214 &sta->sta.vht_cap); 2493
2494 /*
2495 * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
2496 * in their association response, so ignore that data for our own
2497 * configuration. If it changed since the last beacon, we'll get the
2498 * next beacon and update then.
2499 */
2500
2501 /*
2502 * If an operating mode notification IE is present, override the
2503 * NSS calculation (that would be done in rate_control_rate_init())
2504 * and use the # of streams from that element.
2505 */
2506 if (elems.opmode_notif &&
2507 !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
2508 u8 nss;
2509
2510 nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
2511 nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
2512 nss += 1;
2513 sta->sta.rx_nss = nss;
2514 }
2215 2515
2216 rate_control_rate_init(sta); 2516 rate_control_rate_init(sta);
2217 2517
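The opmode_notif block above decodes the 802.11ac operating mode field: the NSS subfield carries "RX NSS minus one", so the value is masked, shifted and incremented before overriding sta->sta.rx_nss, and the whole override is skipped when the type bit marks the field as beamforming-related. A sketch with stand-in constants for the ieee80211.h masks (assumed layout: NSS in bits 4-6, type flag in bit 7):

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the IEEE80211_OPMODE_NOTIF_* definitions */
#define OPMODE_RX_NSS_MASK	0x70
#define OPMODE_RX_NSS_SHIFT	4
#define OPMODE_RX_NSS_TYPE_BF	0x80

static int opmode_to_rx_nss(uint8_t opmode)
{
	if (opmode & OPMODE_RX_NSS_TYPE_BF)
		return -1;	/* beamforming type: leave NSS alone */
	return ((opmode & OPMODE_RX_NSS_MASK) >> OPMODE_RX_NSS_SHIFT) + 1;
}

int main(void)
{
	printf("%d\n", opmode_to_rx_nss(0x10));	/* subfield 1 -> 2 streams */
	return 0;
}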
@@ -2221,9 +2521,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2221 if (elems.wmm_param) 2521 if (elems.wmm_param)
2222 set_sta_flag(sta, WLAN_STA_WME); 2522 set_sta_flag(sta, WLAN_STA_WME);
2223 2523
2224 err = sta_info_move_state(sta, IEEE80211_STA_AUTH); 2524 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2225 if (!err)
2226 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2227 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 2525 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2228 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 2526 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
2229 if (err) { 2527 if (err) {
@@ -2252,11 +2550,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2252 ieee80211_set_wmm_default(sdata, false); 2550 ieee80211_set_wmm_default(sdata, false);
2253 changed |= BSS_CHANGED_QOS; 2551 changed |= BSS_CHANGED_QOS;
2254 2552
2255 if (elems.ht_operation && elems.wmm_param &&
2256 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2257 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2258 cbss->bssid, false);
2259
2260 /* set AID and assoc capability, 2553 /* set AID and assoc capability,
2261 * ieee80211_set_associated() will tell the driver */ 2554 * ieee80211_set_associated() will tell the driver */
2262 bss_conf->aid = aid; 2555 bss_conf->aid = aid;
@@ -2330,6 +2623,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2330 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", 2623 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
2331 mgmt->sa, tu, ms); 2624 mgmt->sa, tu, ms);
2332 assoc_data->timeout = jiffies + msecs_to_jiffies(ms); 2625 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2626 assoc_data->timeout_started = true;
2333 if (ms > IEEE80211_ASSOC_TIMEOUT) 2627 if (ms > IEEE80211_ASSOC_TIMEOUT)
2334 run_again(ifmgd, assoc_data->timeout); 2628 run_again(ifmgd, assoc_data->timeout);
2335 return RX_MGMT_NONE; 2629 return RX_MGMT_NONE;
@@ -2345,7 +2639,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2345 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { 2639 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2346 /* oops -- internal error -- send timeout for now */ 2640 /* oops -- internal error -- send timeout for now */
2347 ieee80211_destroy_assoc_data(sdata, false); 2641 ieee80211_destroy_assoc_data(sdata, false);
2348 cfg80211_put_bss(*bss); 2642 cfg80211_put_bss(sdata->local->hw.wiphy, *bss);
2349 return RX_MGMT_CFG80211_ASSOC_TIMEOUT; 2643 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2350 } 2644 }
2351 sdata_info(sdata, "associated\n"); 2645 sdata_info(sdata, "associated\n");
@@ -2364,8 +2658,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2364static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 2658static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2365 struct ieee80211_mgmt *mgmt, size_t len, 2659 struct ieee80211_mgmt *mgmt, size_t len,
2366 struct ieee80211_rx_status *rx_status, 2660 struct ieee80211_rx_status *rx_status,
2367 struct ieee802_11_elems *elems, 2661 struct ieee802_11_elems *elems)
2368 bool beacon)
2369{ 2662{
2370 struct ieee80211_local *local = sdata->local; 2663 struct ieee80211_local *local = sdata->local;
2371 int freq; 2664 int freq;
@@ -2373,11 +2666,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2373 struct ieee80211_channel *channel; 2666 struct ieee80211_channel *channel;
2374 bool need_ps = false; 2667 bool need_ps = false;
2375 2668
2376 if (sdata->u.mgd.associated && 2669 if ((sdata->u.mgd.associated &&
2377 ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) { 2670 ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
2378 bss = (void *)sdata->u.mgd.associated->priv; 2671 (sdata->u.mgd.assoc_data &&
2672 ether_addr_equal(mgmt->bssid,
2673 sdata->u.mgd.assoc_data->bss->bssid))) {
2379 /* not previously set so we may need to recalc */ 2674 /* not previously set so we may need to recalc */
2380 need_ps = !bss->dtim_period; 2675 need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
2676
2677 if (elems->tim && !elems->parse_error) {
2678 const struct ieee80211_tim_ie *tim_ie = elems->tim;
2679 sdata->u.mgd.dtim_period = tim_ie->dtim_period;
2680 }
2381 } 2681 }
2382 2682
2383 if (elems->ds_params && elems->ds_params_len == 1) 2683 if (elems->ds_params && elems->ds_params_len == 1)
@@ -2392,7 +2692,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2392 return; 2692 return;
2393 2693
2394 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 2694 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
2395 channel, beacon); 2695 channel);
2396 if (bss) 2696 if (bss)
2397 ieee80211_rx_bss_put(local, bss); 2697 ieee80211_rx_bss_put(local, bss);
2398 2698
@@ -2435,7 +2735,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2435 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 2735 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2436 &elems); 2736 &elems);
2437 2737
2438 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 2738 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2439 2739
2440 if (ifmgd->associated && 2740 if (ifmgd->associated &&
2441 ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) 2741 ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
@@ -2447,6 +2747,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2447 sdata_info(sdata, "direct probe responded\n"); 2747 sdata_info(sdata, "direct probe responded\n");
2448 ifmgd->auth_data->tries = 0; 2748 ifmgd->auth_data->tries = 0;
2449 ifmgd->auth_data->timeout = jiffies; 2749 ifmgd->auth_data->timeout = jiffies;
2750 ifmgd->auth_data->timeout_started = true;
2450 run_again(ifmgd, ifmgd->auth_data->timeout); 2751 run_again(ifmgd, ifmgd->auth_data->timeout);
2451 } 2752 }
2452} 2753}
@@ -2472,10 +2773,10 @@ static const u64 care_about_ies =
2472 (1ULL << WLAN_EID_HT_CAPABILITY) | 2773 (1ULL << WLAN_EID_HT_CAPABILITY) |
2473 (1ULL << WLAN_EID_HT_OPERATION); 2774 (1ULL << WLAN_EID_HT_OPERATION);
2474 2775
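care_about_ies above is a u64 bitmap over element IDs (all below 64): the beacon path computes its CRC only over the elements whose bit is set, which is why the ncrc comparison further down can short-circuit beacons whose interesting contents have not changed. A sketch of membership testing in such a bitmap (the IDs shown match the usual 802.11 assignments, but treat them as illustrative):

#include <stdint.h>
#include <stdio.h>

enum { EID_DS_PARAMS = 3, EID_ERP_INFO = 42, EID_HT_OPERATION = 61 };

static const uint64_t care_about =
	(1ULL << EID_DS_PARAMS) |
	(1ULL << EID_ERP_INFO) |
	(1ULL << EID_HT_OPERATION);

static int interesting(uint8_t eid)
{
	return eid < 64 && (care_about & (1ULL << eid)) != 0;
}

int main(void)
{
	printf("%d %d\n", interesting(EID_ERP_INFO), interesting(5)); /* 1 0 */
	return 0;
}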
2475static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 2776static enum rx_mgmt_action
2476 struct ieee80211_mgmt *mgmt, 2777ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2477 size_t len, 2778 struct ieee80211_mgmt *mgmt, size_t len,
2478 struct ieee80211_rx_status *rx_status) 2779 u8 *deauth_buf, struct ieee80211_rx_status *rx_status)
2479{ 2780{
2480 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2781 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2481 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2782 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
@@ -2484,6 +2785,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2484 struct ieee80211_local *local = sdata->local; 2785 struct ieee80211_local *local = sdata->local;
2485 struct ieee80211_chanctx_conf *chanctx_conf; 2786 struct ieee80211_chanctx_conf *chanctx_conf;
2486 struct ieee80211_channel *chan; 2787 struct ieee80211_channel *chan;
2788 struct sta_info *sta;
2487 u32 changed = 0; 2789 u32 changed = 0;
2488 bool erp_valid; 2790 bool erp_valid;
2489 u8 erp_value = 0; 2791 u8 erp_value = 0;
@@ -2495,40 +2797,51 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2495 /* Process beacon from the current BSS */ 2797 /* Process beacon from the current BSS */
2496 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2798 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2497 if (baselen > len) 2799 if (baselen > len)
2498 return; 2800 return RX_MGMT_NONE;
2499 2801
2500 rcu_read_lock(); 2802 rcu_read_lock();
2501 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 2803 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2502 if (!chanctx_conf) { 2804 if (!chanctx_conf) {
2503 rcu_read_unlock(); 2805 rcu_read_unlock();
2504 return; 2806 return RX_MGMT_NONE;
2505 } 2807 }
2506 2808
2507 if (rx_status->freq != chanctx_conf->def.chan->center_freq) { 2809 if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
2508 rcu_read_unlock(); 2810 rcu_read_unlock();
2509 return; 2811 return RX_MGMT_NONE;
2510 } 2812 }
2511 chan = chanctx_conf->def.chan; 2813 chan = chanctx_conf->def.chan;
2512 rcu_read_unlock(); 2814 rcu_read_unlock();
2513 2815
2514 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && 2816 if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
2515 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { 2817 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
2516 ieee802_11_parse_elems(mgmt->u.beacon.variable, 2818 ieee802_11_parse_elems(mgmt->u.beacon.variable,
2517 len - baselen, &elems); 2819 len - baselen, &elems);
2518 2820
2519 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, 2821 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2520 false);
2521 ifmgd->assoc_data->have_beacon = true; 2822 ifmgd->assoc_data->have_beacon = true;
2522 ifmgd->assoc_data->sent_assoc = false; 2823 ifmgd->assoc_data->need_beacon = false;
2824 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
2825 sdata->vif.bss_conf.sync_tsf =
2826 le64_to_cpu(mgmt->u.beacon.timestamp);
2827 sdata->vif.bss_conf.sync_device_ts =
2828 rx_status->device_timestamp;
2829 if (elems.tim)
2830 sdata->vif.bss_conf.sync_dtim_count =
2831 elems.tim->dtim_count;
2832 else
2833 sdata->vif.bss_conf.sync_dtim_count = 0;
2834 }
2523 /* continue assoc process */ 2835 /* continue assoc process */
2524 ifmgd->assoc_data->timeout = jiffies; 2836 ifmgd->assoc_data->timeout = jiffies;
2837 ifmgd->assoc_data->timeout_started = true;
2525 run_again(ifmgd, ifmgd->assoc_data->timeout); 2838 run_again(ifmgd, ifmgd->assoc_data->timeout);
2526 return; 2839 return RX_MGMT_NONE;
2527 } 2840 }
2528 2841
2529 if (!ifmgd->associated || 2842 if (!ifmgd->associated ||
2530 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) 2843 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
2531 return; 2844 return RX_MGMT_NONE;
2532 bssid = ifmgd->associated->bssid; 2845 bssid = ifmgd->associated->bssid;
2533 2846
2534 /* Track average RSSI from the Beacon frames of the current AP */ 2847 /* Track average RSSI from the Beacon frames of the current AP */
@@ -2559,12 +2872,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2559 if (sig > ifmgd->rssi_max_thold && 2872 if (sig > ifmgd->rssi_max_thold &&
2560 (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) { 2873 (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
2561 ifmgd->last_ave_beacon_signal = sig; 2874 ifmgd->last_ave_beacon_signal = sig;
2562 drv_rssi_callback(local, RSSI_EVENT_HIGH); 2875 drv_rssi_callback(local, sdata, RSSI_EVENT_HIGH);
2563 } else if (sig < ifmgd->rssi_min_thold && 2876 } else if (sig < ifmgd->rssi_min_thold &&
2564 (last_sig >= ifmgd->rssi_max_thold || 2877 (last_sig >= ifmgd->rssi_max_thold ||
2565 last_sig == 0)) { 2878 last_sig == 0)) {
2566 ifmgd->last_ave_beacon_signal = sig; 2879 ifmgd->last_ave_beacon_signal = sig;
2567 drv_rssi_callback(local, RSSI_EVENT_LOW); 2880 drv_rssi_callback(local, sdata, RSSI_EVENT_LOW);
2568 } 2881 }
2569 } 2882 }
2570 2883
@@ -2594,7 +2907,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2594 2907
2595 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { 2908 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
2596 mlme_dbg_ratelimited(sdata, 2909 mlme_dbg_ratelimited(sdata,
2597 "cancelling probereq poll due to a received beacon\n"); 2910 "cancelling AP probe due to a received beacon\n");
2598 mutex_lock(&local->mtx); 2911 mutex_lock(&local->mtx);
2599 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2912 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
2600 ieee80211_run_deferred_scan(local); 2913 ieee80211_run_deferred_scan(local);
@@ -2666,17 +2979,42 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2666 } 2979 }
2667 2980
2668 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) 2981 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
2669 return; 2982 return RX_MGMT_NONE;
2670 ifmgd->beacon_crc = ncrc; 2983 ifmgd->beacon_crc = ncrc;
2671 ifmgd->beacon_crc_valid = true; 2984 ifmgd->beacon_crc_valid = true;
2672 2985
2673 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, 2986 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2674 true);
2675 2987
2676 if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, 2988 if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
2677 elems.wmm_param_len)) 2989 elems.wmm_param_len))
2678 changed |= BSS_CHANGED_QOS; 2990 changed |= BSS_CHANGED_QOS;
2679 2991
2992 /*
2993 * If we haven't had a beacon before, tell the driver about the
2994 * DTIM period (and beacon timing if desired) now.
2995 */
2996 if (!bss_conf->dtim_period) {
 2997		/* a few bogus APs send dtim_period = 0 or omit the TIM IE */
2998 if (elems.tim)
2999 bss_conf->dtim_period = elems.tim->dtim_period ?: 1;
3000 else
3001 bss_conf->dtim_period = 1;
3002
3003 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
3004 sdata->vif.bss_conf.sync_tsf =
3005 le64_to_cpu(mgmt->u.beacon.timestamp);
3006 sdata->vif.bss_conf.sync_device_ts =
3007 rx_status->device_timestamp;
3008 if (elems.tim)
3009 sdata->vif.bss_conf.sync_dtim_count =
3010 elems.tim->dtim_count;
3011 else
3012 sdata->vif.bss_conf.sync_dtim_count = 0;
3013 }
3014
3015 changed |= BSS_CHANGED_DTIM_PERIOD;
3016 }
3017
2680 if (elems.erp_info && elems.erp_info_len >= 1) { 3018 if (elems.erp_info && elems.erp_info_len >= 1) {
2681 erp_valid = true; 3019 erp_valid = true;
2682 erp_value = elems.erp_info[0]; 3020 erp_value = elems.erp_info[0];
@@ -2687,11 +3025,22 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2687 le16_to_cpu(mgmt->u.beacon.capab_info), 3025 le16_to_cpu(mgmt->u.beacon.capab_info),
2688 erp_valid, erp_value); 3026 erp_valid, erp_value);
2689 3027
3028 mutex_lock(&local->sta_mtx);
3029 sta = sta_info_get(sdata, bssid);
3030
3031 if (ieee80211_config_bw(sdata, sta, elems.ht_operation,
3032 elems.vht_operation, bssid, &changed)) {
3033 mutex_unlock(&local->sta_mtx);
3034 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3035 WLAN_REASON_DEAUTH_LEAVING,
3036 true, deauth_buf);
3037 return RX_MGMT_CFG80211_TX_DEAUTH;
3038 }
2690 3039
2691 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && 3040 if (sta && elems.opmode_notif)
2692 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) 3041 ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
2693 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 3042 rx_status->band, true);
2694 bssid, true); 3043 mutex_unlock(&local->sta_mtx);
2695 3044
2696 if (elems.country_elem && elems.pwr_constr_elem && 3045 if (elems.country_elem && elems.pwr_constr_elem &&
2697 mgmt->u.probe_resp.capab_info & 3046 mgmt->u.probe_resp.capab_info &
@@ -2702,6 +3051,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2702 elems.pwr_constr_elem); 3051 elems.pwr_constr_elem);
2703 3052
2704 ieee80211_bss_info_change_notify(sdata, changed); 3053 ieee80211_bss_info_change_notify(sdata, changed);
3054
3055 return RX_MGMT_NONE;
2705} 3056}
2706 3057
2707void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 3058void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -2712,6 +3063,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2712 struct ieee80211_mgmt *mgmt; 3063 struct ieee80211_mgmt *mgmt;
2713 struct cfg80211_bss *bss = NULL; 3064 struct cfg80211_bss *bss = NULL;
2714 enum rx_mgmt_action rma = RX_MGMT_NONE; 3065 enum rx_mgmt_action rma = RX_MGMT_NONE;
3066 u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
2715 u16 fc; 3067 u16 fc;
2716 3068
2717 rx_status = (struct ieee80211_rx_status *) skb->cb; 3069 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -2722,7 +3074,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2722 3074
2723 switch (fc & IEEE80211_FCTL_STYPE) { 3075 switch (fc & IEEE80211_FCTL_STYPE) {
2724 case IEEE80211_STYPE_BEACON: 3076 case IEEE80211_STYPE_BEACON:
2725 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); 3077 rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
3078 deauth_buf, rx_status);
2726 break; 3079 break;
2727 case IEEE80211_STYPE_PROBE_RESP: 3080 case IEEE80211_STYPE_PROBE_RESP:
2728 ieee80211_rx_mgmt_probe_resp(sdata, skb); 3081 ieee80211_rx_mgmt_probe_resp(sdata, skb);
@@ -2771,6 +3124,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2771 case RX_MGMT_CFG80211_ASSOC_TIMEOUT: 3124 case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
2772 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); 3125 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
2773 break; 3126 break;
3127 case RX_MGMT_CFG80211_TX_DEAUTH:
3128 cfg80211_send_deauth(sdata->dev, deauth_buf,
3129 sizeof(deauth_buf));
3130 break;
2774 default: 3131 default:
2775 WARN(1, "unexpected: %d", rma); 3132 WARN(1, "unexpected: %d", rma);
2776 } 3133 }
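The beacon handler can now fail hard: while still holding its locks it builds the deauth frame into the caller-provided deauth_buf and returns RX_MGMT_CFG80211_TX_DEAUTH, and the dispatcher above reports the buffered frame to cfg80211 only after the locks are dropped. A sketch of that "fill now, report later" shape, with a placeholder frame length and contents:

#include <stdio.h>
#include <string.h>

#define DEAUTH_FRAME_LEN 26	/* placeholder size for the sketch */

enum rx_mgmt { RX_NONE, RX_TX_DEAUTH };

/* runs "locked": may only fill the buffer, must not report upward */
static enum rx_mgmt handle_beacon(int bad_bw, char buf[DEAUTH_FRAME_LEN])
{
	if (!bad_bw)
		return RX_NONE;
	memset(buf, 0xc0, DEAUTH_FRAME_LEN);	/* pretend deauth frame */
	return RX_TX_DEAUTH;
}

int main(void)
{
	char deauth_buf[DEAUTH_FRAME_LEN];

	if (handle_beacon(1, deauth_buf) == RX_TX_DEAUTH)	/* unlocked now */
		printf("report %zu-byte deauth to userspace\n",
		       sizeof(deauth_buf));
	return 0;
}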
@@ -2792,14 +3149,13 @@ static void ieee80211_sta_timer(unsigned long data)
2792} 3149}
2793 3150
2794static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, 3151static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2795 u8 *bssid, u8 reason) 3152 u8 *bssid, u8 reason, bool tx)
2796{ 3153{
2797 struct ieee80211_local *local = sdata->local;
2798 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3154 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2799 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 3155 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2800 3156
2801 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, 3157 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2802 false, frame_buf); 3158 tx, frame_buf);
2803 mutex_unlock(&ifmgd->mtx); 3159 mutex_unlock(&ifmgd->mtx);
2804 3160
2805 /* 3161 /*
@@ -2808,10 +3164,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2808 */ 3164 */
2809 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); 3165 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
2810 3166
2811 mutex_lock(&local->mtx);
2812 ieee80211_recalc_idle(local);
2813 mutex_unlock(&local->mtx);
2814
2815 mutex_lock(&ifmgd->mtx); 3167 mutex_lock(&ifmgd->mtx);
2816} 3168}
2817 3169
@@ -2820,12 +3172,17 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2820 struct ieee80211_local *local = sdata->local; 3172 struct ieee80211_local *local = sdata->local;
2821 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3173 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2822 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; 3174 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
3175 u32 tx_flags = 0;
2823 3176
2824 lockdep_assert_held(&ifmgd->mtx); 3177 lockdep_assert_held(&ifmgd->mtx);
2825 3178
2826 if (WARN_ON_ONCE(!auth_data)) 3179 if (WARN_ON_ONCE(!auth_data))
2827 return -EINVAL; 3180 return -EINVAL;
2828 3181
3182 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
3183 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
3184 IEEE80211_TX_INTFL_MLME_CONN_TX;
3185
2829 auth_data->tries++; 3186 auth_data->tries++;
2830 3187
2831 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { 3188 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -2862,7 +3219,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2862 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, 3219 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
2863 auth_data->data, auth_data->data_len, 3220 auth_data->data, auth_data->data_len,
2864 auth_data->bss->bssid, 3221 auth_data->bss->bssid,
2865 auth_data->bss->bssid, NULL, 0, 0); 3222 auth_data->bss->bssid, NULL, 0, 0,
3223 tx_flags);
2866 } else { 3224 } else {
2867 const u8 *ssidie; 3225 const u8 *ssidie;
2868 3226
@@ -2881,13 +3239,18 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 2881	 * will not answer direct packets in the unassociated state.	3239
2882 */ 3240 */
2883 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 3241 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2884 NULL, 0, (u32) -1, true, false, 3242 NULL, 0, (u32) -1, true, tx_flags,
2885 auth_data->bss->channel, false); 3243 auth_data->bss->channel, false);
2886 rcu_read_unlock(); 3244 rcu_read_unlock();
2887 } 3245 }
2888 3246
2889 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3247 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
2890 run_again(ifmgd, auth_data->timeout); 3248 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
3249 ifmgd->auth_data->timeout_started = true;
3250 run_again(ifmgd, auth_data->timeout);
3251 } else {
3252 auth_data->timeout_started = false;
3253 }
2891 3254
2892 return 0; 3255 return 0;
2893} 3256}
@@ -2918,12 +3281,29 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2918 IEEE80211_ASSOC_MAX_TRIES); 3281 IEEE80211_ASSOC_MAX_TRIES);
2919 ieee80211_send_assoc(sdata); 3282 ieee80211_send_assoc(sdata);
2920 3283
2921 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; 3284 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
2922 run_again(&sdata->u.mgd, assoc_data->timeout); 3285 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
3286 assoc_data->timeout_started = true;
3287 run_again(&sdata->u.mgd, assoc_data->timeout);
3288 } else {
3289 assoc_data->timeout_started = false;
3290 }
2923 3291
2924 return 0; 3292 return 0;
2925} 3293}
2926 3294
3295void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
3296 __le16 fc, bool acked)
3297{
3298 struct ieee80211_local *local = sdata->local;
3299
3300 sdata->u.mgd.status_fc = fc;
3301 sdata->u.mgd.status_acked = acked;
3302 sdata->u.mgd.status_received = true;
3303
3304 ieee80211_queue_work(&local->hw, &sdata->work);
3305}
3306
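ieee80211_mgd_conn_tx_status() above runs from TX-status context, so it only records the frame control and ack result in the new status_fc/status_acked/status_received fields and queues the interface work; the MLME work (next hunk) then rearms the auth/assoc timeout -- a short one on ack, immediate expiry on no ack. A sketch of that record-and-defer handoff:

#include <stdbool.h>
#include <stdio.h>

static struct {		/* mirrors status_fc/status_acked/status_received */
	unsigned fc;
	bool acked;
	bool received;
} st;

static void conn_tx_status(unsigned fc, bool acked)
{
	st.fc = fc;		/* record only... */
	st.acked = acked;
	st.received = true;	/* ...and let the work item act on it */
}

static void sta_work(void)	/* normally queued; runs in process context */
{
	if (!st.received)
		return;
	st.received = false;
	printf(st.acked ? "acked: arm short timeout\n"
			: "no ack: expire timeout now\n");
}

int main(void)
{
	conn_tx_status(0xb0 /* auth */, true);
	sta_work();
	return 0;
}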
2927void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) 3307void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2928{ 3308{
2929 struct ieee80211_local *local = sdata->local; 3309 struct ieee80211_local *local = sdata->local;
@@ -2931,7 +3311,36 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2931 3311
2932 mutex_lock(&ifmgd->mtx); 3312 mutex_lock(&ifmgd->mtx);
2933 3313
2934 if (ifmgd->auth_data && 3314 if (ifmgd->status_received) {
3315 __le16 fc = ifmgd->status_fc;
3316 bool status_acked = ifmgd->status_acked;
3317
3318 ifmgd->status_received = false;
3319 if (ifmgd->auth_data &&
3320 (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) {
3321 if (status_acked) {
3322 ifmgd->auth_data->timeout =
3323 jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
3324 run_again(ifmgd, ifmgd->auth_data->timeout);
3325 } else {
3326 ifmgd->auth_data->timeout = jiffies - 1;
3327 }
3328 ifmgd->auth_data->timeout_started = true;
3329 } else if (ifmgd->assoc_data &&
3330 (ieee80211_is_assoc_req(fc) ||
3331 ieee80211_is_reassoc_req(fc))) {
3332 if (status_acked) {
3333 ifmgd->assoc_data->timeout =
3334 jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT;
3335 run_again(ifmgd, ifmgd->assoc_data->timeout);
3336 } else {
3337 ifmgd->assoc_data->timeout = jiffies - 1;
3338 }
3339 ifmgd->assoc_data->timeout_started = true;
3340 }
3341 }
3342
3343 if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
2935 time_after(jiffies, ifmgd->auth_data->timeout)) { 3344 time_after(jiffies, ifmgd->auth_data->timeout)) {
2936 if (ifmgd->auth_data->done) { 3345 if (ifmgd->auth_data->done) {
2937 /* 3346 /*
@@ -2950,12 +3359,13 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2950 cfg80211_send_auth_timeout(sdata->dev, bssid); 3359 cfg80211_send_auth_timeout(sdata->dev, bssid);
2951 mutex_lock(&ifmgd->mtx); 3360 mutex_lock(&ifmgd->mtx);
2952 } 3361 }
2953 } else if (ifmgd->auth_data) 3362 } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
2954 run_again(ifmgd, ifmgd->auth_data->timeout); 3363 run_again(ifmgd, ifmgd->auth_data->timeout);
2955 3364
2956 if (ifmgd->assoc_data && 3365 if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
2957 time_after(jiffies, ifmgd->assoc_data->timeout)) { 3366 time_after(jiffies, ifmgd->assoc_data->timeout)) {
2958 if (!ifmgd->assoc_data->have_beacon || 3367 if ((ifmgd->assoc_data->need_beacon &&
3368 !ifmgd->assoc_data->have_beacon) ||
2959 ieee80211_do_assoc(sdata)) { 3369 ieee80211_do_assoc(sdata)) {
2960 u8 bssid[ETH_ALEN]; 3370 u8 bssid[ETH_ALEN];
2961 3371
@@ -2967,7 +3377,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2967 cfg80211_send_assoc_timeout(sdata->dev, bssid); 3377 cfg80211_send_assoc_timeout(sdata->dev, bssid);
2968 mutex_lock(&ifmgd->mtx); 3378 mutex_lock(&ifmgd->mtx);
2969 } 3379 }
2970 } else if (ifmgd->assoc_data) 3380 } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
2971 run_again(ifmgd, ifmgd->assoc_data->timeout); 3381 run_again(ifmgd, ifmgd->assoc_data->timeout);
2972 3382
2973 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 3383 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
@@ -2998,7 +3408,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2998 "No ack for nullfunc frame to AP %pM, disconnecting.\n", 3408 "No ack for nullfunc frame to AP %pM, disconnecting.\n",
2999 bssid); 3409 bssid);
3000 ieee80211_sta_connection_lost(sdata, bssid, 3410 ieee80211_sta_connection_lost(sdata, bssid,
3001 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3411 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
3412 false);
3002 } 3413 }
3003 } else if (time_is_after_jiffies(ifmgd->probe_timeout)) 3414 } else if (time_is_after_jiffies(ifmgd->probe_timeout))
3004 run_again(ifmgd, ifmgd->probe_timeout); 3415 run_again(ifmgd, ifmgd->probe_timeout);
@@ -3007,7 +3418,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3007 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", 3418 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
3008 bssid, probe_wait_ms); 3419 bssid, probe_wait_ms);
3009 ieee80211_sta_connection_lost(sdata, bssid, 3420 ieee80211_sta_connection_lost(sdata, bssid,
3010 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3421 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
3011 } else if (ifmgd->probe_send_count < max_tries) { 3422 } else if (ifmgd->probe_send_count < max_tries) {
3012 mlme_dbg(sdata, 3423 mlme_dbg(sdata,
3013 "No probe response from AP %pM after %dms, try %d/%i\n", 3424 "No probe response from AP %pM after %dms, try %d/%i\n",
@@ -3026,15 +3437,11 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3026 bssid, probe_wait_ms); 3437 bssid, probe_wait_ms);
3027 3438
3028 ieee80211_sta_connection_lost(sdata, bssid, 3439 ieee80211_sta_connection_lost(sdata, bssid,
3029 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3440 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
3030 } 3441 }
3031 } 3442 }
3032 3443
3033 mutex_unlock(&ifmgd->mtx); 3444 mutex_unlock(&ifmgd->mtx);
3034
3035 mutex_lock(&local->mtx);
3036 ieee80211_recalc_idle(local);
3037 mutex_unlock(&local->mtx);
3038} 3445}
3039 3446
3040static void ieee80211_sta_bcn_mon_timer(unsigned long data) 3447static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -3046,6 +3453,7 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
3046 if (local->quiescing) 3453 if (local->quiescing)
3047 return; 3454 return;
3048 3455
3456 sdata->u.mgd.connection_loss = false;
3049 ieee80211_queue_work(&sdata->local->hw, 3457 ieee80211_queue_work(&sdata->local->hw,
3050 &sdata->u.mgd.beacon_connection_loss_work); 3458 &sdata->u.mgd.beacon_connection_loss_work);
3051} 3459}
@@ -3121,23 +3529,23 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
3121{ 3529{
3122 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3530 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3123 3531
3124 if (!ifmgd->associated) 3532 mutex_lock(&ifmgd->mtx);
3533 if (!ifmgd->associated) {
3534 mutex_unlock(&ifmgd->mtx);
3125 return; 3535 return;
3536 }
3126 3537
3127 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { 3538 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
3128 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; 3539 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
3129 mutex_lock(&ifmgd->mtx); 3540 mlme_dbg(sdata, "driver requested disconnect after resume\n");
3130 if (ifmgd->associated) { 3541 ieee80211_sta_connection_lost(sdata,
3131 mlme_dbg(sdata, 3542 ifmgd->associated->bssid,
3132 "driver requested disconnect after resume\n"); 3543 WLAN_REASON_UNSPECIFIED,
3133 ieee80211_sta_connection_lost(sdata, 3544 true);
3134 ifmgd->associated->bssid,
3135 WLAN_REASON_UNSPECIFIED);
3136 mutex_unlock(&ifmgd->mtx);
3137 return;
3138 }
3139 mutex_unlock(&ifmgd->mtx); 3545 mutex_unlock(&ifmgd->mtx);
3546 return;
3140 } 3547 }
3548 mutex_unlock(&ifmgd->mtx);
3141 3549
3142 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) 3550 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
3143 add_timer(&ifmgd->timer); 3551 add_timer(&ifmgd->timer);
@@ -3213,201 +3621,6 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3213 return 0; 3621 return 0;
3214} 3622}
3215 3623
3216static u32 chandef_downgrade(struct cfg80211_chan_def *c)
3217{
3218 u32 ret;
3219 int tmp;
3220
3221 switch (c->width) {
3222 case NL80211_CHAN_WIDTH_20:
3223 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3224 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3225 break;
3226 case NL80211_CHAN_WIDTH_40:
3227 c->width = NL80211_CHAN_WIDTH_20;
3228 c->center_freq1 = c->chan->center_freq;
3229 ret = IEEE80211_STA_DISABLE_40MHZ |
3230 IEEE80211_STA_DISABLE_VHT;
3231 break;
3232 case NL80211_CHAN_WIDTH_80:
3233 tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
3234 /* n_P40 */
3235 tmp /= 2;
3236 /* freq_P40 */
3237 c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
3238 c->width = NL80211_CHAN_WIDTH_40;
3239 ret = IEEE80211_STA_DISABLE_VHT;
3240 break;
3241 case NL80211_CHAN_WIDTH_80P80:
3242 c->center_freq2 = 0;
3243 c->width = NL80211_CHAN_WIDTH_80;
3244 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3245 IEEE80211_STA_DISABLE_160MHZ;
3246 break;
3247 case NL80211_CHAN_WIDTH_160:
3248 /* n_P20 */
3249 tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
3250 /* n_P80 */
3251 tmp /= 4;
3252 c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
3253 c->width = NL80211_CHAN_WIDTH_80;
3254 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3255 IEEE80211_STA_DISABLE_160MHZ;
3256 break;
3257 default:
3258 case NL80211_CHAN_WIDTH_20_NOHT:
3259 WARN_ON_ONCE(1);
3260 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3261 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3262 break;
3263 }
3264
3265 WARN_ON_ONCE(!cfg80211_chandef_valid(c));
3266
3267 return ret;
3268}
3269
3270static u32
3271ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3272 struct ieee80211_supported_band *sband,
3273 struct ieee80211_channel *channel,
3274 const struct ieee80211_ht_operation *ht_oper,
3275 const struct ieee80211_vht_operation *vht_oper,
3276 struct cfg80211_chan_def *chandef)
3277{
3278 struct cfg80211_chan_def vht_chandef;
3279 u32 ht_cfreq, ret;
3280
3281 chandef->chan = channel;
3282 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
3283 chandef->center_freq1 = channel->center_freq;
3284 chandef->center_freq2 = 0;
3285
3286 if (!ht_oper || !sband->ht_cap.ht_supported) {
3287 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3288 goto out;
3289 }
3290
3291 chandef->width = NL80211_CHAN_WIDTH_20;
3292
3293 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
3294 channel->band);
3295 /* check that channel matches the right operating channel */
3296 if (channel->center_freq != ht_cfreq) {
3297 /*
3298 * It's possible that some APs are confused here;
3299 * Netgear WNDR3700 sometimes reports 4 higher than
3300 * the actual channel in association responses, but
3301 * since we look at probe response/beacon data here
3302 * it should be OK.
3303 */
3304 sdata_info(sdata,
3305 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
3306 channel->center_freq, ht_cfreq,
3307 ht_oper->primary_chan, channel->band);
3308 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3309 goto out;
3310 }
3311
3312 /* check 40 MHz support, if we have it */
3313 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
3314 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
3315 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3316 chandef->width = NL80211_CHAN_WIDTH_40;
3317 chandef->center_freq1 += 10;
3318 break;
3319 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3320 chandef->width = NL80211_CHAN_WIDTH_40;
3321 chandef->center_freq1 -= 10;
3322 break;
3323 }
3324 } else {
3325 /* 40 MHz (and 80 MHz) must be supported for VHT */
3326 ret = IEEE80211_STA_DISABLE_VHT;
3327 goto out;
3328 }
3329
3330 if (!vht_oper || !sband->vht_cap.vht_supported) {
3331 ret = IEEE80211_STA_DISABLE_VHT;
3332 goto out;
3333 }
3334
3335 vht_chandef.chan = channel;
3336 vht_chandef.center_freq1 =
3337 ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
3338 channel->band);
3339 vht_chandef.center_freq2 = 0;
3340
3341 if (vht_oper->center_freq_seg2_idx)
3342 vht_chandef.center_freq2 =
3343 ieee80211_channel_to_frequency(
3344 vht_oper->center_freq_seg2_idx,
3345 channel->band);
3346
3347 switch (vht_oper->chan_width) {
3348 case IEEE80211_VHT_CHANWIDTH_USE_HT:
3349 vht_chandef.width = chandef->width;
3350 break;
3351 case IEEE80211_VHT_CHANWIDTH_80MHZ:
3352 vht_chandef.width = NL80211_CHAN_WIDTH_80;
3353 break;
3354 case IEEE80211_VHT_CHANWIDTH_160MHZ:
3355 vht_chandef.width = NL80211_CHAN_WIDTH_160;
3356 break;
3357 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
3358 vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
3359 break;
3360 default:
3361 sdata_info(sdata,
3362 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
3363 vht_oper->chan_width);
3364 ret = IEEE80211_STA_DISABLE_VHT;
3365 goto out;
3366 }
3367
3368 if (!cfg80211_chandef_valid(&vht_chandef)) {
3369 sdata_info(sdata,
3370 "AP VHT information is invalid, disable VHT\n");
3371 ret = IEEE80211_STA_DISABLE_VHT;
3372 goto out;
3373 }
3374
3375 if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
3376 ret = 0;
3377 goto out;
3378 }
3379
3380 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
3381 sdata_info(sdata,
3382 "AP VHT information doesn't match HT, disable VHT\n");
3383 ret = IEEE80211_STA_DISABLE_VHT;
3384 goto out;
3385 }
3386
3387 *chandef = vht_chandef;
3388
3389 ret = 0;
3390
3391 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
3392 IEEE80211_CHAN_DISABLED)) {
3393 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
3394 ret = IEEE80211_STA_DISABLE_HT |
3395 IEEE80211_STA_DISABLE_VHT;
3396 goto out;
3397 }
3398
3399 ret = chandef_downgrade(chandef);
3400 }
3401
3402 if (chandef->width != vht_chandef.width)
3403 sdata_info(sdata,
3404 "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
3405
3406out:
3407 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
3408 return ret;
3409}
3410
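chandef_downgrade(), removed from this file in the hunk above, steps a channel definition down one width at a time; in the 80 to 40 MHz case it recovers the center of the 40 MHz half that still contains the primary channel (n_P20 indexes the primary 20 MHz channel inside the 80 MHz block, n_P40 the 40 MHz half). A small runnable replica of that arithmetic (userspace, frequencies in MHz, example values mine):

#include <stdio.h>

static int downgrade_80_to_40(int primary_freq, int center_freq1)
{
	int n_p20 = (30 + primary_freq - center_freq1) / 20;
	int n_p40 = n_p20 / 2;

	return center_freq1 - 20 + 40 * n_p40;	/* new 40 MHz center */
}

int main(void)
{
	/* primary 5180 MHz (ch 36) in an 80 MHz block centered at 5210 */
	printf("%d MHz\n", downgrade_80_to_40(5180, 5210));	/* 5190 */
	/* primary 5220 MHz (ch 44), same block */
	printf("%d MHz\n", downgrade_80_to_40(5220, 5210));	/* 5230 */
	return 0;
}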
3411static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, 3624static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
3412 struct cfg80211_bss *cbss) 3625 struct cfg80211_bss *cbss)
3413{ 3626{
@@ -3473,16 +3686,22 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3473 3686
3474 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && 3687 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
3475 sband->ht_cap.ht_supported) { 3688 sband->ht_cap.ht_supported) {
3476 const u8 *ht_oper_ie; 3689 const u8 *ht_oper_ie, *ht_cap;
3477 3690
3478 ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); 3691 ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
3479 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) 3692 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
3480 ht_oper = (void *)(ht_oper_ie + 2); 3693 ht_oper = (void *)(ht_oper_ie + 2);
3694
3695 ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
3696 if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) {
3697 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3698 ht_oper = NULL;
3699 }
3481 } 3700 }
3482 3701
3483 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && 3702 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
3484 sband->vht_cap.vht_supported) { 3703 sband->vht_cap.vht_supported) {
3485 const u8 *vht_oper_ie; 3704 const u8 *vht_oper_ie, *vht_cap;
3486 3705
3487 vht_oper_ie = ieee80211_bss_get_ie(cbss, 3706 vht_oper_ie = ieee80211_bss_get_ie(cbss,
3488 WLAN_EID_VHT_OPERATION); 3707 WLAN_EID_VHT_OPERATION);
@@ -3492,15 +3711,21 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3492 vht_oper = NULL; 3711 vht_oper = NULL;
3493 sdata_info(sdata, 3712 sdata_info(sdata,
3494 "AP advertised VHT without HT, disabling both\n"); 3713 "AP advertised VHT without HT, disabling both\n");
3495 sdata->flags |= IEEE80211_STA_DISABLE_HT; 3714 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3496 sdata->flags |= IEEE80211_STA_DISABLE_VHT; 3715 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3716 }
3717
3718 vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
3719 if (!vht_cap || vht_cap[1] < sizeof(struct ieee80211_vht_cap)) {
3720 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3721 vht_oper = NULL;
3497 } 3722 }
3498 } 3723 }
3499 3724
3500 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, 3725 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
3501 cbss->channel, 3726 cbss->channel,
3502 ht_oper, vht_oper, 3727 ht_oper, vht_oper,
3503 &chandef); 3728 &chandef, true);
3504 3729
3505 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), 3730 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
3506 local->rx_chains); 3731 local->rx_chains);
@@ -3517,8 +3742,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3517 */ 3742 */
3518 ret = ieee80211_vif_use_channel(sdata, &chandef, 3743 ret = ieee80211_vif_use_channel(sdata, &chandef,
3519 IEEE80211_CHANCTX_SHARED); 3744 IEEE80211_CHANCTX_SHARED);
3520 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) 3745 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
3521 ifmgd->flags |= chandef_downgrade(&chandef); 3746 ifmgd->flags |= chandef_downgrade(&chandef);
3747 ret = ieee80211_vif_use_channel(sdata, &chandef,
3748 IEEE80211_CHANCTX_SHARED);
3749 }
3522 return ret; 3750 return ret;
3523} 3751}
3524 3752
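The loop change just above fixes a real bug: the old code downgraded the chandef on failure but never called ieee80211_vif_use_channel() again, so ret kept its original error and the downgrades achieved nothing. The fixed shape, re-attempt after every downgrade, reduced to a runnable toy (stub names are mine, not mac80211 API):

#include <stdio.h>
#include <stdbool.h>

struct cfg { int width; };			/* chan_def stand-in */

static bool try_use(const struct cfg *c)
{
	return c->width <= 20;			/* pretend only 20 MHz fits */
}

int main(void)
{
	struct cfg c = { .width = 80 };
	bool ok = try_use(&c);

	while (!ok && c.width > 20) {
		c.width /= 2;			/* chandef_downgrade() analogue */
		ok = try_use(&c);		/* the pre-fix loop skipped this */
	}
	printf("settled on %d MHz (%s)\n", c.width, ok ? "ok" : "failed");
	return 0;
}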
@@ -3547,15 +3775,12 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3547 return -ENOMEM; 3775 return -ENOMEM;
3548 } 3776 }
3549 3777
3550 mutex_lock(&local->mtx);
3551 ieee80211_recalc_idle(sdata->local);
3552 mutex_unlock(&local->mtx);
3553
3554 if (new_sta) { 3778 if (new_sta) {
3555 u32 rates = 0, basic_rates = 0; 3779 u32 rates = 0, basic_rates = 0;
3556 bool have_higher_than_11mbit; 3780 bool have_higher_than_11mbit;
3557 int min_rate = INT_MAX, min_rate_index = -1; 3781 int min_rate = INT_MAX, min_rate_index = -1;
3558 struct ieee80211_supported_band *sband; 3782 struct ieee80211_supported_band *sband;
3783 const struct cfg80211_bss_ies *ies;
3559 3784
3560 sband = local->hw.wiphy->bands[cbss->channel->band]; 3785 sband = local->hw.wiphy->bands[cbss->channel->band];
3561 3786
@@ -3599,8 +3824,34 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3599 3824
3600 /* set timing information */ 3825 /* set timing information */
3601 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; 3826 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
3602 sdata->vif.bss_conf.sync_tsf = cbss->tsf; 3827 rcu_read_lock();
3603 sdata->vif.bss_conf.sync_device_ts = bss->device_ts; 3828 ies = rcu_dereference(cbss->beacon_ies);
3829 if (ies) {
3830 const u8 *tim_ie;
3831
3832 sdata->vif.bss_conf.sync_tsf = ies->tsf;
3833 sdata->vif.bss_conf.sync_device_ts =
3834 bss->device_ts_beacon;
3835 tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
3836 ies->data, ies->len);
3837 if (tim_ie && tim_ie[1] >= 2)
3838 sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
3839 else
3840 sdata->vif.bss_conf.sync_dtim_count = 0;
3841 } else if (!(local->hw.flags &
3842 IEEE80211_HW_TIMING_BEACON_ONLY)) {
3843 ies = rcu_dereference(cbss->proberesp_ies);
3844 /* must be non-NULL since beacon IEs were NULL */
3845 sdata->vif.bss_conf.sync_tsf = ies->tsf;
3846 sdata->vif.bss_conf.sync_device_ts =
3847 bss->device_ts_presp;
3848 sdata->vif.bss_conf.sync_dtim_count = 0;
3849 } else {
3850 sdata->vif.bss_conf.sync_tsf = 0;
3851 sdata->vif.bss_conf.sync_device_ts = 0;
3852 sdata->vif.bss_conf.sync_dtim_count = 0;
3853 }
3854 rcu_read_unlock();
3604 3855
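The new timing-sync block dereferences the BSS's beacon IEs under RCU and pulls the DTIM count straight out of the TIM element: after the two-byte element header, byte 0 is dtim_count and byte 1 is dtim_period, which is why the tim_ie[1] >= 2 length check suffices before reading tim_ie[2]. A tiny userspace check of that layout (the element bytes are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* EID 5 (TIM), length 4, then count, period, bitmap control
	 * and the partial virtual bitmap */
	uint8_t tim_ie[] = { 5, 4, 1, 3, 0, 0 };

	if (tim_ie[1] >= 2)	/* need at least count + period */
		printf("dtim_count=%u dtim_period=%u\n",
		       tim_ie[2], tim_ie[3]);
	return 0;
}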
3605 /* tell driver about BSSID, basic rates and timing */ 3856 /* tell driver about BSSID, basic rates and timing */
3606 ieee80211_bss_info_change_notify(sdata, 3857 ieee80211_bss_info_change_notify(sdata,
@@ -3720,7 +3971,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
3720 } 3971 }
3721 3972
3722 /* hold our own reference */ 3973 /* hold our own reference */
3723 cfg80211_ref_bss(auth_data->bss); 3974 cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
3724 err = 0; 3975 err = 0;
3725 goto out_unlock; 3976 goto out_unlock;
3726 3977
@@ -3743,8 +3994,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3743 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3994 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3744 struct ieee80211_bss *bss = (void *)req->bss->priv; 3995 struct ieee80211_bss *bss = (void *)req->bss->priv;
3745 struct ieee80211_mgd_assoc_data *assoc_data; 3996 struct ieee80211_mgd_assoc_data *assoc_data;
3997 const struct cfg80211_bss_ies *beacon_ies;
3746 struct ieee80211_supported_band *sband; 3998 struct ieee80211_supported_band *sband;
3747 const u8 *ssidie, *ht_ie; 3999 const u8 *ssidie, *ht_ie, *vht_ie;
3748 int i, err; 4000 int i, err;
3749 4001
3750 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL); 4002 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
@@ -3863,6 +4115,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3863 ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; 4115 ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
3864 else 4116 else
3865 ifmgd->flags |= IEEE80211_STA_DISABLE_HT; 4117 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
4118 vht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_VHT_CAPABILITY);
4119 if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap))
4120 memcpy(&assoc_data->ap_vht_cap, vht_ie + 2,
4121 sizeof(struct ieee80211_vht_cap));
4122 else
4123 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3866 rcu_read_unlock(); 4124 rcu_read_unlock();
3867 4125
3868 if (bss->wmm_used && bss->uapsd_supported && 4126 if (bss->wmm_used && bss->uapsd_supported &&
@@ -3896,13 +4154,17 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3896 /* kick off associate process */ 4154 /* kick off associate process */
3897 4155
3898 ifmgd->assoc_data = assoc_data; 4156 ifmgd->assoc_data = assoc_data;
4157 ifmgd->dtim_period = 0;
3899 4158
3900 err = ieee80211_prep_connection(sdata, req->bss, true); 4159 err = ieee80211_prep_connection(sdata, req->bss, true);
3901 if (err) 4160 if (err)
3902 goto err_clear; 4161 goto err_clear;
3903 4162
3904 if (!bss->dtim_period && 4163 rcu_read_lock();
3905 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) { 4164 beacon_ies = rcu_dereference(req->bss->beacon_ies);
4165
4166 if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC &&
4167 !beacon_ies) {
3906 /* 4168 /*
3907 * Wait up to one beacon interval ... 4169 * Wait up to one beacon interval ...
3908 * should this be more if we miss one? 4170 * should this be more if we miss one?
@@ -3910,11 +4172,36 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3910 sdata_info(sdata, "waiting for beacon from %pM\n", 4172 sdata_info(sdata, "waiting for beacon from %pM\n",
3911 ifmgd->bssid); 4173 ifmgd->bssid);
3912 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); 4174 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3913 } else { 4175 assoc_data->timeout_started = true;
4176 assoc_data->need_beacon = true;
4177 } else if (beacon_ies) {
4178 const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
4179 beacon_ies->data,
4180 beacon_ies->len);
4181 u8 dtim_count = 0;
4182
4183 if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
4184 const struct ieee80211_tim_ie *tim;
4185 tim = (void *)(tim_ie + 2);
4186 ifmgd->dtim_period = tim->dtim_period;
4187 dtim_count = tim->dtim_count;
4188 }
3914 assoc_data->have_beacon = true; 4189 assoc_data->have_beacon = true;
3915 assoc_data->sent_assoc = false;
3916 assoc_data->timeout = jiffies; 4190 assoc_data->timeout = jiffies;
4191 assoc_data->timeout_started = true;
4192
4193 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
4194 sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf;
4195 sdata->vif.bss_conf.sync_device_ts =
4196 bss->device_ts_beacon;
4197 sdata->vif.bss_conf.sync_dtim_count = dtim_count;
4198 }
4199 } else {
4200 assoc_data->timeout = jiffies;
4201 assoc_data->timeout_started = true;
3917 } 4202 }
4203 rcu_read_unlock();
4204
3918 run_again(ifmgd, assoc_data->timeout); 4205 run_again(ifmgd, assoc_data->timeout);
3919 4206
3920 if (bss->corrupt_data) { 4207 if (bss->corrupt_data) {
@@ -3981,10 +4268,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3981 mutex_unlock(&ifmgd->mtx); 4268 mutex_unlock(&ifmgd->mtx);
3982 4269
3983 out: 4270 out:
3984 mutex_lock(&sdata->local->mtx);
3985 ieee80211_recalc_idle(sdata->local);
3986 mutex_unlock(&sdata->local->mtx);
3987
3988 if (sent_frame) 4271 if (sent_frame)
3989 __cfg80211_send_deauth(sdata->dev, frame_buf, 4272 __cfg80211_send_deauth(sdata->dev, frame_buf,
3990 IEEE80211_DEAUTH_FRAME_LEN); 4273 IEEE80211_DEAUTH_FRAME_LEN);
@@ -4025,10 +4308,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
4025 __cfg80211_send_disassoc(sdata->dev, frame_buf, 4308 __cfg80211_send_disassoc(sdata->dev, frame_buf,
4026 IEEE80211_DEAUTH_FRAME_LEN); 4309 IEEE80211_DEAUTH_FRAME_LEN);
4027 4310
4028 mutex_lock(&sdata->local->mtx);
4029 ieee80211_recalc_idle(sdata->local);
4030 mutex_unlock(&sdata->local->mtx);
4031
4032 return 0; 4311 return 0;
4033} 4312}
4034 4313
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index a5379aea7d09..cc79b4a2e821 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
102 ieee80211_sta_reset_conn_monitor(sdata); 102 ieee80211_sta_reset_conn_monitor(sdata);
103} 103}
104 104
105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
106 bool offchannel_ps_enable)
107{ 106{
108 struct ieee80211_sub_if_data *sdata; 107 struct ieee80211_sub_if_data *sdata;
109 108
@@ -114,6 +113,15 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
114 * notify the AP about us leaving the channel and stop all 113 * notify the AP about us leaving the channel and stop all
115 * STA interfaces. 114 * STA interfaces.
116 */ 115 */
116
117 /*
118 * Stop queues and transmit all frames queued by the driver
119 * before sending nullfunc to enable powersave at the AP.
120 */
121 ieee80211_stop_queues_by_reason(&local->hw,
122 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
123 drv_flush(local, false);
124
117 mutex_lock(&local->iflist_mtx); 125 mutex_lock(&local->iflist_mtx);
118 list_for_each_entry(sdata, &local->interfaces, list) { 126 list_for_each_entry(sdata, &local->interfaces, list) {
119 if (!ieee80211_sdata_running(sdata)) 127 if (!ieee80211_sdata_running(sdata))
@@ -126,25 +134,22 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
126 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); 134 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
127 135
128 /* Check to see if we should disable beaconing. */ 136 /* Check to see if we should disable beaconing. */
129 if (sdata->vif.type == NL80211_IFTYPE_AP || 137 if (sdata->vif.bss_conf.enable_beacon) {
130 sdata->vif.type == NL80211_IFTYPE_ADHOC || 138 set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
131 sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 139 &sdata->state);
140 sdata->vif.bss_conf.enable_beacon = false;
132 ieee80211_bss_info_change_notify( 141 ieee80211_bss_info_change_notify(
133 sdata, BSS_CHANGED_BEACON_ENABLED); 142 sdata, BSS_CHANGED_BEACON_ENABLED);
134
135 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
136 netif_tx_stop_all_queues(sdata->dev);
137 if (offchannel_ps_enable &&
138 (sdata->vif.type == NL80211_IFTYPE_STATION) &&
139 sdata->u.mgd.associated)
140 ieee80211_offchannel_ps_enable(sdata);
141 } 143 }
144
145 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
146 sdata->u.mgd.associated)
147 ieee80211_offchannel_ps_enable(sdata);
142 } 148 }
143 mutex_unlock(&local->iflist_mtx); 149 mutex_unlock(&local->iflist_mtx);
144} 150}
145 151
146void ieee80211_offchannel_return(struct ieee80211_local *local, 152void ieee80211_offchannel_return(struct ieee80211_local *local)
147 bool offchannel_ps_disable)
148{ 153{
149 struct ieee80211_sub_if_data *sdata; 154 struct ieee80211_sub_if_data *sdata;
150 155
@@ -163,33 +168,21 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
163 continue; 168 continue;
164 169
165 /* Tell AP we're back */ 170 /* Tell AP we're back */
166 if (offchannel_ps_disable && 171 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
167 sdata->vif.type == NL80211_IFTYPE_STATION) { 172 sdata->u.mgd.associated)
168 if (sdata->u.mgd.associated) 173 ieee80211_offchannel_ps_disable(sdata);
169 ieee80211_offchannel_ps_disable(sdata);
170 }
171 174
172 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 175 if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
173 /* 176 &sdata->state)) {
174 * This may wake up queues even though the driver 177 sdata->vif.bss_conf.enable_beacon = true;
175 * currently has them stopped. This is not very
176 * likely, since the driver won't have gotten any
177 * (or hardly any) new packets while we weren't
178 * on the right channel, and even if it happens
179 * it will at most lead to queueing up one more
180 * packet per queue in mac80211 rather than on
181 * the interface qdisc.
182 */
183 netif_tx_wake_all_queues(sdata->dev);
184 }
185
186 if (sdata->vif.type == NL80211_IFTYPE_AP ||
187 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
188 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
189 ieee80211_bss_info_change_notify( 178 ieee80211_bss_info_change_notify(
190 sdata, BSS_CHANGED_BEACON_ENABLED); 179 sdata, BSS_CHANGED_BEACON_ENABLED);
180 }
191 } 181 }
192 mutex_unlock(&local->iflist_mtx); 182 mutex_unlock(&local->iflist_mtx);
183
184 ieee80211_wake_queues_by_reason(&local->hw,
185 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
193} 186}
194 187
195void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc) 188void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
@@ -385,7 +378,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
385 local->tmp_channel = NULL; 378 local->tmp_channel = NULL;
386 ieee80211_hw_config(local, 0); 379 ieee80211_hw_config(local, 0);
387 380
388 ieee80211_offchannel_return(local, true); 381 ieee80211_offchannel_return(local);
389 } 382 }
390 383
391 ieee80211_recalc_idle(local); 384 ieee80211_recalc_idle(local);
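The offchannel rework above moves queue handling into the stop/return pair itself: stop all TX queues under a dedicated reason, drv_flush() whatever the driver already holds, do the off-channel work, then wake the queues by the same reason on return. mac80211 keeps stop reasons as a bitmask per hardware queue, so a queue only runs once every reason bit is cleared; roughly (single-queue userspace model, simplified):

#include <stdio.h>

#define REASON_OFFCHANNEL	(1u << 0)
#define REASON_SUSPEND		(1u << 1)

static unsigned int stop_reasons;

static void stop_queue(unsigned int r)	{ stop_reasons |= r; }
static void wake_queue(unsigned int r)	{ stop_reasons &= ~r; }
static int queue_running(void)		{ return stop_reasons == 0; }

int main(void)
{
	stop_queue(REASON_OFFCHANNEL);		/* before leaving the channel */
	printf("running=%d\n", queue_running());	/* 0 */
	wake_queue(REASON_OFFCHANNEL);		/* back on-channel */
	printf("running=%d\n", queue_running());	/* 1 */
	return 0;
}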
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 79a48f37d409..d0275f34bf70 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -7,25 +7,23 @@
7#include "led.h" 7#include "led.h"
8 8
9/* return value indicates whether the driver should be further notified */ 9/* return value indicates whether the driver should be further notified */
10static bool ieee80211_quiesce(struct ieee80211_sub_if_data *sdata) 10static void ieee80211_quiesce(struct ieee80211_sub_if_data *sdata)
11{ 11{
12 switch (sdata->vif.type) { 12 switch (sdata->vif.type) {
13 case NL80211_IFTYPE_STATION: 13 case NL80211_IFTYPE_STATION:
14 ieee80211_sta_quiesce(sdata); 14 ieee80211_sta_quiesce(sdata);
15 return true; 15 break;
16 case NL80211_IFTYPE_ADHOC: 16 case NL80211_IFTYPE_ADHOC:
17 ieee80211_ibss_quiesce(sdata); 17 ieee80211_ibss_quiesce(sdata);
18 return true; 18 break;
19 case NL80211_IFTYPE_MESH_POINT: 19 case NL80211_IFTYPE_MESH_POINT:
20 ieee80211_mesh_quiesce(sdata); 20 ieee80211_mesh_quiesce(sdata);
21 return true; 21 break;
22 case NL80211_IFTYPE_AP_VLAN:
23 case NL80211_IFTYPE_MONITOR:
24 /* don't tell driver about this */
25 return false;
26 default: 22 default:
27 return true; 23 break;
28 } 24 }
25
26 cancel_work_sync(&sdata->work);
29} 27}
30 28
31int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 29int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
@@ -40,11 +38,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
40 38
41 ieee80211_scan_cancel(local); 39 ieee80211_scan_cancel(local);
42 40
41 ieee80211_dfs_cac_cancel(local);
42
43 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 43 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
44 mutex_lock(&local->sta_mtx); 44 mutex_lock(&local->sta_mtx);
45 list_for_each_entry(sta, &local->sta_list, list) { 45 list_for_each_entry(sta, &local->sta_list, list) {
46 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 46 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
47 ieee80211_sta_tear_down_BA_sessions(sta, true); 47 ieee80211_sta_tear_down_BA_sessions(
48 sta, AGG_STOP_LOCAL_REQUEST);
48 } 49 }
49 mutex_unlock(&local->sta_mtx); 50 mutex_unlock(&local->sta_mtx);
50 } 51 }
@@ -94,10 +95,9 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94 WARN_ON(err != 1); 95 WARN_ON(err != 1);
95 local->wowlan = false; 96 local->wowlan = false;
96 } else { 97 } else {
97 list_for_each_entry(sdata, &local->interfaces, list) { 98 list_for_each_entry(sdata, &local->interfaces, list)
98 cancel_work_sync(&sdata->work); 99 if (ieee80211_sdata_running(sdata))
99 ieee80211_quiesce(sdata); 100 ieee80211_quiesce(sdata);
100 }
101 goto suspend; 101 goto suspend;
102 } 102 }
103 } 103 }
@@ -124,17 +124,43 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
124 124
125 /* remove all interfaces */ 125 /* remove all interfaces */
126 list_for_each_entry(sdata, &local->interfaces, list) { 126 list_for_each_entry(sdata, &local->interfaces, list) {
127 cancel_work_sync(&sdata->work); 127 static u8 zero_addr[ETH_ALEN] = {};
128 u32 changed = 0;
128 129
129 if (!ieee80211_quiesce(sdata)) 130 if (!ieee80211_sdata_running(sdata))
130 continue; 131 continue;
131 132
132 if (!ieee80211_sdata_running(sdata)) 133 switch (sdata->vif.type) {
134 case NL80211_IFTYPE_AP_VLAN:
135 case NL80211_IFTYPE_MONITOR:
136 /* skip these */
133 continue; 137 continue;
138 case NL80211_IFTYPE_STATION:
139 if (sdata->vif.bss_conf.assoc)
140 changed = BSS_CHANGED_ASSOC |
141 BSS_CHANGED_BSSID |
142 BSS_CHANGED_IDLE;
143 break;
144 case NL80211_IFTYPE_AP:
145 case NL80211_IFTYPE_ADHOC:
146 case NL80211_IFTYPE_MESH_POINT:
147 if (sdata->vif.bss_conf.enable_beacon)
148 changed = BSS_CHANGED_BEACON_ENABLED;
149 break;
150 default:
151 break;
152 }
134 153
135 /* disable beaconing */ 154 ieee80211_quiesce(sdata);
136 ieee80211_bss_info_change_notify(sdata, 155
137 BSS_CHANGED_BEACON_ENABLED); 156 sdata->suspend_bss_conf = sdata->vif.bss_conf;
157 memset(&sdata->vif.bss_conf, 0, sizeof(sdata->vif.bss_conf));
158 sdata->vif.bss_conf.idle = true;
159 if (sdata->suspend_bss_conf.bssid)
160 sdata->vif.bss_conf.bssid = zero_addr;
161
162 /* disable beaconing or remove association */
163 ieee80211_bss_info_change_notify(sdata, changed);
138 164
139 if (sdata->vif.type == NL80211_IFTYPE_AP && 165 if (sdata->vif.type == NL80211_IFTYPE_AP &&
140 rcu_access_pointer(sdata->u.ap.beacon)) 166 rcu_access_pointer(sdata->u.ap.beacon))
@@ -204,3 +230,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
204 * ieee80211_reconfig(), which is also needed for hardware 230 * ieee80211_reconfig(), which is also needed for hardware
205 * hang/firmware failure/etc. recovery. 231 * hang/firmware failure/etc. recovery.
206 */ 232 */
233
234void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
235 struct cfg80211_wowlan_wakeup *wakeup,
236 gfp_t gfp)
237{
238 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
239
240 cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp);
241}
242EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup);
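The suspend path now snapshots each interface's BSS configuration into sdata->suspend_bss_conf, zeroes the live copy (pointing bssid at a static all-zero address) and tells the driver the interface is idle and disassociated; resume can then restore the snapshot wholesale. The save/zero/restore shape in miniature (userspace sketch; the struct is a stand-in, not ieee80211_bss_conf):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct bss_conf { bool assoc; bool idle; int beacon_int; };

int main(void)
{
	struct bss_conf live = { .assoc = true, .beacon_int = 100 };
	struct bss_conf saved = live;		/* suspend_bss_conf = bss_conf */

	memset(&live, 0, sizeof(live));		/* present a blank config... */
	live.idle = true;			/* ...and an idle interface */

	live = saved;				/* resume restores the snapshot */
	printf("assoc=%d beacon_int=%d\n", live.assoc, live.beacon_int);
	return 0;
}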
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 301386dabf88..d35a5dd3fb13 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -68,6 +68,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
68 sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; 68 sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
69 rcu_read_unlock(); 69 rcu_read_unlock();
70 70
71 ieee80211_sta_set_rx_nss(sta);
72
71 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 73 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
72 set_sta_flag(sta, WLAN_STA_RATE_CONTROL); 74 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
73} 75}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8c5acdc06226..eea45a2c7c35 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -494,6 +494,33 @@ minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
494 kfree(mi); 494 kfree(mi);
495} 495}
496 496
497static void
498minstrel_init_cck_rates(struct minstrel_priv *mp)
499{
500 static const int bitrates[4] = { 10, 20, 55, 110 };
501 struct ieee80211_supported_band *sband;
502 int i, j;
503
504 sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
505 if (!sband)
506 return;
507
508 for (i = 0, j = 0; i < sband->n_bitrates; i++) {
509 struct ieee80211_rate *rate = &sband->bitrates[i];
510
511 if (rate->flags & IEEE80211_RATE_ERP_G)
512 continue;
513
514 for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
515 if (rate->bitrate != bitrates[j])
516 continue;
517
518 mp->cck_rates[j] = i;
519 break;
520 }
521 }
522}
523
497static void * 524static void *
498minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 525minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
499{ 526{
@@ -539,6 +566,8 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
539 S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); 566 S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
540#endif 567#endif
541 568
569 minstrel_init_cck_rates(mp);
570
542 return mp; 571 return mp;
543} 572}
544 573
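minstrel_init_cck_rates() builds the bridge between minstrel's four fixed CCK rates (in units of 100 kbps, so 10 means 1 Mbit/s) and whatever positions those rates occupy in the 2.4 GHz band's bitrate table; the ERP_G flag check merely skips OFDM entries. The same mapping against a mock table (runnable; table contents are illustrative):

#include <stdio.h>

int main(void)
{
	static const int cck[4] = { 10, 20, 55, 110 };	/* 1/2/5.5/11 Mbit/s */
	static const int band[] = { 10, 20, 55, 110, 60 };	/* CCK, then OFDM */
	unsigned char cck_rates[4] = { 0 };
	int i, j;

	for (i = 0; i < 5; i++)
		for (j = 0; j < 4; j++)
			if (band[i] == cck[j])
				cck_rates[j] = i;	/* remember table slot */

	for (j = 0; j < 4; j++)
		printf("cck_rates[%d] = %u\n", j, cck_rates[j]);
	return 0;
}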
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 5d278eccaef0..5ecf757817f2 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -79,6 +79,8 @@ struct minstrel_priv {
79 unsigned int lookaround_rate; 79 unsigned int lookaround_rate;
80 unsigned int lookaround_rate_mrr; 80 unsigned int lookaround_rate_mrr;
81 81
82 u8 cck_rates[4];
83
82#ifdef CONFIG_MAC80211_DEBUGFS 84#ifdef CONFIG_MAC80211_DEBUGFS
83 /* 85 /*
84 * enable fixed rate processing per RC 86 * enable fixed rate processing per RC
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 9f9c453bc45d..3af141c69712 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> 2 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -63,6 +63,30 @@
63 } \ 63 } \
64} 64}
65 65
66#define CCK_DURATION(_bitrate, _short, _len) \
67 (10 /* SIFS */ + \
68 (_short ? 72 + 24 : 144 + 48 ) + \
69 (8 * (_len + 4) * 10) / (_bitrate))
70
71#define CCK_ACK_DURATION(_bitrate, _short) \
72 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
73 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
74
75#define CCK_DURATION_LIST(_short) \
76 CCK_ACK_DURATION(10, _short), \
77 CCK_ACK_DURATION(20, _short), \
78 CCK_ACK_DURATION(55, _short), \
79 CCK_ACK_DURATION(110, _short)
80
81#define CCK_GROUP \
82 [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \
83 .streams = 0, \
84 .duration = { \
85 CCK_DURATION_LIST(false), \
86 CCK_DURATION_LIST(true) \
87 } \
88 }
89
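The duration macros above estimate the airtime of one CCK data/ACK exchange: 10 us SIFS, long (144+48 us) or short (72+24 us) preamble plus PLCP header, then payload+FCS bits divided by the rate, with bitrates again in 100 kbps units. A userspace replica to sanity-check the numbers, assuming the AVG_PKT_SIZE of 1200 bytes this file defines:

#include <stdio.h>

#define AVG_PKT_SIZE 1200

#define CCK_DURATION(_bitrate, _short, _len)		\
	(10 /* SIFS */ +				\
	 ((_short) ? 72 + 24 : 144 + 48) +		\
	 (8 * ((_len) + 4) * 10) / (_bitrate))

#define CCK_ACK_DURATION(_bitrate, _short)			\
	(CCK_DURATION(((_bitrate) > 10 ? 20 : 10), 0, 60) +	\
	 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))

int main(void)
{
	/* 1 Mbit/s, long preamble: 714 us ACK leg + 9834 us data leg */
	printf("%d us\n", CCK_ACK_DURATION(10, 0));	/* 10548 */
	printf("%d us\n", CCK_ACK_DURATION(110, 1));	/* 11 Mbit/s, short */
	return 0;
}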
66/* 90/*
67 * To enable sufficiently targeted rate sampling, MCS rates are divided into 91 * To enable sufficiently targeted rate sampling, MCS rates are divided into
68 * groups, based on the number of streams and flags (HT40, SGI) that they 92 * groups, based on the number of streams and flags (HT40, SGI) that they
@@ -95,8 +119,13 @@ const struct mcs_group minstrel_mcs_groups[] = {
95#if MINSTREL_MAX_STREAMS >= 3 119#if MINSTREL_MAX_STREAMS >= 3
96 MCS_GROUP(3, 1, 1), 120 MCS_GROUP(3, 1, 1),
97#endif 121#endif
122
123 /* must be last */
124 CCK_GROUP
98}; 125};
99 126
127#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
128
100static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; 129static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
101 130
102/* 131/*
@@ -119,6 +148,29 @@ minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
119 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); 148 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
120} 149}
121 150
151static struct minstrel_rate_stats *
152minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
153 struct ieee80211_tx_rate *rate)
154{
155 int group, idx;
156
157 if (rate->flags & IEEE80211_TX_RC_MCS) {
158 group = minstrel_ht_get_group_idx(rate);
159 idx = rate->idx % MCS_GROUP_RATES;
160 } else {
161 group = MINSTREL_CCK_GROUP;
162
163 for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
164 if (rate->idx == mp->cck_rates[idx])
165 break;
166
167 /* short preamble */
168 if (!(mi->groups[group].supported & BIT(idx)))
169 idx += 4;
170 }
171 return &mi->groups[group].rates[idx];
172}
173
122static inline struct minstrel_rate_stats * 174static inline struct minstrel_rate_stats *
123minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) 175minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
124{ 176{
@@ -159,7 +211,7 @@ static void
159minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) 211minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
160{ 212{
161 struct minstrel_rate_stats *mr; 213 struct minstrel_rate_stats *mr;
162 unsigned int usecs; 214 unsigned int usecs = 0;
163 215
164 mr = &mi->groups[group].rates[rate]; 216 mr = &mi->groups[group].rates[rate];
165 217
@@ -168,7 +220,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
168 return; 220 return;
169 } 221 }
170 222
171 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 223 if (group != MINSTREL_CCK_GROUP)
224 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
225
172 usecs += minstrel_mcs_groups[group].duration[rate]; 226 usecs += minstrel_mcs_groups[group].duration[rate];
173 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); 227 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
174} 228}
@@ -231,10 +285,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
231 if (!mr->cur_tp) 285 if (!mr->cur_tp)
232 continue; 286 continue;
233 287
234 /* ignore the lowest rate of each single-stream group */
235 if (!i && minstrel_mcs_groups[group].streams == 1)
236 continue;
237
238 if ((mr->cur_tp > cur_prob_tp && mr->probability > 288 if ((mr->cur_tp > cur_prob_tp && mr->probability >
239 MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { 289 MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
240 mg->max_prob_rate = index; 290 mg->max_prob_rate = index;
@@ -297,7 +347,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
297} 347}
298 348
299static bool 349static bool
300minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) 350minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
301{ 351{
302 if (rate->idx < 0) 352 if (rate->idx < 0)
303 return false; 353 return false;
@@ -305,7 +355,13 @@ minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
305 if (!rate->count) 355 if (!rate->count)
306 return false; 356 return false;
307 357
308 return !!(rate->flags & IEEE80211_TX_RC_MCS); 358 if (rate->flags & IEEE80211_TX_RC_MCS)
359 return true;
360
361 return rate->idx == mp->cck_rates[0] ||
362 rate->idx == mp->cck_rates[1] ||
363 rate->idx == mp->cck_rates[2] ||
364 rate->idx == mp->cck_rates[3];
309} 365}
310 366
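With CCK rates in play, a tx-status rate entry can no longer be validated by the MCS flag alone: legacy entries are accepted too, but only when their index matches one of the four recorded CCK slots. The widened test as a runnable sketch (signature and names are mine):

#include <stdio.h>
#include <stdbool.h>

static bool txstat_valid(int idx, int count, bool is_mcs,
			 const unsigned char cck_rates[4])
{
	int i;

	if (idx < 0 || !count)
		return false;
	if (is_mcs)
		return true;
	for (i = 0; i < 4; i++)
		if (idx == cck_rates[i])
			return true;
	return false;
}

int main(void)
{
	const unsigned char cck[4] = { 0, 1, 2, 3 };

	printf("%d\n", txstat_valid(2, 1, false, cck));	/* 1: a CCK slot */
	printf("%d\n", txstat_valid(5, 1, false, cck));	/* 0: an OFDM slot */
	return 0;
}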
311static void 367static void
@@ -390,7 +446,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
390 struct minstrel_rate_stats *rate, *rate2; 446 struct minstrel_rate_stats *rate, *rate2;
391 struct minstrel_priv *mp = priv; 447 struct minstrel_priv *mp = priv;
392 bool last; 448 bool last;
393 int group;
394 int i; 449 int i;
395 450
396 if (!msp->is_ht) 451 if (!msp->is_ht)
@@ -419,13 +474,12 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
419 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 474 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
420 mi->sample_packets += info->status.ampdu_len; 475 mi->sample_packets += info->status.ampdu_len;
421 476
422 last = !minstrel_ht_txstat_valid(&ar[0]); 477 last = !minstrel_ht_txstat_valid(mp, &ar[0]);
423 for (i = 0; !last; i++) { 478 for (i = 0; !last; i++) {
424 last = (i == IEEE80211_TX_MAX_RATES - 1) || 479 last = (i == IEEE80211_TX_MAX_RATES - 1) ||
425 !minstrel_ht_txstat_valid(&ar[i + 1]); 480 !minstrel_ht_txstat_valid(mp, &ar[i + 1]);
426 481
427 group = minstrel_ht_get_group_idx(&ar[i]); 482 rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
428 rate = &mi->groups[group].rates[ar[i].idx % 8];
429 483
430 if (last) 484 if (last)
431 rate->success += info->status.ampdu_ack_len; 485 rate->success += info->status.ampdu_ack_len;
@@ -451,7 +505,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
451 505
452 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 506 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
453 minstrel_ht_update_stats(mp, mi); 507 minstrel_ht_update_stats(mp, mi);
454 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 508 if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
509 mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
455 minstrel_aggr_check(sta, skb); 510 minstrel_aggr_check(sta, skb);
456 } 511 }
457} 512}
@@ -467,6 +522,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
467 unsigned int ctime = 0; 522 unsigned int ctime = 0;
468 unsigned int t_slot = 9; /* FIXME */ 523 unsigned int t_slot = 9; /* FIXME */
469 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); 524 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
525 unsigned int overhead = 0, overhead_rtscts = 0;
470 526
471 mr = minstrel_get_ratestats(mi, index); 527 mr = minstrel_get_ratestats(mi, index);
472 if (mr->probability < MINSTREL_FRAC(1, 10)) { 528 if (mr->probability < MINSTREL_FRAC(1, 10)) {
@@ -488,9 +544,14 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
488 ctime += (t_slot * cw) >> 1; 544 ctime += (t_slot * cw) >> 1;
489 cw = min((cw << 1) | 1, mp->cw_max); 545 cw = min((cw << 1) | 1, mp->cw_max);
490 546
547 if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
548 overhead = mi->overhead;
549 overhead_rtscts = mi->overhead_rtscts;
550 }
551
491 /* Total TX time for data and Contention after first 2 tries */ 552 /* Total TX time for data and Contention after first 2 tries */
492 tx_time = ctime + 2 * (mi->overhead + tx_time_data); 553 tx_time = ctime + 2 * (overhead + tx_time_data);
493 tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data); 554 tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
494 555
495 /* See how many more tries we can fit inside segment size */ 556 /* See how many more tries we can fit inside segment size */
496 do { 557 do {
@@ -499,8 +560,8 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
499 cw = min((cw << 1) | 1, mp->cw_max); 560 cw = min((cw << 1) | 1, mp->cw_max);
500 561
501 /* Total TX time after this try */ 562 /* Total TX time after this try */
502 tx_time += ctime + mi->overhead + tx_time_data; 563 tx_time += ctime + overhead + tx_time_data;
503 tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data; 564 tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
504 565
505 if (tx_time_rtscts < mp->segment_size) 566 if (tx_time_rtscts < mp->segment_size)
506 mr->retry_count_rtscts++; 567 mr->retry_count_rtscts++;
@@ -530,9 +591,16 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
530 else 591 else
531 rate->count = mr->retry_count; 592 rate->count = mr->retry_count;
532 593
533 rate->flags = IEEE80211_TX_RC_MCS | group->flags; 594 rate->flags = 0;
534 if (rtscts) 595 if (rtscts)
535 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 596 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
597
598 if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
599 rate->idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
600 return;
601 }
602
603 rate->flags |= IEEE80211_TX_RC_MCS | group->flags;
536 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; 604 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
537} 605}
538 606
@@ -596,6 +664,22 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
596} 664}
597 665
598static void 666static void
667minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
668 struct minstrel_ht_sta *mi, bool val)
669{
670 u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
671
672 if (!supported || !mi->cck_supported_short)
673 return;
674
675 if (supported & (mi->cck_supported_short << (val * 4)))
676 return;
677
678 supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
679 mi->groups[MINSTREL_CCK_GROUP].supported = supported;
680}
681
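The CCK group's supported bitmap packs both preamble variants: bits 0-3 are the four rates with long preamble, bits 4-7 the same rates with short preamble, which is also why minstrel_ht_get_stats() above adds 4 to the index for short-preamble frames. The XOR in this function migrates the short-capable rates between the two halves when the BSS toggles short preamble; in isolation (userspace, illustrative masks):

#include <stdio.h>

int main(void)
{
	/* 0x0e marks 2/5.5/11 Mbit/s as short-preamble capable;
	 * 1 Mbit/s has no short variant and stays in the long half */
	unsigned char cck_supported_short = 0x0e;
	unsigned char supported = 0x0f;		/* all four, long preamble */

	supported ^= cck_supported_short | (cck_supported_short << 4);
	printf("supported=0x%02x\n", supported);	/* 0xe1 */
	return 0;
}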
682static void
599minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 683minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
600 struct ieee80211_tx_rate_control *txrc) 684 struct ieee80211_tx_rate_control *txrc)
601{ 685{
@@ -614,6 +698,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
614 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); 698 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
615 699
616 info->flags |= mi->tx_flags; 700 info->flags |= mi->tx_flags;
701 minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
617 702
618 /* Don't use EAPOL frames for sampling on non-mrr hw */ 703 /* Don't use EAPOL frames for sampling on non-mrr hw */
619 if (mp->hw->max_rates == 1 && 704 if (mp->hw->max_rates == 1 &&
@@ -687,6 +772,30 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
687} 772}
688 773
689static void 774static void
775minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
776 struct ieee80211_supported_band *sband,
777 struct ieee80211_sta *sta)
778{
779 int i;
780
781 if (sband->band != IEEE80211_BAND_2GHZ)
782 return;
783
784 mi->cck_supported = 0;
785 mi->cck_supported_short = 0;
786 for (i = 0; i < 4; i++) {
787 if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
788 continue;
789
790 mi->cck_supported |= BIT(i);
791 if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
792 mi->cck_supported_short |= BIT(i);
793 }
794
795 mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
796}
797
798static void
690minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, 799minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
691 struct ieee80211_sta *sta, void *priv_sta) 800 struct ieee80211_sta *sta, void *priv_sta)
692{ 801{
@@ -699,14 +808,13 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
699 int ack_dur; 808 int ack_dur;
700 int stbc; 809 int stbc;
701 int i; 810 int i;
702 unsigned int smps;
703 811
704 /* fall back to the old minstrel for legacy stations */ 812 /* fall back to the old minstrel for legacy stations */
705 if (!sta->ht_cap.ht_supported) 813 if (!sta->ht_cap.ht_supported)
706 goto use_legacy; 814 goto use_legacy;
707 815
708 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != 816 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
709 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); 817 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);
710 818
711 msp->is_ht = true; 819 msp->is_ht = true;
712 memset(mi, 0, sizeof(*mi)); 820 memset(mi, 0, sizeof(*mi));
@@ -735,28 +843,29 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
735 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) 843 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
736 mi->tx_flags |= IEEE80211_TX_CTL_LDPC; 844 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
737 845
738 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
739 IEEE80211_HT_CAP_SM_PS_SHIFT;
740
741 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { 846 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
742 u16 req = 0;
743
744 mi->groups[i].supported = 0; 847 mi->groups[i].supported = 0;
745 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { 848 if (i == MINSTREL_CCK_GROUP) {
746 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 849 minstrel_ht_update_cck(mp, mi, sband, sta);
747 req |= IEEE80211_HT_CAP_SGI_40; 850 continue;
748 else
749 req |= IEEE80211_HT_CAP_SGI_20;
750 } 851 }
751 852
752 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 853 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
753 req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 854 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
855 if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
856 continue;
857 } else {
858 if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
859 continue;
860 }
861 }
754 862
755 if ((sta_cap & req) != req) 863 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
864 sta->bandwidth < IEEE80211_STA_RX_BW_40)
756 continue; 865 continue;
757 866
758 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ 867 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
759 if (smps == WLAN_HT_CAP_SM_PS_STATIC && 868 if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
760 minstrel_mcs_groups[i].streams > 1) 869 minstrel_mcs_groups[i].streams > 1)
761 continue; 870 continue;
762 871
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 462d2b227ed5..302dbd52180d 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -107,8 +107,11 @@ struct minstrel_ht_sta {
107 /* current MCS group to be sampled */ 107 /* current MCS group to be sampled */
108 u8 sample_group; 108 u8 sample_group;
109 109
110 u8 cck_supported;
111 u8 cck_supported_short;
112
110 /* MCS rate group info and statistics */ 113 /* MCS rate group info and statistics */
111 struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS]; 114 struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1];
112}; 115};
113 116
114struct minstrel_ht_sta_priv { 117struct minstrel_ht_sta_priv {
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index e788f76a1dfe..df44a5ad8270 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -15,13 +15,76 @@
15#include "rc80211_minstrel.h" 15#include "rc80211_minstrel.h"
16#include "rc80211_minstrel_ht.h" 16#include "rc80211_minstrel_ht.h"
17 17
18static char *
19minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
20{
21 unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
22 const struct mcs_group *mg;
23 unsigned int j, tp, prob, eprob;
24 char htmode = '2';
25 char gimode = 'L';
26
27 if (!mi->groups[i].supported)
28 return p;
29
30 mg = &minstrel_mcs_groups[i];
31 if (mg->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
32 htmode = '4';
33 if (mg->flags & IEEE80211_TX_RC_SHORT_GI)
34 gimode = 'S';
35
36 for (j = 0; j < MCS_GROUP_RATES; j++) {
37 struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
38 static const int bitrates[4] = { 10, 20, 55, 110 };
39 int idx = i * MCS_GROUP_RATES + j;
40
41 if (!(mi->groups[i].supported & BIT(j)))
42 continue;
43
44 if (i == max_mcs)
45 p += sprintf(p, "CCK/%cP ", j < 4 ? 'L' : 'S');
46 else
47 p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
48
49 *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
50 *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
51 *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
52
53 if (i == max_mcs) {
54 int r = bitrates[j % 4];
55 p += sprintf(p, " %2u.%1uM", r / 10, r % 10);
56 } else {
57 p += sprintf(p, " MCS%-2u", (mg->streams - 1) *
58 MCS_GROUP_RATES + j);
59 }
60
61 tp = mr->cur_tp / 10;
62 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
63 eprob = MINSTREL_TRUNC(mr->probability * 1000);
64
65 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
66 "%3u %3u(%3u) %8llu %8llu\n",
67 tp / 10, tp % 10,
68 eprob / 10, eprob % 10,
69 prob / 10, prob % 10,
70 mr->retry_count,
71 mr->last_success,
72 mr->last_attempts,
73 (unsigned long long)mr->succ_hist,
74 (unsigned long long)mr->att_hist);
75 }
76
77 return p;
78}
79
18static int 80static int
19minstrel_ht_stats_open(struct inode *inode, struct file *file) 81minstrel_ht_stats_open(struct inode *inode, struct file *file)
20{ 82{
21 struct minstrel_ht_sta_priv *msp = inode->i_private; 83 struct minstrel_ht_sta_priv *msp = inode->i_private;
22 struct minstrel_ht_sta *mi = &msp->ht; 84 struct minstrel_ht_sta *mi = &msp->ht;
23 struct minstrel_debugfs_info *ms; 85 struct minstrel_debugfs_info *ms;
24 unsigned int i, j, tp, prob, eprob; 86 unsigned int i;
87 unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
25 char *p; 88 char *p;
26 int ret; 89 int ret;
27 90
@@ -38,50 +101,13 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
38 101
39 file->private_data = ms; 102 file->private_data = ms;
40 p = ms->buf; 103 p = ms->buf;
41 p += sprintf(p, "type rate throughput ewma prob this prob " 104 p += sprintf(p, "type rate throughput ewma prob this prob "
42 "this succ/attempt success attempts\n"); 105 "retry this succ/attempt success attempts\n");
43 for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
44 char htmode = '2';
45 char gimode = 'L';
46
47 if (!mi->groups[i].supported)
48 continue;
49
50 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
51 htmode = '4';
52 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
53 gimode = 'S';
54 106
55 for (j = 0; j < MCS_GROUP_RATES; j++) { 107 p = minstrel_ht_stats_dump(mi, max_mcs, p);
56 struct minstrel_rate_stats *mr = &mi->groups[i].rates[j]; 108 for (i = 0; i < max_mcs; i++)
57 int idx = i * MCS_GROUP_RATES + j; 109 p = minstrel_ht_stats_dump(mi, i, p);
58 110
59 if (!(mi->groups[i].supported & BIT(j)))
60 continue;
61
62 p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
63
64 *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
65 *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
66 *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
67 p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
68 MCS_GROUP_RATES + j);
69
70 tp = mr->cur_tp / 10;
71 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
72 eprob = MINSTREL_TRUNC(mr->probability * 1000);
73
74 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
75 "%3u(%3u) %8llu %8llu\n",
76 tp / 10, tp % 10,
77 eprob / 10, eprob % 10,
78 prob / 10, prob % 10,
79 mr->last_success,
80 mr->last_attempts,
81 (unsigned long long)mr->succ_hist,
82 (unsigned long long)mr->att_hist);
83 }
84 }
85 p += sprintf(p, "\nTotal packet count:: ideal %d " 111 p += sprintf(p, "\nTotal packet count:: ideal %d "
86 "lookaround %d\n", 112 "lookaround %d\n",
87 max(0, (int) mi->total_packets - (int) mi->sample_packets), 113 max(0, (int) mi->total_packets - (int) mi->sample_packets),
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 580704eba8b8..bb73ed2d20b9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -668,9 +668,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
668 668
669static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 669static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
670 struct tid_ampdu_rx *tid_agg_rx, 670 struct tid_ampdu_rx *tid_agg_rx,
671 int index) 671 int index,
672 struct sk_buff_head *frames)
672{ 673{
673 struct ieee80211_local *local = sdata->local;
674 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 674 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
675 struct ieee80211_rx_status *status; 675 struct ieee80211_rx_status *status;
676 676
@@ -684,7 +684,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
684 tid_agg_rx->reorder_buf[index] = NULL; 684 tid_agg_rx->reorder_buf[index] = NULL;
685 status = IEEE80211_SKB_RXCB(skb); 685 status = IEEE80211_SKB_RXCB(skb);
686 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 686 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
687 skb_queue_tail(&local->rx_skb_queue, skb); 687 __skb_queue_tail(frames, skb);
688 688
689no_frame: 689no_frame:
690 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 690 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
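The rx.c changes thread a caller-owned sk_buff_head through the whole reorder-release path: instead of each released frame being pushed onto the shared local->rx_skb_queue, frames are collected on a local list with the lock-free __skb_queue_tail() and handed back to the caller as a batch. The same ownership shape in a userspace toy (an int array stands in for the skb list):

#include <stdio.h>

#define MAX_BATCH 8

struct batch { int seq[MAX_BATCH]; int n; };

static void release_frame(struct batch *frames, int seq)
{
	frames->seq[frames->n++] = seq;		/* __skb_queue_tail() analogue */
}

int main(void)
{
	struct batch frames = { .n = 0 };	/* caller-owned, like the new
						 * sk_buff_head parameter */
	int s;

	for (s = 3; s < 6; s++)
		release_frame(&frames, s);	/* in-order release */
	for (s = 0; s < frames.n; s++)
		printf("rx frame seq=%d\n", frames.seq[s]);
	return 0;
}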
@@ -692,7 +692,8 @@ no_frame:
692 692
693static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 693static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
694 struct tid_ampdu_rx *tid_agg_rx, 694 struct tid_ampdu_rx *tid_agg_rx,
695 u16 head_seq_num) 695 u16 head_seq_num,
696 struct sk_buff_head *frames)
696{ 697{
697 int index; 698 int index;
698 699
@@ -701,7 +702,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
701 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 702 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
702 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 703 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
703 tid_agg_rx->buf_size; 704 tid_agg_rx->buf_size;
704 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index); 705 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
706 frames);
705 } 707 }
706} 708}
707 709
@@ -717,7 +719,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
717#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 719#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
718 720
719static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 721static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
720 struct tid_ampdu_rx *tid_agg_rx) 722 struct tid_ampdu_rx *tid_agg_rx,
723 struct sk_buff_head *frames)
721{ 724{
722 int index, j; 725 int index, j;
723 726
@@ -746,7 +749,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
746 749
747 ht_dbg_ratelimited(sdata, 750 ht_dbg_ratelimited(sdata,
748 "release an RX reorder frame due to timeout on earlier frames\n"); 751 "release an RX reorder frame due to timeout on earlier frames\n");
749 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j); 752 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
753 frames);
750 754
751 /* 755 /*
752 * Increment the head seq# also for the skipped slots. 756 * Increment the head seq# also for the skipped slots.
@@ -756,7 +760,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
756 skipped = 0; 760 skipped = 0;
757 } 761 }
758 } else while (tid_agg_rx->reorder_buf[index]) { 762 } else while (tid_agg_rx->reorder_buf[index]) {
759 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index); 763 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
764 frames);
760 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 765 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
761 tid_agg_rx->buf_size; 766 tid_agg_rx->buf_size;
762 } 767 }
@@ -788,7 +793,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
788 */ 793 */
789static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 794static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
790 struct tid_ampdu_rx *tid_agg_rx, 795 struct tid_ampdu_rx *tid_agg_rx,
791 struct sk_buff *skb) 796 struct sk_buff *skb,
797 struct sk_buff_head *frames)
792{ 798{
793 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 799 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
794 u16 sc = le16_to_cpu(hdr->seq_ctrl); 800 u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -816,7 +822,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
816 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); 822 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
817 /* release stored frames up to new head to stack */ 823 /* release stored frames up to new head to stack */
818 ieee80211_release_reorder_frames(sdata, tid_agg_rx, 824 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
819 head_seq_num); 825 head_seq_num, frames);
820 } 826 }
821 827
822 /* Now the new frame is always in the range of the reordering buffer */ 828 /* Now the new frame is always in the range of the reordering buffer */
@@ -846,7 +852,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
846 tid_agg_rx->reorder_buf[index] = skb; 852 tid_agg_rx->reorder_buf[index] = skb;
847 tid_agg_rx->reorder_time[index] = jiffies; 853 tid_agg_rx->reorder_time[index] = jiffies;
848 tid_agg_rx->stored_mpdu_num++; 854 tid_agg_rx->stored_mpdu_num++;
849 ieee80211_sta_reorder_release(sdata, tid_agg_rx); 855 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
850 856
851 out: 857 out:
852 spin_unlock(&tid_agg_rx->reorder_lock); 858 spin_unlock(&tid_agg_rx->reorder_lock);
@@ -857,7 +863,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
857 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 863 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
858 * true if the MPDU was buffered, false if it should be processed. 864 * true if the MPDU was buffered, false if it should be processed.
859 */ 865 */
860static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) 866static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
867 struct sk_buff_head *frames)
861{ 868{
862 struct sk_buff *skb = rx->skb; 869 struct sk_buff *skb = rx->skb;
863 struct ieee80211_local *local = rx->local; 870 struct ieee80211_local *local = rx->local;
@@ -922,11 +929,12 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
922 * sure that we cannot get to it any more before doing 929 * sure that we cannot get to it any more before doing
923 * anything with it. 930 * anything with it.
924 */ 931 */
925 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb)) 932 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
933 frames))
926 return; 934 return;
927 935
928 dont_reorder: 936 dont_reorder:
929 skb_queue_tail(&local->rx_skb_queue, skb); 937 __skb_queue_tail(frames, skb);
930} 938}
931 939
932static ieee80211_rx_result debug_noinline 940static ieee80211_rx_result debug_noinline
@@ -1452,6 +1460,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1452 } 1460 }
1453 } 1461 }
1454 1462
1463 /* mesh power save support */
1464 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1465 ieee80211_mps_rx_h_sta_process(sta, hdr);
1466
1455 /* 1467 /*
1456 * Drop (qos-)data::nullfunc frames silently, since they 1468 * Drop (qos-)data::nullfunc frames silently, since they
1457 * are used only to control station power saving mode. 1469 * are used only to control station power saving mode.
@@ -2015,7 +2027,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2015 /* frame is in RMC, don't forward */ 2027 /* frame is in RMC, don't forward */
2016 if (ieee80211_is_data(hdr->frame_control) && 2028 if (ieee80211_is_data(hdr->frame_control) &&
2017 is_multicast_ether_addr(hdr->addr1) && 2029 is_multicast_ether_addr(hdr->addr1) &&
2018 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) 2030 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2019 return RX_DROP_MONITOR; 2031 return RX_DROP_MONITOR;
2020 2032
2021 if (!ieee80211_is_data(hdr->frame_control) || 2033 if (!ieee80211_is_data(hdr->frame_control) ||
@@ -2042,9 +2054,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2042 } 2054 }
2043 2055
2044 rcu_read_lock(); 2056 rcu_read_lock();
2045 mppath = mpp_path_lookup(proxied_addr, sdata); 2057 mppath = mpp_path_lookup(sdata, proxied_addr);
2046 if (!mppath) { 2058 if (!mppath) {
2047 mpp_path_add(proxied_addr, mpp_addr, sdata); 2059 mpp_path_add(sdata, proxied_addr, mpp_addr);
2048 } else { 2060 } else {
2049 spin_lock_bh(&mppath->state_lock); 2061 spin_lock_bh(&mppath->state_lock);
2050 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2062 if (!ether_addr_equal(mppath->mpp, mpp_addr))
@@ -2090,12 +2102,15 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2090 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2102 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2091 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2103 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2092 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2104 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2093 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) { 2105 /* update power mode indication when forwarding */
2106 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2107 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2108 /* mesh power mode flags updated in mesh_nexthop_lookup */
2094 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2109 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2095 } else { 2110 } else {
2096 /* unable to resolve next hop */ 2111 /* unable to resolve next hop */
2097 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, 2112 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2098 0, reason, fwd_hdr->addr2, sdata); 2113 fwd_hdr->addr3, 0, reason, fwd_hdr->addr2);
2099 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2114 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2100 kfree_skb(fwd_skb); 2115 kfree_skb(fwd_skb);
2101 return RX_DROP_MONITOR; 2116 return RX_DROP_MONITOR;
@@ -2177,7 +2192,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2177} 2192}
2178 2193
2179static ieee80211_rx_result debug_noinline 2194static ieee80211_rx_result debug_noinline
2180ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 2195ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2181{ 2196{
2182 struct sk_buff *skb = rx->skb; 2197 struct sk_buff *skb = rx->skb;
2183 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2198 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
@@ -2216,7 +2231,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2216 spin_lock(&tid_agg_rx->reorder_lock); 2231 spin_lock(&tid_agg_rx->reorder_lock);
2217 /* release stored frames up to start of BAR */ 2232 /* release stored frames up to start of BAR */
2218 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2233 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2219 start_seq_num); 2234 start_seq_num, frames);
2220 spin_unlock(&tid_agg_rx->reorder_lock); 2235 spin_unlock(&tid_agg_rx->reorder_lock);
2221 2236
2222 kfree_skb(skb); 2237 kfree_skb(skb);
@@ -2353,38 +2368,34 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2353 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2368 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2354 break; 2369 break;
2355 2370
2356 /* verify action & smps_control are present */ 2371 /* verify action & smps_control/chanwidth are present */
2357 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2372 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2358 goto invalid; 2373 goto invalid;
2359 2374
2360 switch (mgmt->u.action.u.ht_smps.action) { 2375 switch (mgmt->u.action.u.ht_smps.action) {
2361 case WLAN_HT_ACTION_SMPS: { 2376 case WLAN_HT_ACTION_SMPS: {
2362 struct ieee80211_supported_band *sband; 2377 struct ieee80211_supported_band *sband;
2363 u8 smps; 2378 enum ieee80211_smps_mode smps_mode;
2364 2379
2365 /* convert to HT capability */ 2380 /* convert to HT capability */
2366 switch (mgmt->u.action.u.ht_smps.smps_control) { 2381 switch (mgmt->u.action.u.ht_smps.smps_control) {
2367 case WLAN_HT_SMPS_CONTROL_DISABLED: 2382 case WLAN_HT_SMPS_CONTROL_DISABLED:
2368 smps = WLAN_HT_CAP_SM_PS_DISABLED; 2383 smps_mode = IEEE80211_SMPS_OFF;
2369 break; 2384 break;
2370 case WLAN_HT_SMPS_CONTROL_STATIC: 2385 case WLAN_HT_SMPS_CONTROL_STATIC:
2371 smps = WLAN_HT_CAP_SM_PS_STATIC; 2386 smps_mode = IEEE80211_SMPS_STATIC;
2372 break; 2387 break;
2373 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2388 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2374 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 2389 smps_mode = IEEE80211_SMPS_DYNAMIC;
2375 break; 2390 break;
2376 default: 2391 default:
2377 goto invalid; 2392 goto invalid;
2378 } 2393 }
2379 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
2380 2394
2381 /* if no change do nothing */ 2395 /* if no change do nothing */
2382 if ((rx->sta->sta.ht_cap.cap & 2396 if (rx->sta->sta.smps_mode == smps_mode)
2383 IEEE80211_HT_CAP_SM_PS) == smps)
2384 goto handled; 2397 goto handled;
2385 2398 rx->sta->sta.smps_mode = smps_mode;
2386 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
2387 rx->sta->sta.ht_cap.cap |= smps;
2388 2399
2389 sband = rx->local->hw.wiphy->bands[status->band]; 2400 sband = rx->local->hw.wiphy->bands[status->band];
2390 2401
@@ -2392,11 +2403,66 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2392 IEEE80211_RC_SMPS_CHANGED); 2403 IEEE80211_RC_SMPS_CHANGED);
2393 goto handled; 2404 goto handled;
2394 } 2405 }
2406 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
2407 struct ieee80211_supported_band *sband;
2408 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
2409 enum ieee80211_sta_rx_bandwidth new_bw;
2410
2411 /* If it doesn't support 40 MHz it can't change ... */
2412 if (!(rx->sta->sta.ht_cap.cap &
2413 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
2414 goto handled;
2415
2416 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
2417 new_bw = IEEE80211_STA_RX_BW_20;
2418 else
2419 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
2420
2421 if (rx->sta->sta.bandwidth == new_bw)
2422 goto handled;
2423
2424 sband = rx->local->hw.wiphy->bands[status->band];
2425
2426 rate_control_rate_update(local, sband, rx->sta,
2427 IEEE80211_RC_BW_CHANGED);
2428 goto handled;
2429 }
2395 default: 2430 default:
2396 goto invalid; 2431 goto invalid;
2397 } 2432 }
2398 2433
2399 break; 2434 break;
2435 case WLAN_CATEGORY_VHT:
2436 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2437 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2438 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2439 sdata->vif.type != NL80211_IFTYPE_AP &&
2440 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2441 break;
2442
2443 /* verify action code is present */
2444 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2445 goto invalid;
2446
2447 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
2448 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
2449 u8 opmode;
2450
2451 /* verify opmode is present */
2452 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2453 goto invalid;
2454
2455 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
2456
2457 ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
2458 opmode, status->band,
2459 false);
2460 goto handled;
2461 }
2462 default:
2463 break;
2464 }
2465 break;
2400 case WLAN_CATEGORY_BACK: 2466 case WLAN_CATEGORY_BACK:
2401 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2467 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2402 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2468 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -2648,8 +2714,9 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2648 return RX_DROP_MONITOR; 2714 return RX_DROP_MONITOR;
2649 break; 2715 break;
2650 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2716 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2651 /* process only for ibss */ 2717 /* process only for ibss and mesh */
2652 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2718 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2719 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
2653 return RX_DROP_MONITOR; 2720 return RX_DROP_MONITOR;
2654 break; 2721 break;
2655 default: 2722 default:
@@ -2772,7 +2839,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2772 } 2839 }
2773} 2840}
2774 2841
2775static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) 2842static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2843 struct sk_buff_head *frames)
2776{ 2844{
2777 ieee80211_rx_result res = RX_DROP_MONITOR; 2845 ieee80211_rx_result res = RX_DROP_MONITOR;
2778 struct sk_buff *skb; 2846 struct sk_buff *skb;
@@ -2784,15 +2852,9 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2784 goto rxh_next; \ 2852 goto rxh_next; \
2785 } while (0); 2853 } while (0);
2786 2854
2787 spin_lock(&rx->local->rx_skb_queue.lock); 2855 spin_lock_bh(&rx->local->rx_path_lock);
2788 if (rx->local->running_rx_handler)
2789 goto unlock;
2790
2791 rx->local->running_rx_handler = true;
2792
2793 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2794 spin_unlock(&rx->local->rx_skb_queue.lock);
2795 2856
2857 while ((skb = __skb_dequeue(frames))) {
2796 /* 2858 /*
2797 * all the other fields are valid across frames 2859 * all the other fields are valid across frames
2798 * that belong to an aMPDU since they are on the 2860 * that belong to an aMPDU since they are on the
@@ -2813,7 +2875,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2813#endif 2875#endif
2814 CALL_RXH(ieee80211_rx_h_amsdu) 2876 CALL_RXH(ieee80211_rx_h_amsdu)
2815 CALL_RXH(ieee80211_rx_h_data) 2877 CALL_RXH(ieee80211_rx_h_data)
2816 CALL_RXH(ieee80211_rx_h_ctrl); 2878
2879 /* special treatment -- needs the queue */
2880 res = ieee80211_rx_h_ctrl(rx, frames);
2881 if (res != RX_CONTINUE)
2882 goto rxh_next;
2883
2817 CALL_RXH(ieee80211_rx_h_mgmt_check) 2884 CALL_RXH(ieee80211_rx_h_mgmt_check)
2818 CALL_RXH(ieee80211_rx_h_action) 2885 CALL_RXH(ieee80211_rx_h_action)
2819 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 2886 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2822,20 +2889,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2822 2889
2823 rxh_next: 2890 rxh_next:
2824 ieee80211_rx_handlers_result(rx, res); 2891 ieee80211_rx_handlers_result(rx, res);
2825 spin_lock(&rx->local->rx_skb_queue.lock); 2892
2826#undef CALL_RXH 2893#undef CALL_RXH
2827 } 2894 }
2828 2895
2829 rx->local->running_rx_handler = false; 2896 spin_unlock_bh(&rx->local->rx_path_lock);
2830
2831 unlock:
2832 spin_unlock(&rx->local->rx_skb_queue.lock);
2833} 2897}
2834 2898
2835static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 2899static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2836{ 2900{
2901 struct sk_buff_head reorder_release;
2837 ieee80211_rx_result res = RX_DROP_MONITOR; 2902 ieee80211_rx_result res = RX_DROP_MONITOR;
2838 2903
2904 __skb_queue_head_init(&reorder_release);
2905
2839#define CALL_RXH(rxh) \ 2906#define CALL_RXH(rxh) \
2840 do { \ 2907 do { \
2841 res = rxh(rx); \ 2908 res = rxh(rx); \
@@ -2845,9 +2912,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2845 2912
2846 CALL_RXH(ieee80211_rx_h_check) 2913 CALL_RXH(ieee80211_rx_h_check)
2847 2914
2848 ieee80211_rx_reorder_ampdu(rx); 2915 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2849 2916
2850 ieee80211_rx_handlers(rx); 2917 ieee80211_rx_handlers(rx, &reorder_release);
2851 return; 2918 return;
2852 2919
2853 rxh_next: 2920 rxh_next:
@@ -2862,6 +2929,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2862 */ 2929 */
2863void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 2930void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2864{ 2931{
2932 struct sk_buff_head frames;
2865 struct ieee80211_rx_data rx = { 2933 struct ieee80211_rx_data rx = {
2866 .sta = sta, 2934 .sta = sta,
2867 .sdata = sta->sdata, 2935 .sdata = sta->sdata,
@@ -2877,11 +2945,13 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2877 if (!tid_agg_rx) 2945 if (!tid_agg_rx)
2878 return; 2946 return;
2879 2947
2948 __skb_queue_head_init(&frames);
2949
2880 spin_lock(&tid_agg_rx->reorder_lock); 2950 spin_lock(&tid_agg_rx->reorder_lock);
2881 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx); 2951 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
2882 spin_unlock(&tid_agg_rx->reorder_lock); 2952 spin_unlock(&tid_agg_rx->reorder_lock);
2883 2953
2884 ieee80211_rx_handlers(&rx); 2954 ieee80211_rx_handlers(&rx, &frames);
2885} 2955}
2886 2956
2887/* main receive path */ 2957/* main receive path */
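
Taken together, the rx.c hunks above remove the per-device queue (local->rx_skb_queue guarded by its own lock plus the running_rx_handler flag) and instead thread a caller-owned sk_buff_head through the reorder-release path, with local->rx_path_lock serializing the handler run. The shape of the change: the caller builds a private list, the release functions append to it, and the handlers drain it. A small self-contained model of that caller-owned-queue pattern (plain C types for illustration, not mac80211 APIs):

    #include <stdio.h>
    #include <stdlib.h>

    struct frame { int seq; struct frame *next; };
    struct frame_list { struct frame *head, **tail; };

    static void list_init(struct frame_list *l)
    {
            l->head = NULL;
            l->tail = &l->head;
    }

    static void list_add(struct frame_list *l, int seq)
    {
            struct frame *f = malloc(sizeof(*f));
            f->seq = seq;
            f->next = NULL;
            *l->tail = f;
            l->tail = &f->next;
    }

    /* reorder stage: release buffered frames into the caller's
     * list rather than into a queue shared by all contexts */
    static void reorder_release(struct frame_list *frames)
    {
            list_add(frames, 1);
            list_add(frames, 2);
    }

    /* handler stage: in mac80211 this runs under rx_path_lock */
    static void rx_handlers(struct frame_list *frames)
    {
            struct frame *f = frames->head;
            while (f) {
                    struct frame *next = f->next;
                    printf("process seq %d\n", f->seq);
                    free(f);
                    f = next;
            }
    }

    int main(void)
    {
            struct frame_list frames;  /* on stack, like the new sk_buff_head */
            list_init(&frames);
            reorder_release(&frames);
            rx_handlers(&frames);
            return 0;
    }
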
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 8ed83dcc149f..43a45cf00e06 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -27,22 +27,15 @@
27 27
28#define IEEE80211_PROBE_DELAY (HZ / 33) 28#define IEEE80211_PROBE_DELAY (HZ / 33)
29#define IEEE80211_CHANNEL_TIME (HZ / 33) 29#define IEEE80211_CHANNEL_TIME (HZ / 33)
30#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) 30#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9)
31
32static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
33{
34 struct ieee80211_bss *bss = (void *)cbss->priv;
35
36 kfree(bss_mesh_id(bss));
37 kfree(bss_mesh_cfg(bss));
38}
39 31
40void ieee80211_rx_bss_put(struct ieee80211_local *local, 32void ieee80211_rx_bss_put(struct ieee80211_local *local,
41 struct ieee80211_bss *bss) 33 struct ieee80211_bss *bss)
42{ 34{
43 if (!bss) 35 if (!bss)
44 return; 36 return;
45 cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv)); 37 cfg80211_put_bss(local->hw.wiphy,
38 container_of((void *)bss, struct cfg80211_bss, priv));
46} 39}
47 40
48static bool is_uapsd_supported(struct ieee802_11_elems *elems) 41static bool is_uapsd_supported(struct ieee802_11_elems *elems)
@@ -65,12 +58,11 @@ static bool is_uapsd_supported(struct ieee802_11_elems *elems)
65struct ieee80211_bss * 58struct ieee80211_bss *
66ieee80211_bss_info_update(struct ieee80211_local *local, 59ieee80211_bss_info_update(struct ieee80211_local *local,
67 struct ieee80211_rx_status *rx_status, 60 struct ieee80211_rx_status *rx_status,
68 struct ieee80211_mgmt *mgmt, 61 struct ieee80211_mgmt *mgmt, size_t len,
69 size_t len,
70 struct ieee802_11_elems *elems, 62 struct ieee802_11_elems *elems,
71 struct ieee80211_channel *channel, 63 struct ieee80211_channel *channel)
72 bool beacon)
73{ 64{
65 bool beacon = ieee80211_is_beacon(mgmt->frame_control);
74 struct cfg80211_bss *cbss; 66 struct cfg80211_bss *cbss;
75 struct ieee80211_bss *bss; 67 struct ieee80211_bss *bss;
76 int clen, srlen; 68 int clen, srlen;
@@ -86,10 +78,12 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
86 if (!cbss) 78 if (!cbss)
87 return NULL; 79 return NULL;
88 80
89 cbss->free_priv = ieee80211_rx_bss_free;
90 bss = (void *)cbss->priv; 81 bss = (void *)cbss->priv;
91 82
92 bss->device_ts = rx_status->device_timestamp; 83 if (beacon)
84 bss->device_ts_beacon = rx_status->device_timestamp;
85 else
86 bss->device_ts_presp = rx_status->device_timestamp;
93 87
94 if (elems->parse_error) { 88 if (elems->parse_error) {
95 if (beacon) 89 if (beacon)
@@ -113,18 +107,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
113 bss->valid_data |= IEEE80211_BSS_VALID_ERP; 107 bss->valid_data |= IEEE80211_BSS_VALID_ERP;
114 } 108 }
115 109
116 if (elems->tim && (!elems->parse_error ||
117 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
118 struct ieee80211_tim_ie *tim_ie = elems->tim;
119 bss->dtim_period = tim_ie->dtim_period;
120 if (!elems->parse_error)
121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
122 }
123
124 /* If the beacon had no TIM IE, or it was invalid, use 1 */
125 if (beacon && !bss->dtim_period)
126 bss->dtim_period = 1;
127
128 /* replace old supported rates if we get new values */ 110 /* replace old supported rates if we get new values */
129 if (!elems->parse_error || 111 if (!elems->parse_error ||
130 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) { 112 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
@@ -159,9 +141,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
159 bss->valid_data |= IEEE80211_BSS_VALID_WMM; 141 bss->valid_data |= IEEE80211_BSS_VALID_WMM;
160 } 142 }
161 143
162 if (!beacon)
163 bss->last_probe_resp = jiffies;
164
165 return bss; 144 return bss;
166} 145}
167 146
@@ -215,7 +194,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
215 194
216 bss = ieee80211_bss_info_update(local, rx_status, 195 bss = ieee80211_bss_info_update(local, rx_status,
217 mgmt, skb->len, &elems, 196 mgmt, skb->len, &elems,
218 channel, beacon); 197 channel);
219 if (bss) 198 if (bss)
220 ieee80211_rx_bss_put(local, bss); 199 ieee80211_rx_bss_put(local, bss);
221} 200}
@@ -304,7 +283,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
304 if (!was_hw_scan) { 283 if (!was_hw_scan) {
305 ieee80211_configure_filter(local); 284 ieee80211_configure_filter(local);
306 drv_sw_scan_complete(local); 285 drv_sw_scan_complete(local);
307 ieee80211_offchannel_return(local, true); 286 ieee80211_offchannel_return(local);
308 } 287 }
309 288
310 ieee80211_recalc_idle(local); 289 ieee80211_recalc_idle(local);
@@ -353,7 +332,10 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
353 local->next_scan_state = SCAN_DECISION; 332 local->next_scan_state = SCAN_DECISION;
354 local->scan_channel_idx = 0; 333 local->scan_channel_idx = 0;
355 334
356 ieee80211_offchannel_stop_vifs(local, true); 335 ieee80211_offchannel_stop_vifs(local);
336
337 /* ensure nullfunc is transmitted before leaving operating channel */
338 drv_flush(local, false);
357 339
358 ieee80211_configure_filter(local); 340 ieee80211_configure_filter(local);
359 341
@@ -369,6 +351,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
369static bool ieee80211_can_scan(struct ieee80211_local *local, 351static bool ieee80211_can_scan(struct ieee80211_local *local,
370 struct ieee80211_sub_if_data *sdata) 352 struct ieee80211_sub_if_data *sdata)
371{ 353{
354 if (local->radar_detect_enabled)
355 return false;
356
372 if (!list_empty(&local->roc_list)) 357 if (!list_empty(&local->roc_list))
373 return false; 358 return false;
374 359
@@ -403,6 +388,11 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
403 int i; 388 int i;
404 struct ieee80211_sub_if_data *sdata; 389 struct ieee80211_sub_if_data *sdata;
405 enum ieee80211_band band = local->hw.conf.channel->band; 390 enum ieee80211_band band = local->hw.conf.channel->band;
391 u32 tx_flags;
392
393 tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
394 if (local->scan_req->no_cck)
395 tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
406 396
407 sdata = rcu_dereference_protected(local->scan_sdata, 397 sdata = rcu_dereference_protected(local->scan_sdata,
408 lockdep_is_held(&local->mtx)); 398 lockdep_is_held(&local->mtx));
@@ -414,8 +404,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
414 local->scan_req->ssids[i].ssid_len, 404 local->scan_req->ssids[i].ssid_len,
415 local->scan_req->ie, local->scan_req->ie_len, 405 local->scan_req->ie, local->scan_req->ie_len,
416 local->scan_req->rates[band], false, 406 local->scan_req->rates[band], false,
417 local->scan_req->no_cck, 407 tx_flags, local->hw.conf.channel, true);
418 local->hw.conf.channel, true);
419 408
420 /* 409 /*
421 * After sending probe requests, wait for probe responses 410 * After sending probe requests, wait for probe responses
@@ -559,8 +548,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
559 bool associated = false; 548 bool associated = false;
560 bool tx_empty = true; 549 bool tx_empty = true;
561 bool bad_latency; 550 bool bad_latency;
562 bool listen_int_exceeded;
563 unsigned long min_beacon_int = 0;
564 struct ieee80211_sub_if_data *sdata; 551 struct ieee80211_sub_if_data *sdata;
565 struct ieee80211_channel *next_chan; 552 struct ieee80211_channel *next_chan;
566 enum mac80211_scan_state next_scan_state; 553 enum mac80211_scan_state next_scan_state;
@@ -579,11 +566,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
579 if (sdata->u.mgd.associated) { 566 if (sdata->u.mgd.associated) {
580 associated = true; 567 associated = true;
581 568
582 if (sdata->vif.bss_conf.beacon_int <
583 min_beacon_int || min_beacon_int == 0)
584 min_beacon_int =
585 sdata->vif.bss_conf.beacon_int;
586
587 if (!qdisc_all_tx_empty(sdata->dev)) { 569 if (!qdisc_all_tx_empty(sdata->dev)) {
588 tx_empty = false; 570 tx_empty = false;
589 break; 571 break;
@@ -600,34 +582,19 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
600 * see if we can scan another channel without interfering 582 * see if we can scan another channel without interfering
601 * with the current traffic situation. 583 * with the current traffic situation.
602 * 584 *
603 * Since we don't know if the AP has pending frames for us 585 * Keep good latency, do not stay off-channel more than 125 ms.
604 * we can only check for our tx queues and use the current
605 * pm_qos requirements for rx. Hence, if no tx traffic occurs
606 * at all we will scan as many channels in a row as the pm_qos
607 * latency allows us to. Additionally we also check for the
608 * currently negotiated listen interval to prevent losing
609 * frames unnecessarily.
610 *
611 * Otherwise switch back to the operating channel.
612 */ 586 */
613 587
614 bad_latency = time_after(jiffies + 588 bad_latency = time_after(jiffies +
615 ieee80211_scan_get_channel_time(next_chan), 589 ieee80211_scan_get_channel_time(next_chan),
616 local->leave_oper_channel_time + 590 local->leave_oper_channel_time + HZ / 8);
617 usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
618
619 listen_int_exceeded = time_after(jiffies +
620 ieee80211_scan_get_channel_time(next_chan),
621 local->leave_oper_channel_time +
622 usecs_to_jiffies(min_beacon_int * 1024) *
623 local->hw.conf.listen_interval);
624 591
625 if (associated && !tx_empty) { 592 if (associated && !tx_empty) {
626 if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) 593 if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
627 next_scan_state = SCAN_ABORT; 594 next_scan_state = SCAN_ABORT;
628 else 595 else
629 next_scan_state = SCAN_SUSPEND; 596 next_scan_state = SCAN_SUSPEND;
630 } else if (associated && (bad_latency || listen_int_exceeded)) { 597 } else if (associated && bad_latency) {
631 next_scan_state = SCAN_SUSPEND; 598 next_scan_state = SCAN_SUSPEND;
632 } else { 599 } else {
633 next_scan_state = SCAN_SET_CHANNEL; 600 next_scan_state = SCAN_SET_CHANNEL;
@@ -690,12 +657,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
690 local->scan_channel = NULL; 657 local->scan_channel = NULL;
691 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 658 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
692 659
693 /* 660 /* disable PS */
694 * Re-enable vifs and beaconing. Leave PS 661 ieee80211_offchannel_return(local);
695 * in off-channel state..will put that back
696 * on-channel at the end of scanning.
697 */
698 ieee80211_offchannel_return(local, false);
699 662
700 *next_delay = HZ / 5; 663 *next_delay = HZ / 5;
701 /* afterwards, resume scan & go to next channel */ 664 /* afterwards, resume scan & go to next channel */
@@ -705,8 +668,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
705static void ieee80211_scan_state_resume(struct ieee80211_local *local, 668static void ieee80211_scan_state_resume(struct ieee80211_local *local,
706 unsigned long *next_delay) 669 unsigned long *next_delay)
707{ 670{
708 /* PS already is in off-channel mode */ 671 ieee80211_offchannel_stop_vifs(local);
709 ieee80211_offchannel_stop_vifs(local, false);
710 672
711 if (local->ops->flush) { 673 if (local->ops->flush) {
712 drv_flush(local, false); 674 drv_flush(local, false);
@@ -832,9 +794,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
832 return res; 794 return res;
833} 795}
834 796
835int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 797int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
836 const u8 *ssid, u8 ssid_len, 798 const u8 *ssid, u8 ssid_len,
837 struct ieee80211_channel *chan) 799 struct ieee80211_channel *chan)
838{ 800{
839 struct ieee80211_local *local = sdata->local; 801 struct ieee80211_local *local = sdata->local;
840 int ret = -EBUSY; 802 int ret = -EBUSY;
@@ -848,22 +810,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
848 810
849 /* fill internal scan request */ 811 /* fill internal scan request */
850 if (!chan) { 812 if (!chan) {
851 int i, nchan = 0; 813 int i, max_n;
814 int n_ch = 0;
852 815
853 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 816 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
854 if (!local->hw.wiphy->bands[band]) 817 if (!local->hw.wiphy->bands[band])
855 continue; 818 continue;
856 for (i = 0; 819
857 i < local->hw.wiphy->bands[band]->n_channels; 820 max_n = local->hw.wiphy->bands[band]->n_channels;
858 i++) { 821 for (i = 0; i < max_n; i++) {
859 local->int_scan_req->channels[nchan] = 822 struct ieee80211_channel *tmp_ch =
860 &local->hw.wiphy->bands[band]->channels[i]; 823 &local->hw.wiphy->bands[band]->channels[i];
861 nchan++; 824
825 if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
826 IEEE80211_CHAN_DISABLED))
827 continue;
828
829 local->int_scan_req->channels[n_ch] = tmp_ch;
830 n_ch++;
862 } 831 }
863 } 832 }
864 833
865 local->int_scan_req->n_channels = nchan; 834 if (WARN_ON_ONCE(n_ch == 0))
835 goto unlock;
836
837 local->int_scan_req->n_channels = n_ch;
866 } else { 838 } else {
839 if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
840 IEEE80211_CHAN_DISABLED)))
841 goto unlock;
842
867 local->int_scan_req->channels[0] = chan; 843 local->int_scan_req->channels[0] = chan;
868 local->int_scan_req->n_channels = 1; 844 local->int_scan_req->n_channels = 1;
869 } 845 }
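
Among the scan.c changes above, the off-channel decision loses the pm_qos and listen-interval heuristics and keeps a single budget: suspend the scan when visiting the next channel would keep the radio away from the operating channel for more than 125 ms (HZ / 8). A self-contained sketch of that check, using a toy millisecond clock in place of jiffies:

    #include <stdbool.h>
    #include <stdio.h>

    #define OFFCHANNEL_BUDGET_MS 125   /* HZ / 8 in the patch */

    /* Would spending next_chan_ms more off-channel exceed the budget
     * measured from the moment we left the operating channel? */
    static bool bad_latency(unsigned long now_ms, unsigned long left_oper_ms,
                            unsigned long next_chan_ms)
    {
            return now_ms + next_chan_ms > left_oper_ms + OFFCHANNEL_BUDGET_MS;
    }

    int main(void)
    {
            /* left the operating channel at t=1000, now t=1080, next
             * channel needs 60 ms: 1140 > 1125, so suspend the scan */
            printf("%s\n", bad_latency(1080, 1000, 60) ? "suspend" : "keep scanning");
            return 0;
    }
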
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f3e502502fee..a79ce820cb50 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
91 return -ENOENT; 91 return -ENOENT;
92} 92}
93 93
94static void free_sta_work(struct work_struct *wk) 94static void cleanup_single_sta(struct sta_info *sta)
95{ 95{
96 struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
97 int ac, i; 96 int ac, i;
98 struct tid_ampdu_tx *tid_tx; 97 struct tid_ampdu_tx *tid_tx;
99 struct ieee80211_sub_if_data *sdata = sta->sdata; 98 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -105,12 +104,24 @@ static void free_sta_work(struct work_struct *wk)
105 * neither mac80211 nor the driver can reference this 104 * neither mac80211 nor the driver can reference this
106 * sta struct any more except by still existing timers 105 * sta struct any more except by still existing timers
107 * associated with this station that we clean up below. 106 * associated with this station that we clean up below.
107 *
108 * Note though that this still uses the sdata and even
109 * calls the driver in AP and mesh mode, so interfaces
110 * of those types must call sta_info_flush_cleanup()
111 * (typically via sta_info_flush()) before deconfiguring
112 * the driver.
113 *
114 * In station mode, nothing happens here so it doesn't
115 * have to (and doesn't) do that, this is intentional to
116 * speed up roaming.
108 */ 117 */
109 118
110 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 119 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
111 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 120 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
112 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 121 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
113 ps = &sdata->bss->ps; 122 ps = &sdata->bss->ps;
123 else if (ieee80211_vif_is_mesh(&sdata->vif))
124 ps = &sdata->u.mesh.ps;
114 else 125 else
115 return; 126 return;
116 127
@@ -126,13 +137,8 @@ static void free_sta_work(struct work_struct *wk)
126 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); 137 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
127 } 138 }
128 139
129#ifdef CONFIG_MAC80211_MESH 140 if (ieee80211_vif_is_mesh(&sdata->vif))
130 if (ieee80211_vif_is_mesh(&sdata->vif)) { 141 mesh_sta_cleanup(sta);
131 mesh_accept_plinks_update(sdata);
132 mesh_plink_deactivate(sta);
133 del_timer_sync(&sta->plink_timer);
134 }
135#endif
136 142
137 cancel_work_sync(&sta->drv_unblock_wk); 143 cancel_work_sync(&sta->drv_unblock_wk);
138 144
@@ -153,11 +159,35 @@ static void free_sta_work(struct work_struct *wk)
153 sta_info_free(local, sta); 159 sta_info_free(local, sta);
154} 160}
155 161
162void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
163{
164 struct sta_info *sta;
165
166 spin_lock_bh(&sdata->cleanup_stations_lock);
167 while (!list_empty(&sdata->cleanup_stations)) {
168 sta = list_first_entry(&sdata->cleanup_stations,
169 struct sta_info, list);
170 list_del(&sta->list);
171 spin_unlock_bh(&sdata->cleanup_stations_lock);
172
173 cleanup_single_sta(sta);
174
175 spin_lock_bh(&sdata->cleanup_stations_lock);
176 }
177
178 spin_unlock_bh(&sdata->cleanup_stations_lock);
179}
180
156static void free_sta_rcu(struct rcu_head *h) 181static void free_sta_rcu(struct rcu_head *h)
157{ 182{
158 struct sta_info *sta = container_of(h, struct sta_info, rcu_head); 183 struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
184 struct ieee80211_sub_if_data *sdata = sta->sdata;
185
186 spin_lock(&sdata->cleanup_stations_lock);
187 list_add_tail(&sta->list, &sdata->cleanup_stations);
188 spin_unlock(&sdata->cleanup_stations_lock);
159 189
160 ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk); 190 ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
161} 191}
162 192
163/* protected by RCU */ 193/* protected by RCU */
@@ -310,7 +340,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
310 340
311 spin_lock_init(&sta->lock); 341 spin_lock_init(&sta->lock);
312 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 342 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
313 INIT_WORK(&sta->free_sta_wk, free_sta_work);
314 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 343 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
315 mutex_init(&sta->ampdu_mlme.mtx); 344 mutex_init(&sta->ampdu_mlme.mtx);
316 345
@@ -346,12 +375,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
346 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 375 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
347 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 376 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
348 377
349 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); 378 sta->sta.smps_mode = IEEE80211_SMPS_OFF;
350 379
351#ifdef CONFIG_MAC80211_MESH 380 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
352 sta->plink_state = NL80211_PLINK_LISTEN;
353 init_timer(&sta->plink_timer);
354#endif
355 381
356 return sta; 382 return sta;
357} 383}
@@ -547,7 +573,6 @@ void sta_info_recalc_tim(struct sta_info *sta)
547{ 573{
548 struct ieee80211_local *local = sta->local; 574 struct ieee80211_local *local = sta->local;
549 struct ps_data *ps; 575 struct ps_data *ps;
550 unsigned long flags;
551 bool indicate_tim = false; 576 bool indicate_tim = false;
552 u8 ignore_for_tim = sta->sta.uapsd_queues; 577 u8 ignore_for_tim = sta->sta.uapsd_queues;
553 int ac; 578 int ac;
@@ -560,6 +585,12 @@ void sta_info_recalc_tim(struct sta_info *sta)
560 585
561 ps = &sta->sdata->bss->ps; 586 ps = &sta->sdata->bss->ps;
562 id = sta->sta.aid; 587 id = sta->sta.aid;
588#ifdef CONFIG_MAC80211_MESH
589 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
590 ps = &sta->sdata->u.mesh.ps;
591 /* TIM map only for PLID <= IEEE80211_MAX_AID */
592 id = le16_to_cpu(sta->plid) % IEEE80211_MAX_AID;
593#endif
563 } else { 594 } else {
564 return; 595 return;
565 } 596 }
@@ -598,7 +629,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
598 } 629 }
599 630
600 done: 631 done:
601 spin_lock_irqsave(&local->tim_lock, flags); 632 spin_lock_bh(&local->tim_lock);
602 633
603 if (indicate_tim) 634 if (indicate_tim)
604 __bss_tim_set(ps->tim, id); 635 __bss_tim_set(ps->tim, id);
@@ -611,7 +642,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
611 local->tim_in_locked_section = false; 642 local->tim_in_locked_section = false;
612 } 643 }
613 644
614 spin_unlock_irqrestore(&local->tim_lock, flags); 645 spin_unlock_bh(&local->tim_lock);
615} 646}
616 647
617static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) 648static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -718,8 +749,9 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
718 bool have_buffered = false; 749 bool have_buffered = false;
719 int ac; 750 int ac;
720 751
721 /* This is only necessary for stations on BSS interfaces */ 752 /* This is only necessary for stations on BSS/MBSS interfaces */
722 if (!sta->sdata->bss) 753 if (!sta->sdata->bss &&
754 !ieee80211_vif_is_mesh(&sta->sdata->vif))
723 return false; 755 return false;
724 756
725 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 757 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -752,7 +784,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
752 * will be sufficient. 784 * will be sufficient.
753 */ 785 */
754 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 786 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
755 ieee80211_sta_tear_down_BA_sessions(sta, false); 787 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
756 788
757 ret = sta_info_hash_del(local, sta); 789 ret = sta_info_hash_del(local, sta);
758 if (ret) 790 if (ret)
@@ -862,21 +894,13 @@ void sta_info_init(struct ieee80211_local *local)
862 894
863void sta_info_stop(struct ieee80211_local *local) 895void sta_info_stop(struct ieee80211_local *local)
864{ 896{
865 del_timer(&local->sta_cleanup); 897 del_timer_sync(&local->sta_cleanup);
866 sta_info_flush(local, NULL);
867} 898}
868 899
869/** 900
870 * sta_info_flush - flush matching STA entries from the STA table 901int sta_info_flush_defer(struct ieee80211_sub_if_data *sdata)
871 *
872 * Returns the number of removed STA entries.
873 *
874 * @local: local interface data
875 * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs
876 */
877int sta_info_flush(struct ieee80211_local *local,
878 struct ieee80211_sub_if_data *sdata)
879{ 902{
903 struct ieee80211_local *local = sdata->local;
880 struct sta_info *sta, *tmp; 904 struct sta_info *sta, *tmp;
881 int ret = 0; 905 int ret = 0;
882 906
@@ -884,7 +908,7 @@ int sta_info_flush(struct ieee80211_local *local,
884 908
885 mutex_lock(&local->sta_mtx); 909 mutex_lock(&local->sta_mtx);
886 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 910 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
887 if (!sdata || sdata == sta->sdata) { 911 if (sdata == sta->sdata) {
888 WARN_ON(__sta_info_destroy(sta)); 912 WARN_ON(__sta_info_destroy(sta));
889 ret++; 913 ret++;
890 } 914 }
@@ -894,6 +918,12 @@ int sta_info_flush(struct ieee80211_local *local,
894 return ret; 918 return ret;
895} 919}
896 920
921void sta_info_flush_cleanup(struct ieee80211_sub_if_data *sdata)
922{
923 ieee80211_cleanup_sdata_stas(sdata);
924 cancel_work_sync(&sdata->cleanup_stations_wk);
925}
926
897void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 927void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
898 unsigned long exp_time) 928 unsigned long exp_time)
899{ 929{
@@ -909,6 +939,11 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
909 if (time_after(jiffies, sta->last_rx + exp_time)) { 939 if (time_after(jiffies, sta->last_rx + exp_time)) {
910 sta_dbg(sta->sdata, "expiring inactive STA %pM\n", 940 sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
911 sta->sta.addr); 941 sta->sta.addr);
942
943 if (ieee80211_vif_is_mesh(&sdata->vif) &&
944 test_sta_flag(sta, WLAN_STA_PS_STA))
945 atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
946
912 WARN_ON(__sta_info_destroy(sta)); 947 WARN_ON(__sta_info_destroy(sta));
913 } 948 }
914 } 949 }
@@ -967,6 +1002,8 @@ static void clear_sta_ps_flags(void *_sta)
967 if (sdata->vif.type == NL80211_IFTYPE_AP || 1002 if (sdata->vif.type == NL80211_IFTYPE_AP ||
968 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1003 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
969 ps = &sdata->bss->ps; 1004 ps = &sdata->bss->ps;
1005 else if (ieee80211_vif_is_mesh(&sdata->vif))
1006 ps = &sdata->u.mesh.ps;
970 else 1007 else
971 return; 1008 return;
972 1009
@@ -1084,6 +1121,8 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1084 1121
1085 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false); 1122 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
1086 1123
1124 skb->dev = sdata->dev;
1125
1087 rcu_read_lock(); 1126 rcu_read_lock();
1088 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 1127 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1089 if (WARN_ON(!chanctx_conf)) { 1128 if (WARN_ON(!chanctx_conf)) {
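
The sta_info.c hunks above replace the per-station free work item with a per-interface cleanup list: free_sta_rcu() links the station onto sdata->cleanup_stations under cleanup_stations_lock and queues one shared work item, and ieee80211_cleanup_sdata_stas() drains the list, dropping the lock around each expensive cleanup_single_sta() call. A minimal userspace model of that drain loop (a pthread mutex standing in for the spinlock; list order is illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sta { int id; struct sta *next; };

    static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct sta *cleanup_list;

    /* free_sta_rcu() role: defer the heavy work to the drainer */
    static void defer_cleanup(int id)
    {
            struct sta *s = malloc(sizeof(*s));
            s->id = id;
            pthread_mutex_lock(&cleanup_lock);
            s->next = cleanup_list;
            cleanup_list = s;
            pthread_mutex_unlock(&cleanup_lock);
            /* the real code queues a work item here */
    }

    /* ieee80211_cleanup_sdata_stas() role */
    static void drain_cleanup(void)
    {
            pthread_mutex_lock(&cleanup_lock);
            while (cleanup_list) {
                    struct sta *s = cleanup_list;
                    cleanup_list = s->next;
                    /* drop the lock around the expensive part, as the
                     * real loop does around cleanup_single_sta() */
                    pthread_mutex_unlock(&cleanup_lock);
                    printf("cleaning up sta %d\n", s->id);
                    free(s);
                    pthread_mutex_lock(&cleanup_lock);
            }
            pthread_mutex_unlock(&cleanup_lock);
    }

    int main(void)
    {
            defer_cleanup(1);
            defer_cleanup(2);
            drain_cleanup();
            return 0;
    }
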
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 1489bca9ea97..4947341a2a82 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -56,6 +56,8 @@
56 * @WLAN_STA_INSERTED: This station is inserted into the hash table. 56 * @WLAN_STA_INSERTED: This station is inserted into the hash table.
57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. 57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
58 * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid. 58 * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
59 * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period.
60 * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
59 */ 61 */
60enum ieee80211_sta_info_flags { 62enum ieee80211_sta_info_flags {
61 WLAN_STA_AUTH, 63 WLAN_STA_AUTH,
@@ -78,6 +80,8 @@ enum ieee80211_sta_info_flags {
78 WLAN_STA_INSERTED, 80 WLAN_STA_INSERTED,
79 WLAN_STA_RATE_CONTROL, 81 WLAN_STA_RATE_CONTROL,
80 WLAN_STA_TOFFSET_KNOWN, 82 WLAN_STA_TOFFSET_KNOWN,
83 WLAN_STA_MPSP_OWNER,
84 WLAN_STA_MPSP_RECIPIENT,
81}; 85};
82 86
83#define ADDBA_RESP_INTERVAL HZ 87#define ADDBA_RESP_INTERVAL HZ
@@ -92,6 +96,13 @@ enum ieee80211_sta_info_flags {
92#define HT_AGG_STATE_WANT_START 4 96#define HT_AGG_STATE_WANT_START 4
93#define HT_AGG_STATE_WANT_STOP 5 97#define HT_AGG_STATE_WANT_STOP 5
94 98
99enum ieee80211_agg_stop_reason {
100 AGG_STOP_DECLINED,
101 AGG_STOP_LOCAL_REQUEST,
102 AGG_STOP_PEER_REQUEST,
103 AGG_STOP_DESTROY_STA,
104};
105
95/** 106/**
96 * struct tid_ampdu_tx - TID aggregation information (Tx). 107 * struct tid_ampdu_tx - TID aggregation information (Tx).
97 * 108 *
@@ -274,7 +285,9 @@ struct sta_ampdu_mlme {
274 * @t_offset: timing offset relative to this host 285 * @t_offset: timing offset relative to this host
275 * @t_offset_setpoint: reference timing offset of this sta to be used when 286 * @t_offset_setpoint: reference timing offset of this sta to be used when
276 * calculating clockdrift 287 * calculating clockdrift
277 * @ch_width: peer's channel width 288 * @local_pm: local link-specific power save mode
289 * @peer_pm: peer-specific power save mode towards local STA
290 * @nonpeer_pm: STA power save mode towards non-peer neighbors
278 * @debugfs: debug filesystem info 291 * @debugfs: debug filesystem info
279 * @dead: set to true when sta is unlinked 292 * @dead: set to true when sta is unlinked
280 * @uploaded: set to true when sta is uploaded to the driver 293 * @uploaded: set to true when sta is uploaded to the driver
@@ -282,8 +295,9 @@ struct sta_ampdu_mlme {
282 * @sta: station information we share with the driver 295 * @sta: station information we share with the driver
283 * @sta_state: duplicates information about station state (for debug) 296 * @sta_state: duplicates information about station state (for debug)
284 * @beacon_loss_count: number of times beacon loss has triggered 297 * @beacon_loss_count: number of times beacon loss has triggered
285 * @supports_40mhz: tracks whether the station advertised 40 MHz support 298 * @rcu_head: RCU head used for freeing this station struct
286 * as we overwrite its HT parameters with the currently used value 299 * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
300 * taken from HT/VHT capabilities or VHT operating mode notification
287 */ 301 */
288struct sta_info { 302struct sta_info {
289 /* General information, mostly static */ 303 /* General information, mostly static */
@@ -299,7 +313,6 @@ struct sta_info {
299 spinlock_t lock; 313 spinlock_t lock;
300 314
301 struct work_struct drv_unblock_wk; 315 struct work_struct drv_unblock_wk;
302 struct work_struct free_sta_wk;
303 316
304 u16 listen_interval; 317 u16 listen_interval;
305 318
@@ -372,7 +385,10 @@ struct sta_info {
372 struct timer_list plink_timer; 385 struct timer_list plink_timer;
373 s64 t_offset; 386 s64 t_offset;
374 s64 t_offset_setpoint; 387 s64 t_offset_setpoint;
375 enum nl80211_chan_width ch_width; 388 /* mesh power save */
389 enum nl80211_mesh_power_mode local_pm;
390 enum nl80211_mesh_power_mode peer_pm;
391 enum nl80211_mesh_power_mode nonpeer_pm;
376#endif 392#endif
377 393
378#ifdef CONFIG_MAC80211_DEBUGFS 394#ifdef CONFIG_MAC80211_DEBUGFS
@@ -382,11 +398,11 @@ struct sta_info {
382 } debugfs; 398 } debugfs;
383#endif 399#endif
384 400
401 enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
402
385 unsigned int lost_packets; 403 unsigned int lost_packets;
386 unsigned int beacon_loss_count; 404 unsigned int beacon_loss_count;
387 405
388 bool supports_40mhz;
389
390 /* keep last! */ 406 /* keep last! */
391 struct ieee80211_sta sta; 407 struct ieee80211_sta sta;
392}; 408};
@@ -549,8 +565,39 @@ void sta_info_recalc_tim(struct sta_info *sta);
549 565
550void sta_info_init(struct ieee80211_local *local); 566void sta_info_init(struct ieee80211_local *local);
551void sta_info_stop(struct ieee80211_local *local); 567void sta_info_stop(struct ieee80211_local *local);
552int sta_info_flush(struct ieee80211_local *local, 568int sta_info_flush_defer(struct ieee80211_sub_if_data *sdata);
553 struct ieee80211_sub_if_data *sdata); 569
570/**
571 * sta_info_flush_cleanup - flush the sta_info cleanup queue
572 * @sdata: the interface
573 *
574 * Flushes the sta_info cleanup queue for a given interface;
575 * this is necessary before the interface is removed or, for
576 * AP/mesh interfaces, before it is deconfigured.
577 *
578 * Note an rcu_barrier() must precede the function, after all
579 * stations have been flushed/removed to ensure the call_rcu()
580 * calls that add stations to the cleanup queue have completed.
581 */
582void sta_info_flush_cleanup(struct ieee80211_sub_if_data *sdata);
583
584/**
585 * sta_info_flush - flush matching STA entries from the STA table
586 *
587 * Returns the number of removed STA entries.
588 *
589 * @sdata: sdata to remove all stations from
590 */
591static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
592{
593 int ret = sta_info_flush_defer(sdata);
594
595 rcu_barrier();
596 sta_info_flush_cleanup(sdata);
597
598 return ret;
599}
600
554void sta_set_rate_info_tx(struct sta_info *sta, 601void sta_set_rate_info_tx(struct sta_info *sta,
555 const struct ieee80211_tx_rate *rate, 602 const struct ieee80211_tx_rate *rate,
556 struct rate_info *rinfo); 603 struct rate_info *rinfo);
@@ -563,4 +610,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
563void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); 610void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
564void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); 611void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
565 612
613void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
614
566#endif /* STA_INFO_H */ 615#endif /* STA_INFO_H */
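
The new inline sta_info_flush() above encodes an ordering requirement spelled out in the kernel-doc: the RCU callbacks queued by the destroy path are what put stations on the cleanup list, so an rcu_barrier() must run before the cleanup queue is flushed or some stations could be missed. A toy model of that ordering (a plain callback array standing in for call_rcu()/rcu_barrier(); nothing here is a real RCU API):

    #include <stdio.h>

    #define MAX_CB 16

    static void (*pending_cb[MAX_CB])(int);
    static int pending_arg[MAX_CB];
    static int n_pending;

    static void toy_call_rcu(void (*cb)(int), int arg)
    {
            pending_cb[n_pending] = cb;
            pending_arg[n_pending] = arg;
            n_pending++;
    }

    /* toy rcu_barrier(): run every callback queued so far */
    static void toy_rcu_barrier(void)
    {
            for (int i = 0; i < n_pending; i++)
                    pending_cb[i](pending_arg[i]);
            n_pending = 0;
    }

    static int cleanup_queue[MAX_CB];
    static int n_cleanup;

    /* free_sta_rcu() role: until the callback runs, the station
     * is not yet on the cleanup queue */
    static void free_sta_cb(int id)
    {
            cleanup_queue[n_cleanup++] = id;
    }

    int main(void)
    {
            toy_call_rcu(free_sta_cb, 7);        /* sta_info_flush_defer() effect */
            toy_rcu_barrier();                   /* guarantee the callback ran */
            for (int i = 0; i < n_cleanup; i++)  /* sta_info_flush_cleanup() */
                    printf("cleanup sta %d\n", cleanup_queue[i]);
            return 0;
    }
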
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 07d99578a2b1..43439203f4e4 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -335,7 +335,8 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
335 if (dropped) 335 if (dropped)
336 acked = false; 336 acked = false;
337 337
338 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 338 if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
339 IEEE80211_TX_INTFL_MLME_CONN_TX)) {
339 struct ieee80211_sub_if_data *sdata = NULL; 340 struct ieee80211_sub_if_data *sdata = NULL;
340 struct ieee80211_sub_if_data *iter_sdata; 341 struct ieee80211_sub_if_data *iter_sdata;
341 u64 cookie = (unsigned long)skb; 342 u64 cookie = (unsigned long)skb;
@@ -357,10 +358,13 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
357 sdata = rcu_dereference(local->p2p_sdata); 358 sdata = rcu_dereference(local->p2p_sdata);
358 } 359 }
359 360
360 if (!sdata) 361 if (!sdata) {
361 skb->dev = NULL; 362 skb->dev = NULL;
362 else if (ieee80211_is_nullfunc(hdr->frame_control) || 363 } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
363 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 364 ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control,
365 acked);
366 } else if (ieee80211_is_nullfunc(hdr->frame_control) ||
367 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
364 cfg80211_probe_status(sdata->dev, hdr->addr1, 368 cfg80211_probe_status(sdata->dev, hdr->addr1,
365 cookie, acked, GFP_ATOMIC); 369 cookie, acked, GFP_ATOMIC);
366 } else { 370 } else {
@@ -468,6 +472,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
468 return; 472 return;
469 } 473 }
470 474
475 /* mesh Peer Service Period support */
476 if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
477 ieee80211_is_data_qos(fc))
478 ieee80211_mpsp_trigger_process(
479 ieee80211_get_qos_ctl(hdr),
480 sta, true, acked);
481
471 if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) && 482 if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
472 (rates_idx != -1)) 483 (rates_idx != -1))
473 sta->last_tx_rate = info->status.rates[rates_idx]; 484 sta->last_tx_rate = info->status.rates[rates_idx];
@@ -502,11 +513,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
502 IEEE80211_BAR_CTRL_TID_INFO_MASK) >> 513 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
503 IEEE80211_BAR_CTRL_TID_INFO_SHIFT; 514 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
504 515
505 if (local->hw.flags & 516 ieee80211_set_bar_pending(sta, tid, ssn);
506 IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
507 ieee80211_stop_tx_ba_session(&sta->sta, tid);
508 else
509 ieee80211_set_bar_pending(sta, tid, ssn);
510 } 517 }
511 } 518 }
512 519
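
In status.c above, TX status for MLME connection frames is now routed by a new flag bit rather than inferred from frame type: frames marked IEEE80211_TX_INTFL_MLME_CONN_TX are handed to ieee80211_mgd_conn_tx_status(), while nullfunc probes still go to cfg80211_probe_status(). A tiny sketch of flag-based status dispatch (constants and handlers here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_FLAG_NL80211_FRAME 0x1   /* stand-in flag bits */
    #define TX_FLAG_MLME_CONN     0x2

    static void report_tx_status(unsigned int flags, bool acked)
    {
            if (flags & TX_FLAG_MLME_CONN)
                    printf("MLME handler, acked=%d\n", acked);
            else if (flags & TX_FLAG_NL80211_FRAME)
                    printf("userspace report, acked=%d\n", acked);
            else
                    printf("no status consumer\n");
    }

    int main(void)
    {
            report_tx_status(TX_FLAG_MLME_CONN, true);
            report_tx_status(TX_FLAG_NL80211_FRAME, false);
            return 0;
    }
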
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 57e14d59e12f..3ed801d90f1e 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -177,12 +177,11 @@ void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
177 struct ieee80211_key *key = (struct ieee80211_key *) 177 struct ieee80211_key *key = (struct ieee80211_key *)
178 container_of(keyconf, struct ieee80211_key, conf); 178 container_of(keyconf, struct ieee80211_key, conf);
179 struct tkip_ctx *ctx = &key->u.tkip.tx; 179 struct tkip_ctx *ctx = &key->u.tkip.tx;
180 unsigned long flags;
181 180
182 spin_lock_irqsave(&key->u.tkip.txlock, flags); 181 spin_lock_bh(&key->u.tkip.txlock);
183 ieee80211_compute_tkip_p1k(key, iv32); 182 ieee80211_compute_tkip_p1k(key, iv32);
184 memcpy(p1k, ctx->p1k, sizeof(ctx->p1k)); 183 memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
185 spin_unlock_irqrestore(&key->u.tkip.txlock, flags); 184 spin_unlock_bh(&key->u.tkip.txlock);
186} 185}
187EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv); 186EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
188 187
@@ -208,12 +207,11 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
208 const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); 207 const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
209 u32 iv32 = get_unaligned_le32(&data[4]); 208 u32 iv32 = get_unaligned_le32(&data[4]);
210 u16 iv16 = data[2] | (data[0] << 8); 209 u16 iv16 = data[2] | (data[0] << 8);
211 unsigned long flags;
212 210
213 spin_lock_irqsave(&key->u.tkip.txlock, flags); 211 spin_lock_bh(&key->u.tkip.txlock);
214 ieee80211_compute_tkip_p1k(key, iv32); 212 ieee80211_compute_tkip_p1k(key, iv32);
215 tkip_mixing_phase2(tk, ctx, iv16, p2k); 213 tkip_mixing_phase2(tk, ctx, iv16, p2k);
216 spin_unlock_irqrestore(&key->u.tkip.txlock, flags); 214 spin_unlock_bh(&key->u.tkip.txlock);
217} 215}
218EXPORT_SYMBOL(ieee80211_get_tkip_p2k); 216EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
219 217
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a8270b441a6f..3d7cd2a0582f 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -28,21 +28,27 @@
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
31#define CHANCTX_ENTRY __field(u32, control_freq) \ 31#define CHANDEF_ENTRY __field(u32, control_freq) \
32 __field(u32, chan_width) \ 32 __field(u32, chan_width) \
33 __field(u32, center_freq1) \ 33 __field(u32, center_freq1) \
34 __field(u32, center_freq2) \ 34 __field(u32, center_freq2)
35#define CHANDEF_ASSIGN(c) \
36 __entry->control_freq = (c)->chan->center_freq; \
37 __entry->chan_width = (c)->width; \
38 __entry->center_freq1 = (c)->center_freq1; \
39 __entry->center_freq2 = (c)->center_freq2;
40#define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz"
41#define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width, \
42 __entry->center_freq1, __entry->center_freq2
43
44#define CHANCTX_ENTRY CHANDEF_ENTRY \
35 __field(u8, rx_chains_static) \ 45 __field(u8, rx_chains_static) \
36 __field(u8, rx_chains_dynamic) 46 __field(u8, rx_chains_dynamic)
37#define CHANCTX_ASSIGN __entry->control_freq = ctx->conf.def.chan->center_freq;\ 47#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \
38 __entry->chan_width = ctx->conf.def.width; \
39 __entry->center_freq1 = ctx->conf.def.center_freq1; \
40 __entry->center_freq2 = ctx->conf.def.center_freq2; \
41 __entry->rx_chains_static = ctx->conf.rx_chains_static; \ 48 __entry->rx_chains_static = ctx->conf.rx_chains_static; \
42 __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic 49 __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
43#define CHANCTX_PR_FMT " control:%d MHz width:%d center: %d/%d MHz chains:%d/%d" 50#define CHANCTX_PR_FMT CHANDEF_PR_FMT " chains:%d/%d"
44#define CHANCTX_PR_ARG __entry->control_freq, __entry->chan_width, \ 51#define CHANCTX_PR_ARG CHANDEF_PR_ARG, \
45 __entry->center_freq1, __entry->center_freq2, \
46 __entry->rx_chains_static, __entry->rx_chains_dynamic 52 __entry->rx_chains_static, __entry->rx_chains_dynamic
47 53
48 54
@@ -334,6 +340,7 @@ TRACE_EVENT(drv_bss_info_changed,
334 __field(u16, assoc_cap) 340 __field(u16, assoc_cap)
335 __field(u64, sync_tsf) 341 __field(u64, sync_tsf)
336 __field(u32, sync_device_ts) 342 __field(u32, sync_device_ts)
343 __field(u8, sync_dtim_count)
337 __field(u32, basic_rates) 344 __field(u32, basic_rates)
338 __array(int, mcast_rate, IEEE80211_NUM_BANDS) 345 __array(int, mcast_rate, IEEE80211_NUM_BANDS)
339 __field(u16, ht_operation_mode) 346 __field(u16, ht_operation_mode)
@@ -341,8 +348,11 @@ TRACE_EVENT(drv_bss_info_changed,
341 __field(s32, cqm_rssi_hyst); 348 __field(s32, cqm_rssi_hyst);
342 __field(u32, channel_width); 349 __field(u32, channel_width);
343 __field(u32, channel_cfreq1); 350 __field(u32, channel_cfreq1);
344 __dynamic_array(u32, arp_addr_list, info->arp_addr_cnt); 351 __dynamic_array(u32, arp_addr_list,
345 __field(bool, arp_filter_enabled); 352 info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
353 IEEE80211_BSS_ARP_ADDR_LIST_LEN :
354 info->arp_addr_cnt);
355 __field(int, arp_addr_cnt);
346 __field(bool, qos); 356 __field(bool, qos);
347 __field(bool, idle); 357 __field(bool, idle);
348 __field(bool, ps); 358 __field(bool, ps);
@@ -370,6 +380,7 @@ TRACE_EVENT(drv_bss_info_changed,
370 __entry->assoc_cap = info->assoc_capability; 380 __entry->assoc_cap = info->assoc_capability;
371 __entry->sync_tsf = info->sync_tsf; 381 __entry->sync_tsf = info->sync_tsf;
372 __entry->sync_device_ts = info->sync_device_ts; 382 __entry->sync_device_ts = info->sync_device_ts;
383 __entry->sync_dtim_count = info->sync_dtim_count;
373 __entry->basic_rates = info->basic_rates; 384 __entry->basic_rates = info->basic_rates;
374 memcpy(__entry->mcast_rate, info->mcast_rate, 385 memcpy(__entry->mcast_rate, info->mcast_rate,
375 sizeof(__entry->mcast_rate)); 386 sizeof(__entry->mcast_rate));
@@ -378,9 +389,11 @@ TRACE_EVENT(drv_bss_info_changed,
378 __entry->cqm_rssi_hyst = info->cqm_rssi_hyst; 389 __entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
379 __entry->channel_width = info->chandef.width; 390 __entry->channel_width = info->chandef.width;
380 __entry->channel_cfreq1 = info->chandef.center_freq1; 391 __entry->channel_cfreq1 = info->chandef.center_freq1;
392 __entry->arp_addr_cnt = info->arp_addr_cnt;
381 memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list, 393 memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
382 sizeof(u32) * info->arp_addr_cnt); 394 sizeof(u32) * (info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
383 __entry->arp_filter_enabled = info->arp_filter_enabled; 395 IEEE80211_BSS_ARP_ADDR_LIST_LEN :
396 info->arp_addr_cnt));
384 __entry->qos = info->qos; 397 __entry->qos = info->qos;
385 __entry->idle = info->idle; 398 __entry->idle = info->idle;
386 __entry->ps = info->ps; 399 __entry->ps = info->ps;
@@ -466,7 +479,7 @@ TRACE_EVENT(drv_set_tim,
466 479
467 TP_printk( 480 TP_printk(
468 LOCAL_PR_FMT STA_PR_FMT " set:%d", 481 LOCAL_PR_FMT STA_PR_FMT " set:%d",
469 LOCAL_PR_ARG, STA_PR_FMT, __entry->set 482 LOCAL_PR_ARG, STA_PR_ARG, __entry->set
470 ) 483 )
471); 484);
472 485
@@ -1178,23 +1191,26 @@ TRACE_EVENT(drv_set_rekey_data,
1178 1191
1179TRACE_EVENT(drv_rssi_callback, 1192TRACE_EVENT(drv_rssi_callback,
1180 TP_PROTO(struct ieee80211_local *local, 1193 TP_PROTO(struct ieee80211_local *local,
1194 struct ieee80211_sub_if_data *sdata,
1181 enum ieee80211_rssi_event rssi_event), 1195 enum ieee80211_rssi_event rssi_event),
1182 1196
1183 TP_ARGS(local, rssi_event), 1197 TP_ARGS(local, sdata, rssi_event),
1184 1198
1185 TP_STRUCT__entry( 1199 TP_STRUCT__entry(
1186 LOCAL_ENTRY 1200 LOCAL_ENTRY
1201 VIF_ENTRY
1187 __field(u32, rssi_event) 1202 __field(u32, rssi_event)
1188 ), 1203 ),
1189 1204
1190 TP_fast_assign( 1205 TP_fast_assign(
1191 LOCAL_ASSIGN; 1206 LOCAL_ASSIGN;
1207 VIF_ASSIGN;
1192 __entry->rssi_event = rssi_event; 1208 __entry->rssi_event = rssi_event;
1193 ), 1209 ),
1194 1210
1195 TP_printk( 1211 TP_printk(
1196 LOCAL_PR_FMT " rssi_event:%d", 1212 LOCAL_PR_FMT VIF_PR_FMT " rssi_event:%d",
1197 LOCAL_PR_ARG, __entry->rssi_event 1213 LOCAL_PR_ARG, VIF_PR_ARG, __entry->rssi_event
1198 ) 1214 )
1199); 1215);
1200 1216
@@ -1426,6 +1442,14 @@ DEFINE_EVENT(local_only_evt, drv_restart_complete,
1426 TP_ARGS(local) 1442 TP_ARGS(local)
1427); 1443);
1428 1444
1445#if IS_ENABLED(CONFIG_IPV6)
1446DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change,
1447 TP_PROTO(struct ieee80211_local *local,
1448 struct ieee80211_sub_if_data *sdata),
1449 TP_ARGS(local, sdata)
1450);
1451#endif
1452
1429/* 1453/*
1430 * Tracing for API calls that drivers call. 1454 * Tracing for API calls that drivers call.
1431 */ 1455 */
@@ -1660,7 +1684,7 @@ TRACE_EVENT(api_sta_block_awake,
1660 1684
1661 TP_printk( 1685 TP_printk(
1662 LOCAL_PR_FMT STA_PR_FMT " block:%d", 1686 LOCAL_PR_FMT STA_PR_FMT " block:%d",
1663 LOCAL_PR_ARG, STA_PR_FMT, __entry->block 1687 LOCAL_PR_ARG, STA_PR_ARG, __entry->block
1664 ) 1688 )
1665); 1689);
1666 1690
@@ -1758,7 +1782,7 @@ TRACE_EVENT(api_eosp,
1758 1782
1759 TP_printk( 1783 TP_printk(
1760 LOCAL_PR_FMT STA_PR_FMT, 1784 LOCAL_PR_FMT STA_PR_FMT,
1761 LOCAL_PR_ARG, STA_PR_FMT 1785 LOCAL_PR_ARG, STA_PR_ARG
1762 ) 1786 )
1763); 1787);
1764 1788
@@ -1815,6 +1839,48 @@ TRACE_EVENT(stop_queue,
1815 ) 1839 )
1816); 1840);
1817 1841
1842TRACE_EVENT(drv_set_default_unicast_key,
1843 TP_PROTO(struct ieee80211_local *local,
1844 struct ieee80211_sub_if_data *sdata,
1845 int key_idx),
1846
1847 TP_ARGS(local, sdata, key_idx),
1848
1849 TP_STRUCT__entry(
1850 LOCAL_ENTRY
1851 VIF_ENTRY
1852 __field(int, key_idx)
1853 ),
1854
1855 TP_fast_assign(
1856 LOCAL_ASSIGN;
1857 VIF_ASSIGN;
1858 __entry->key_idx = key_idx;
1859 ),
1860
1861 TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d",
1862 LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx)
1863);
1864
1865TRACE_EVENT(api_radar_detected,
1866 TP_PROTO(struct ieee80211_local *local),
1867
1868 TP_ARGS(local),
1869
1870 TP_STRUCT__entry(
1871 LOCAL_ENTRY
1872 ),
1873
1874 TP_fast_assign(
1875 LOCAL_ASSIGN;
1876 ),
1877
1878 TP_printk(
1879 LOCAL_PR_FMT " radar detected",
1880 LOCAL_PR_ARG
1881 )
1882);
1883
1818#ifdef CONFIG_MAC80211_MESSAGE_TRACING 1884#ifdef CONFIG_MAC80211_MESSAGE_TRACING
1819#undef TRACE_SYSTEM 1885#undef TRACE_SYSTEM
1820#define TRACE_SYSTEM mac80211_msg 1886#define TRACE_SYSTEM mac80211_msg
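[Note: two things happen in the trace.h diff above. Shared CHANDEF_* macros are factored out of CHANCTX_* so chandef fields can be reused by other events, and three TP_printk() calls (drv_set_tim, api_sta_block_awake, api_eosp) that mistakenly passed the format macro STA_PR_FMT in the argument list are fixed to pass the argument macro STA_PR_ARG. A small runnable userspace model of that bug class, with hypothetical names:

#include <stdio.h>

#define STA_FMT " sta:%s"          /* format-string fragment */
#define STA_ARG(sta) (sta)         /* the matching argument */

int main(void)
{
        const char *sta = "00:11:22:33:44:55";

        /* correct: fragment in the format, value in the arguments */
        printf("event" STA_FMT " set:%d\n", STA_ARG(sta), 1);

        /*
         * The bug pattern: passing the _FMT macro as an argument still
         * compiles (it is just a string literal) but prints the literal
         * " sta:%s" instead of the station address - or worse with
         * pointer-dereferencing specifiers like the kernel's %pM.
         */
        /* printf("event" STA_FMT " set:%d\n", STA_FMT, 1); */
        return 0;
}
]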
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e9eadc40c09c..5b9602b62405 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -329,6 +329,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
329 329
330 if (sdata->vif.type == NL80211_IFTYPE_AP) 330 if (sdata->vif.type == NL80211_IFTYPE_AP)
331 ps = &sdata->u.ap.ps; 331 ps = &sdata->u.ap.ps;
332 else if (ieee80211_vif_is_mesh(&sdata->vif))
333 ps = &sdata->u.mesh.ps;
332 else 334 else
333 continue; 335 continue;
334 336
@@ -372,18 +374,20 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
372 /* 374 /*
373 * broadcast/multicast frame 375 * broadcast/multicast frame
374 * 376 *
375 * If any of the associated stations is in power save mode, 377 * If any of the associated/peer stations is in power save mode,
376 * the frame is buffered to be sent after DTIM beacon frame. 378 * the frame is buffered to be sent after DTIM beacon frame.
377 * This is done either by the hardware or us. 379 * This is done either by the hardware or us.
378 */ 380 */
379 381
380 /* powersaving STAs currently only in AP/VLAN mode */ 382 /* powersaving STAs currently only in AP/VLAN/mesh mode */
381 if (tx->sdata->vif.type == NL80211_IFTYPE_AP || 383 if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
382 tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 384 tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
383 if (!tx->sdata->bss) 385 if (!tx->sdata->bss)
384 return TX_CONTINUE; 386 return TX_CONTINUE;
385 387
386 ps = &tx->sdata->bss->ps; 388 ps = &tx->sdata->bss->ps;
389 } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
390 ps = &tx->sdata->u.mesh.ps;
387 } else { 391 } else {
388 return TX_CONTINUE; 392 return TX_CONTINUE;
389 } 393 }
@@ -594,7 +598,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
594 break; 598 break;
595 } 599 }
596 600
597 if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED)) 601 if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
602 !ieee80211_is_deauth(hdr->frame_control)))
598 return TX_DROP; 603 return TX_DROP;
599 604
600 if (!skip_hw && tx->key && 605 if (!skip_hw && tx->key &&
@@ -1225,6 +1230,21 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1225 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1230 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1226 if (local->queue_stop_reasons[q] || 1231 if (local->queue_stop_reasons[q] ||
1227 (!txpending && !skb_queue_empty(&local->pending[q]))) { 1232 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1233 if (unlikely(info->flags &
1234 IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
1235 local->queue_stop_reasons[q] &
1236 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
1237 /*
1238 * Drop off-channel frames if queues are stopped
1239 * for any reason other than off-channel
1240 * operation. Never queue them.
1241 */
1242 spin_unlock_irqrestore(
1243 &local->queue_stop_reason_lock, flags);
1244 ieee80211_purge_tx_queue(&local->hw, skbs);
1245 return true;
1246 }
1247
1228 /* 1248 /*
1229 * Since queue is stopped, queue up frames for later 1249 * Since queue is stopped, queue up frames for later
1230 * transmission from the tx-pending tasklet when the 1250 * transmission from the tx-pending tasklet when the
@@ -1472,12 +1492,14 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1472 hdr = (struct ieee80211_hdr *) skb->data; 1492 hdr = (struct ieee80211_hdr *) skb->data;
1473 info->control.vif = &sdata->vif; 1493 info->control.vif = &sdata->vif;
1474 1494
1475 if (ieee80211_vif_is_mesh(&sdata->vif) && 1495 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1476 ieee80211_is_data(hdr->frame_control) && 1496 if (ieee80211_is_data(hdr->frame_control) &&
1477 !is_multicast_ether_addr(hdr->addr1) && 1497 is_unicast_ether_addr(hdr->addr1)) {
1478 mesh_nexthop_resolve(skb, sdata)) { 1498 if (mesh_nexthop_resolve(sdata, skb))
1479 /* skb queued: don't free */ 1499 return; /* skb queued: don't free */
1480 return; 1500 } else {
1501 ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
1502 }
1481 } 1503 }
1482 1504
1483 ieee80211_set_qos_hdr(sdata, skb); 1505 ieee80211_set_qos_hdr(sdata, skb);
@@ -1673,10 +1695,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1673 chanctx_conf = 1695 chanctx_conf =
1674 rcu_dereference(tmp_sdata->vif.chanctx_conf); 1696 rcu_dereference(tmp_sdata->vif.chanctx_conf);
1675 } 1697 }
1676 if (!chanctx_conf)
1677 goto fail_rcu;
1678 1698
1679 chan = chanctx_conf->def.chan; 1699 if (chanctx_conf)
1700 chan = chanctx_conf->def.chan;
1701 else if (!local->use_chanctx)
1702 chan = local->_oper_channel;
1703 else
1704 goto fail_rcu;
1680 1705
1681 /* 1706 /*
1682 * Frame injection is not allowed if beaconing is not allowed 1707 * Frame injection is not allowed if beaconing is not allowed
@@ -1784,16 +1809,16 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1784 break; 1809 break;
1785 /* fall through */ 1810 /* fall through */
1786 case NL80211_IFTYPE_AP: 1811 case NL80211_IFTYPE_AP:
1812 if (sdata->vif.type == NL80211_IFTYPE_AP)
1813 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1814 if (!chanctx_conf)
1815 goto fail_rcu;
1787 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1816 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1788 /* DA BSSID SA */ 1817 /* DA BSSID SA */
1789 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1818 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1790 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); 1819 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1791 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1820 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1792 hdrlen = 24; 1821 hdrlen = 24;
1793 if (sdata->vif.type == NL80211_IFTYPE_AP)
1794 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1795 if (!chanctx_conf)
1796 goto fail_rcu;
1797 band = chanctx_conf->def.chan->band; 1822 band = chanctx_conf->def.chan->band;
1798 break; 1823 break;
1799 case NL80211_IFTYPE_WDS: 1824 case NL80211_IFTYPE_WDS:
@@ -1819,9 +1844,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1819 } 1844 }
1820 1845
1821 if (!is_multicast_ether_addr(skb->data)) { 1846 if (!is_multicast_ether_addr(skb->data)) {
1822 mpath = mesh_path_lookup(skb->data, sdata); 1847 mpath = mesh_path_lookup(sdata, skb->data);
1823 if (!mpath) 1848 if (!mpath)
1824 mppath = mpp_path_lookup(skb->data, sdata); 1849 mppath = mpp_path_lookup(sdata, skb->data);
1825 } 1850 }
1826 1851
1827 /* 1852 /*
@@ -1834,8 +1859,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1834 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { 1859 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
1835 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1860 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1836 skb->data, skb->data + ETH_ALEN); 1861 skb->data, skb->data + ETH_ALEN);
1837 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1862 meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
1838 sdata, NULL, NULL); 1863 NULL, NULL);
1839 } else { 1864 } else {
1840 /* DS -> MBSS (802.11-2012 13.11.3.3). 1865 /* DS -> MBSS (802.11-2012 13.11.3.3).
1841 * For unicast with unknown forwarding information, 1866 * For unicast with unknown forwarding information,
@@ -1854,18 +1879,14 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1854 mesh_da, sdata->vif.addr); 1879 mesh_da, sdata->vif.addr);
1855 if (is_multicast_ether_addr(mesh_da)) 1880 if (is_multicast_ether_addr(mesh_da))
1856 /* DA TA mSA AE:SA */ 1881 /* DA TA mSA AE:SA */
1857 meshhdrlen = 1882 meshhdrlen = ieee80211_new_mesh_header(
1858 ieee80211_new_mesh_header(&mesh_hdr, 1883 sdata, &mesh_hdr,
1859 sdata, 1884 skb->data + ETH_ALEN, NULL);
1860 skb->data + ETH_ALEN,
1861 NULL);
1862 else 1885 else
1863 /* RA TA mDA mSA AE:DA SA */ 1886 /* RA TA mDA mSA AE:DA SA */
1864 meshhdrlen = 1887 meshhdrlen = ieee80211_new_mesh_header(
1865 ieee80211_new_mesh_header(&mesh_hdr, 1888 sdata, &mesh_hdr, skb->data,
1866 sdata, 1889 skb->data + ETH_ALEN);
1867 skb->data,
1868 skb->data + ETH_ALEN);
1869 1890
1870 } 1891 }
1871 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 1892 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
@@ -2261,9 +2282,8 @@ void ieee80211_tx_pending(unsigned long data)
2261 2282
2262/* functions for drivers to get certain frames */ 2283/* functions for drivers to get certain frames */
2263 2284
2264static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2285static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2265 struct ps_data *ps, 2286 struct ps_data *ps, struct sk_buff *skb)
2266 struct sk_buff *skb)
2267{ 2287{
2268 u8 *pos, *tim; 2288 u8 *pos, *tim;
2269 int aid0 = 0; 2289 int aid0 = 0;
@@ -2325,6 +2345,29 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2325 } 2345 }
2326} 2346}
2327 2347
2348static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2349 struct ps_data *ps, struct sk_buff *skb)
2350{
2351 struct ieee80211_local *local = sdata->local;
2352
2353 /*
2354 * Not very nice, but we want to allow the driver to call
2355 * ieee80211_beacon_get() as a response to the set_tim()
2356 * callback. That, however, is already invoked under the
2357 * sta_lock to guarantee consistent and race-free update
2358 * of the tim bitmap in mac80211 and the driver.
2359 */
2360 if (local->tim_in_locked_section) {
2361 __ieee80211_beacon_add_tim(sdata, ps, skb);
2362 } else {
2363 spin_lock(&local->tim_lock);
2364 __ieee80211_beacon_add_tim(sdata, ps, skb);
2365 spin_unlock(&local->tim_lock);
2366 }
2367
2368 return 0;
2369}
2370
2328struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, 2371struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2329 struct ieee80211_vif *vif, 2372 struct ieee80211_vif *vif,
2330 u16 *tim_offset, u16 *tim_length) 2373 u16 *tim_offset, u16 *tim_length)
@@ -2369,22 +2412,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2369 memcpy(skb_put(skb, beacon->head_len), beacon->head, 2412 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2370 beacon->head_len); 2413 beacon->head_len);
2371 2414
2372 /* 2415 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2373 * Not very nice, but we want to allow the driver to call
2374 * ieee80211_beacon_get() as a response to the set_tim()
2375 * callback. That, however, is already invoked under the
2376 * sta_lock to guarantee consistent and race-free update
2377 * of the tim bitmap in mac80211 and the driver.
2378 */
2379 if (local->tim_in_locked_section) {
2380 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2381 } else {
2382 unsigned long flags;
2383
2384 spin_lock_irqsave(&local->tim_lock, flags);
2385 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2386 spin_unlock_irqrestore(&local->tim_lock, flags);
2387 }
2388 2416
2389 if (tim_offset) 2417 if (tim_offset)
2390 *tim_offset = beacon->head_len; 2418 *tim_offset = beacon->head_len;
@@ -2412,66 +2440,26 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2412 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2440 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2413 IEEE80211_STYPE_BEACON); 2441 IEEE80211_STYPE_BEACON);
2414 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 2442 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2415 struct ieee80211_mgmt *mgmt;
2416 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2443 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2417 u8 *pos; 2444 struct beacon_data *bcn = rcu_dereference(ifmsh->beacon);
2418 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
2419 sizeof(mgmt->u.beacon);
2420 2445
2421#ifdef CONFIG_MAC80211_MESH 2446 if (!bcn)
2422 if (!sdata->u.mesh.mesh_id_len)
2423 goto out; 2447 goto out;
2424#endif
2425 2448
2426 if (ifmsh->sync_ops) 2449 if (ifmsh->sync_ops)
2427 ifmsh->sync_ops->adjust_tbtt( 2450 ifmsh->sync_ops->adjust_tbtt(
2428 sdata); 2451 sdata);
2429 2452
2430 skb = dev_alloc_skb(local->tx_headroom + 2453 skb = dev_alloc_skb(local->tx_headroom +
2431 hdr_len + 2454 bcn->head_len +
2432 2 + /* NULL SSID */ 2455 256 + /* TIM IE */
2433 2 + 8 + /* supported rates */ 2456 bcn->tail_len);
2434 2 + 3 + /* DS params */
2435 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2436 2 + sizeof(struct ieee80211_ht_cap) +
2437 2 + sizeof(struct ieee80211_ht_operation) +
2438 2 + sdata->u.mesh.mesh_id_len +
2439 2 + sizeof(struct ieee80211_meshconf_ie) +
2440 sdata->u.mesh.ie_len);
2441 if (!skb) 2457 if (!skb)
2442 goto out; 2458 goto out;
2443 2459 skb_reserve(skb, local->tx_headroom);
2444 skb_reserve(skb, local->hw.extra_tx_headroom); 2460 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
2445 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 2461 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb);
2446 memset(mgmt, 0, hdr_len); 2462 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
2447 mgmt->frame_control =
2448 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2449 eth_broadcast_addr(mgmt->da);
2450 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2451 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2452 mgmt->u.beacon.beacon_int =
2453 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2454 mgmt->u.beacon.capab_info |= cpu_to_le16(
2455 sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
2456
2457 pos = skb_put(skb, 2);
2458 *pos++ = WLAN_EID_SSID;
2459 *pos++ = 0x0;
2460
2461 band = chanctx_conf->def.chan->band;
2462
2463 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
2464 mesh_add_ds_params_ie(skb, sdata) ||
2465 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
2466 mesh_add_rsn_ie(skb, sdata) ||
2467 mesh_add_ht_cap_ie(skb, sdata) ||
2468 mesh_add_ht_oper_ie(skb, sdata) ||
2469 mesh_add_meshid_ie(skb, sdata) ||
2470 mesh_add_meshconf_ie(skb, sdata) ||
2471 mesh_add_vendor_ies(skb, sdata)) {
2472 pr_err("o11s: couldn't add ies!\n");
2473 goto out;
2474 }
2475 } else { 2463 } else {
2476 WARN_ON(1); 2464 WARN_ON(1);
2477 goto out; 2465 goto out;
@@ -2721,6 +2709,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2721 goto out; 2709 goto out;
2722 2710
2723 ps = &sdata->u.ap.ps; 2711 ps = &sdata->u.ap.ps;
2712 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2713 ps = &sdata->u.mesh.ps;
2724 } else { 2714 } else {
2725 goto out; 2715 goto out;
2726 } 2716 }
@@ -2744,6 +2734,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2744 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2734 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2745 } 2735 }
2746 2736
2737 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
2747 if (!ieee80211_tx_prepare(sdata, &tx, skb)) 2738 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2748 break; 2739 break;
2749 dev_kfree_skb_any(skb); 2740 dev_kfree_skb_any(skb);
@@ -2776,6 +2767,8 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
2776 skb_set_queue_mapping(skb, ac); 2767 skb_set_queue_mapping(skb, ac);
2777 skb->priority = tid; 2768 skb->priority = tid;
2778 2769
2770 skb->dev = sdata->dev;
2771
2779 /* 2772 /*
2780 * The other path calling ieee80211_xmit is from the tasklet, 2773 * The other path calling ieee80211_xmit is from the tasklet,
2781 * and while we can handle concurrent transmissions locking 2774 * and while we can handle concurrent transmissions locking
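[Note: the beacon changes in tx.c above pull the "take tim_lock unless the caller already holds it" logic into a shared ieee80211_beacon_add_tim() wrapper, so the AP path and the new mesh beacon path both get it (dropping the irqsave variant along the way); local->tim_in_locked_section is the flag the set_tim() path sets while holding the lock. A runnable userspace model of the pattern, hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tim_lock = PTHREAD_MUTEX_INITIALIZER;
static bool tim_in_locked_section;

static void __add_tim(void)
{
        /* must run with tim_lock held */
        puts("TIM updated");
}

static void add_tim(void)
{
        if (tim_in_locked_section) {
                __add_tim();            /* caller already holds tim_lock */
        } else {
                pthread_mutex_lock(&tim_lock);
                __add_tim();
                pthread_mutex_unlock(&tim_lock);
        }
}

static void set_tim_callback(void)
{
        pthread_mutex_lock(&tim_lock);
        tim_in_locked_section = true;
        add_tim();                      /* re-entrant call, no deadlock */
        tim_in_locked_section = false;
        pthread_mutex_unlock(&tim_lock);
}

int main(void)
{
        add_tim();                      /* unlocked caller */
        set_tim_callback();             /* locked caller */
        return 0;
}
]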
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index f11e8c540db4..0f38f43ac62e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -739,11 +739,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
739 if (calc_crc) 739 if (calc_crc)
740 crc = crc32_be(crc, pos - 2, elen + 2); 740 crc = crc32_be(crc, pos - 2, elen + 2);
741 741
742 if (pos[3] == 1) { 742 if (elen >= 5 && pos[3] == 2) {
743 /* OUI Type 1 - WPA IE */
744 elems->wpa = pos;
745 elems->wpa_len = elen;
746 } else if (elen >= 5 && pos[3] == 2) {
747 /* OUI Type 2 - WMM IE */ 743 /* OUI Type 2 - WMM IE */
748 if (pos[4] == 0) { 744 if (pos[4] == 0) {
749 elems->wmm_info = pos; 745 elems->wmm_info = pos;
@@ -791,6 +787,12 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
791 else 787 else
792 elem_parse_failed = true; 788 elem_parse_failed = true;
793 break; 789 break;
790 case WLAN_EID_OPMODE_NOTIF:
791 if (elen > 0)
792 elems->opmode_notif = pos;
793 else
794 elem_parse_failed = true;
795 break;
794 case WLAN_EID_MESH_ID: 796 case WLAN_EID_MESH_ID:
795 elems->mesh_id = pos; 797 elems->mesh_id = pos;
796 elems->mesh_id_len = elen; 798 elems->mesh_id_len = elen;
@@ -805,6 +807,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
805 elems->peering = pos; 807 elems->peering = pos;
806 elems->peering_len = elen; 808 elems->peering_len = elen;
807 break; 809 break;
810 case WLAN_EID_MESH_AWAKE_WINDOW:
811 if (elen >= 2)
812 elems->awake_window = (void *)pos;
813 break;
808 case WLAN_EID_PREQ: 814 case WLAN_EID_PREQ:
809 elems->preq = pos; 815 elems->preq = pos;
810 elems->preq_len = elen; 816 elems->preq_len = elen;
@@ -1029,8 +1035,9 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
1029 1035
1030void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1036void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1031 u16 transaction, u16 auth_alg, u16 status, 1037 u16 transaction, u16 auth_alg, u16 status,
1032 u8 *extra, size_t extra_len, const u8 *da, 1038 const u8 *extra, size_t extra_len, const u8 *da,
1033 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx) 1039 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
1040 u32 tx_flags)
1034{ 1041{
1035 struct ieee80211_local *local = sdata->local; 1042 struct ieee80211_local *local = sdata->local;
1036 struct sk_buff *skb; 1043 struct sk_buff *skb;
@@ -1063,7 +1070,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1063 WARN_ON(err); 1070 WARN_ON(err);
1064 } 1071 }
1065 1072
1066 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1073 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
1074 tx_flags;
1067 ieee80211_tx_skb(sdata, skb); 1075 ieee80211_tx_skb(sdata, skb);
1068} 1076}
1069 1077
@@ -1277,7 +1285,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1277void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1285void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1278 const u8 *ssid, size_t ssid_len, 1286 const u8 *ssid, size_t ssid_len,
1279 const u8 *ie, size_t ie_len, 1287 const u8 *ie, size_t ie_len,
1280 u32 ratemask, bool directed, bool no_cck, 1288 u32 ratemask, bool directed, u32 tx_flags,
1281 struct ieee80211_channel *channel, bool scan) 1289 struct ieee80211_channel *channel, bool scan)
1282{ 1290{
1283 struct sk_buff *skb; 1291 struct sk_buff *skb;
@@ -1286,9 +1294,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1286 ssid, ssid_len, 1294 ssid, ssid_len,
1287 ie, ie_len, directed); 1295 ie, ie_len, directed);
1288 if (skb) { 1296 if (skb) {
1289 if (no_cck) 1297 IEEE80211_SKB_CB(skb)->flags |= tx_flags;
1290 IEEE80211_SKB_CB(skb)->flags |=
1291 IEEE80211_TX_CTL_NO_CCK_RATE;
1292 if (scan) 1298 if (scan)
1293 ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); 1299 ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
1294 else 1300 else
@@ -1358,6 +1364,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1358 struct ieee80211_chanctx *ctx; 1364 struct ieee80211_chanctx *ctx;
1359 struct sta_info *sta; 1365 struct sta_info *sta;
1360 int res, i; 1366 int res, i;
1367 bool reconfig_due_to_wowlan = false;
1361 1368
1362#ifdef CONFIG_PM 1369#ifdef CONFIG_PM
1363 if (local->suspended) 1370 if (local->suspended)
@@ -1377,6 +1384,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1377 * res is 1, which means the driver requested 1384 * res is 1, which means the driver requested
1378 * to go through a regular reset on wakeup. 1385 * to go through a regular reset on wakeup.
1379 */ 1386 */
1387 reconfig_due_to_wowlan = true;
1380 } 1388 }
1381#endif 1389#endif
1382 /* everything else happens only if HW was up & running */ 1390 /* everything else happens only if HW was up & running */
@@ -1526,11 +1534,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1526 BSS_CHANGED_IDLE | 1534 BSS_CHANGED_IDLE |
1527 BSS_CHANGED_TXPOWER; 1535 BSS_CHANGED_TXPOWER;
1528 1536
1537#ifdef CONFIG_PM
1538 if (local->resuming && !reconfig_due_to_wowlan)
1539 sdata->vif.bss_conf = sdata->suspend_bss_conf;
1540#endif
1541
1529 switch (sdata->vif.type) { 1542 switch (sdata->vif.type) {
1530 case NL80211_IFTYPE_STATION: 1543 case NL80211_IFTYPE_STATION:
1531 changed |= BSS_CHANGED_ASSOC | 1544 changed |= BSS_CHANGED_ASSOC |
1532 BSS_CHANGED_ARP_FILTER | 1545 BSS_CHANGED_ARP_FILTER |
1533 BSS_CHANGED_PS; 1546 BSS_CHANGED_PS;
1547
1548 if (sdata->u.mgd.dtim_period)
1549 changed |= BSS_CHANGED_DTIM_PERIOD;
1550
1534 mutex_lock(&sdata->u.mgd.mtx); 1551 mutex_lock(&sdata->u.mgd.mtx);
1535 ieee80211_bss_info_change_notify(sdata, changed); 1552 ieee80211_bss_info_change_notify(sdata, changed);
1536 mutex_unlock(&sdata->u.mgd.mtx); 1553 mutex_unlock(&sdata->u.mgd.mtx);
@@ -1550,9 +1567,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1550 1567
1551 /* fall through */ 1568 /* fall through */
1552 case NL80211_IFTYPE_MESH_POINT: 1569 case NL80211_IFTYPE_MESH_POINT:
1553 changed |= BSS_CHANGED_BEACON | 1570 if (sdata->vif.bss_conf.enable_beacon) {
1554 BSS_CHANGED_BEACON_ENABLED; 1571 changed |= BSS_CHANGED_BEACON |
1555 ieee80211_bss_info_change_notify(sdata, changed); 1572 BSS_CHANGED_BEACON_ENABLED;
1573 ieee80211_bss_info_change_notify(sdata, changed);
1574 }
1556 break; 1575 break;
1557 case NL80211_IFTYPE_WDS: 1576 case NL80211_IFTYPE_WDS:
1558 break; 1577 break;
@@ -1632,7 +1651,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1632 mutex_lock(&local->sta_mtx); 1651 mutex_lock(&local->sta_mtx);
1633 1652
1634 list_for_each_entry(sta, &local->sta_list, list) { 1653 list_for_each_entry(sta, &local->sta_list, list) {
1635 ieee80211_sta_tear_down_BA_sessions(sta, true); 1654 ieee80211_sta_tear_down_BA_sessions(
1655 sta, AGG_STOP_LOCAL_REQUEST);
1636 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 1656 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
1637 } 1657 }
1638 1658
@@ -1646,10 +1666,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1646 * If this is for hw restart things are still running. 1666 * If this is for hw restart things are still running.
1647 * We may want to change that later, however. 1667 * We may want to change that later, however.
1648 */ 1668 */
1649 if (!local->suspended) { 1669 if (!local->suspended || reconfig_due_to_wowlan)
1650 drv_restart_complete(local); 1670 drv_restart_complete(local);
1671
1672 if (!local->suspended)
1651 return 0; 1673 return 0;
1652 }
1653 1674
1654#ifdef CONFIG_PM 1675#ifdef CONFIG_PM
1655 /* first set suspended false, then resuming */ 1676 /* first set suspended false, then resuming */
@@ -1864,7 +1885,7 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1864} 1885}
1865 1886
1866u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, 1887u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1867 u32 cap) 1888 u32 cap)
1868{ 1889{
 1869 __le32 tmp; 1890 __le32 tmp;
1870 1891
@@ -1926,7 +1947,7 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1926} 1947}
1927 1948
1928void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, 1949void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1929 struct ieee80211_ht_operation *ht_oper, 1950 const struct ieee80211_ht_operation *ht_oper,
1930 struct cfg80211_chan_def *chandef) 1951 struct cfg80211_chan_def *chandef)
1931{ 1952{
1932 enum nl80211_channel_type channel_type; 1953 enum nl80211_channel_type channel_type;
@@ -2114,3 +2135,49 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2114 2135
2115 return ts; 2136 return ts;
2116} 2137}
2138
2139void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
2140{
2141 struct ieee80211_sub_if_data *sdata;
2142
2143 mutex_lock(&local->iflist_mtx);
2144 list_for_each_entry(sdata, &local->interfaces, list) {
2145 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
2146
2147 if (sdata->wdev.cac_started) {
2148 ieee80211_vif_release_channel(sdata);
2149 cfg80211_cac_event(sdata->dev,
2150 NL80211_RADAR_CAC_ABORTED,
2151 GFP_KERNEL);
2152 }
2153 }
2154 mutex_unlock(&local->iflist_mtx);
2155}
2156
2157void ieee80211_dfs_radar_detected_work(struct work_struct *work)
2158{
2159 struct ieee80211_local *local =
2160 container_of(work, struct ieee80211_local, radar_detected_work);
2161 struct cfg80211_chan_def chandef;
2162
2163 ieee80211_dfs_cac_cancel(local);
2164
2165 if (local->use_chanctx)
2166 /* currently not handled */
2167 WARN_ON(1);
2168 else {
2169 cfg80211_chandef_create(&chandef, local->hw.conf.channel,
2170 local->hw.conf.channel_type);
2171 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
2172 }
2173}
2174
2175void ieee80211_radar_detected(struct ieee80211_hw *hw)
2176{
2177 struct ieee80211_local *local = hw_to_local(hw);
2178
2179 trace_api_radar_detected(local);
2180
2181 ieee80211_queue_work(hw, &local->radar_detected_work);
2182}
2183EXPORT_SYMBOL(ieee80211_radar_detected);
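[Note: ieee80211_radar_detected() above is the new driver-facing entry point. Since drivers may report radar from atomic context, it only traces the event and queues radar_detected_work; the sleeping work (CAC cancellation, cfg80211_radar_event()) runs later in process context. A sketch of that deferral pattern, illustrative names only:

#include <linux/workqueue.h>

struct radar_ctx {
        struct work_struct work;
};

static void radar_work_fn(struct work_struct *work)
{
        /* process context: may sleep, take mutexes, notify cfg80211 */
}

static void radar_ctx_init(struct radar_ctx *ctx)
{
        INIT_WORK(&ctx->work, radar_work_fn);
}

static void radar_detected(struct radar_ctx *ctx)
{
        /* callable from IRQ/softirq: just defer the real handling */
        schedule_work(&ctx->work);
}
]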
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index f311388aeedf..a2c2258bc84e 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -10,21 +10,29 @@
10#include <linux/export.h> 10#include <linux/export.h>
11#include <net/mac80211.h> 11#include <net/mac80211.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "rate.h"
13 14
14 15
15void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 16void
16 struct ieee80211_supported_band *sband, 17ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
17 struct ieee80211_vht_cap *vht_cap_ie, 18 struct ieee80211_supported_band *sband,
18 struct ieee80211_sta_vht_cap *vht_cap) 19 const struct ieee80211_vht_cap *vht_cap_ie,
20 struct sta_info *sta)
19{ 21{
20 if (WARN_ON_ONCE(!vht_cap)) 22 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
21 return;
22 23
23 memset(vht_cap, 0, sizeof(*vht_cap)); 24 memset(vht_cap, 0, sizeof(*vht_cap));
24 25
26 if (!sta->sta.ht_cap.ht_supported)
27 return;
28
25 if (!vht_cap_ie || !sband->vht_cap.vht_supported) 29 if (!vht_cap_ie || !sband->vht_cap.vht_supported)
26 return; 30 return;
27 31
32 /* A VHT STA must support 40 MHz */
33 if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
34 return;
35
28 vht_cap->vht_supported = true; 36 vht_cap->vht_supported = true;
29 37
30 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); 38 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info);
@@ -32,4 +40,156 @@ void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
32 /* Copy peer MCS info, the driver might need them. */ 40 /* Copy peer MCS info, the driver might need them. */
33 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, 41 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
34 sizeof(struct ieee80211_vht_mcs_info)); 42 sizeof(struct ieee80211_vht_mcs_info));
43
44 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
45 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
46 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
47 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
48 break;
49 default:
50 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
51 }
52
53 sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
54}
55
56enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
57{
58 struct ieee80211_sub_if_data *sdata = sta->sdata;
59 u32 cap = sta->sta.vht_cap.cap;
60 enum ieee80211_sta_rx_bandwidth bw;
61
62 if (!sta->sta.vht_cap.vht_supported) {
63 bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
64 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
65 goto check_max;
66 }
67
68 switch (sdata->vif.bss_conf.chandef.width) {
69 default:
70 WARN_ON_ONCE(1);
71 /* fall through */
72 case NL80211_CHAN_WIDTH_20_NOHT:
73 case NL80211_CHAN_WIDTH_20:
74 case NL80211_CHAN_WIDTH_40:
75 bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
76 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
77 break;
78 case NL80211_CHAN_WIDTH_160:
79 if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
80 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) {
81 bw = IEEE80211_STA_RX_BW_160;
82 break;
83 }
84 /* fall through */
85 case NL80211_CHAN_WIDTH_80P80:
86 if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
87 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) {
88 bw = IEEE80211_STA_RX_BW_160;
89 break;
90 }
91 /* fall through */
92 case NL80211_CHAN_WIDTH_80:
93 bw = IEEE80211_STA_RX_BW_80;
94 }
95
96 check_max:
97 if (bw > sta->cur_max_bandwidth)
98 bw = sta->cur_max_bandwidth;
99 return bw;
100}
101
102void ieee80211_sta_set_rx_nss(struct sta_info *sta)
103{
104 u8 ht_rx_nss = 0, vht_rx_nss = 0;
105
106 /* if we received a notification already don't overwrite it */
107 if (sta->sta.rx_nss)
108 return;
109
110 if (sta->sta.ht_cap.ht_supported) {
111 if (sta->sta.ht_cap.mcs.rx_mask[0])
112 ht_rx_nss++;
113 if (sta->sta.ht_cap.mcs.rx_mask[1])
114 ht_rx_nss++;
115 if (sta->sta.ht_cap.mcs.rx_mask[2])
116 ht_rx_nss++;
117 if (sta->sta.ht_cap.mcs.rx_mask[3])
118 ht_rx_nss++;
119 /* FIXME: consider rx_highest? */
120 }
121
122 if (sta->sta.vht_cap.vht_supported) {
123 int i;
124 u16 rx_mcs_map;
125
126 rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map);
127
128 for (i = 7; i >= 0; i--) {
129 u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
130
131 if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
132 vht_rx_nss = i + 1;
133 break;
134 }
135 }
136 /* FIXME: consider rx_highest? */
137 }
138
139 ht_rx_nss = max(ht_rx_nss, vht_rx_nss);
140 sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
141}
142
143void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
144 struct sta_info *sta, u8 opmode,
145 enum ieee80211_band band, bool nss_only)
146{
147 struct ieee80211_local *local = sdata->local;
148 struct ieee80211_supported_band *sband;
149 enum ieee80211_sta_rx_bandwidth new_bw;
150 u32 changed = 0;
151 u8 nss;
152
153 sband = local->hw.wiphy->bands[band];
154
155 /* ignore - no support for BF yet */
156 if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
157 return;
158
159 nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
160 nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
161 nss += 1;
162
163 if (sta->sta.rx_nss != nss) {
164 sta->sta.rx_nss = nss;
165 changed |= IEEE80211_RC_NSS_CHANGED;
166 }
167
168 if (nss_only)
169 goto change;
170
171 switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
172 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
173 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
174 break;
175 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
176 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
177 break;
178 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
179 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
180 break;
181 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
182 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
183 break;
184 }
185
186 new_bw = ieee80211_sta_cur_vht_bw(sta);
187 if (new_bw != sta->sta.bandwidth) {
188 sta->sta.bandwidth = new_bw;
189 changed |= IEEE80211_RC_NSS_CHANGED;
190 }
191
192 change:
193 if (changed)
194 rate_control_rate_update(local, sband, sta, changed);
35} 195}
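[Note: ieee80211_sta_set_rx_nss() above derives the receiver's spatial-stream count from the capability bitmaps. For VHT, the 16-bit RX MCS map carries two bits per stream for eight streams, with 0x3 meaning "not supported"; the stream count is the highest supported index plus one. A runnable model of that derivation:

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_NOT_SUPPORTED 3

static int vht_rx_nss(uint16_t rx_mcs_map)
{
        int i;

        for (i = 7; i >= 0; i--)
                if (((rx_mcs_map >> (2 * i)) & 3) != VHT_MCS_NOT_SUPPORTED)
                        return i + 1;
        return 0;
}

int main(void)
{
        /* streams 1-2 support MCS 0-9 (field 0x2), streams 3-8 unsupported */
        printf("nss = %d\n", vht_rx_nss(0xfffa));       /* -> 2 */
        return 0;
}
]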
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 906f00cd6d2f..afba19cb6f87 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -191,6 +191,15 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
191 191
192 /* qos header is 2 bytes */ 192 /* qos header is 2 bytes */
193 *p++ = ack_policy | tid; 193 *p++ = ack_policy | tid;
194 *p = ieee80211_vif_is_mesh(&sdata->vif) ? 194 if (ieee80211_vif_is_mesh(&sdata->vif)) {
195 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; 195 /* preserve RSPI and Mesh PS Level bit */
196 *p &= ((IEEE80211_QOS_CTL_RSPI |
197 IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);
198
199 /* Nulls don't have a mesh header (frame body) */
200 if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
201 *p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
202 } else {
203 *p = 0;
204 }
196} 205}
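[Note: the wme.c hunk changes how the high byte of the QoS Control field is built for mesh. Instead of unconditionally writing only the Mesh Control Present bit, it now preserves the RSPI and Mesh PS Level bits (used by mesh power save) and skips Mesh Control Present for QoS Null frames, which carry no mesh header. A runnable model — the constants are assumed to mirror the kernel's ieee80211.h definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define QOS_CTL_MESH_CONTROL_PRESENT 0x0100
#define QOS_CTL_MESH_PS_LEVEL        0x0200
#define QOS_CTL_RSPI                 0x0400

static uint8_t mesh_qos_high_byte(uint8_t cur, bool qos_null)
{
        /* keep only RSPI and Mesh PS Level from the existing byte */
        uint8_t b = cur & ((QOS_CTL_RSPI | QOS_CTL_MESH_PS_LEVEL) >> 8);

        /* QoS Nulls have no mesh header, so no Mesh Control Present */
        if (!qos_null)
                b |= QOS_CTL_MESH_CONTROL_PRESENT >> 8;
        return b;
}

int main(void)
{
        printf("0x%02x\n", mesh_qos_high_byte(0xff, false));    /* 0x07 */
        printf("0x%02x\n", mesh_qos_high_byte(0xff, true));     /* 0x06 */
        return 0;
}
]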
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c175ee866ff4..c7c6d644486f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -181,7 +181,6 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
182 struct ieee80211_key *key = tx->key; 182 struct ieee80211_key *key = tx->key;
183 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 183 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
184 unsigned long flags;
185 unsigned int hdrlen; 184 unsigned int hdrlen;
186 int len, tail; 185 int len, tail;
187 u8 *pos; 186 u8 *pos;
@@ -216,12 +215,12 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
216 return 0; 215 return 0;
217 216
218 /* Increase IV for the frame */ 217 /* Increase IV for the frame */
219 spin_lock_irqsave(&key->u.tkip.txlock, flags); 218 spin_lock(&key->u.tkip.txlock);
220 key->u.tkip.tx.iv16++; 219 key->u.tkip.tx.iv16++;
221 if (key->u.tkip.tx.iv16 == 0) 220 if (key->u.tkip.tx.iv16 == 0)
222 key->u.tkip.tx.iv32++; 221 key->u.tkip.tx.iv32++;
223 pos = ieee80211_tkip_add_iv(pos, key); 222 pos = ieee80211_tkip_add_iv(pos, key);
224 spin_unlock_irqrestore(&key->u.tkip.txlock, flags); 223 spin_unlock(&key->u.tkip.txlock);
225 224
226 /* hwaccel - with software IV */ 225 /* hwaccel - with software IV */
227 if (info->control.hw_key) 226 if (info->control.hw_key)
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index a967ddaa4e2f..b33dd76d4307 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -1,6 +1,6 @@
1config MAC802154 1config MAC802154
2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" 2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
3 depends on IEEE802154 && EXPERIMENTAL 3 depends on IEEE802154
4 select CRC_CCITT 4 select CRC_CCITT
5 ---help--- 5 ---help---
6 This option enables the hardware independent IEEE 802.15.4 6 This option enables the hardware independent IEEE 802.15.4
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e748aed290aa..b7c7f815deae 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -224,9 +224,9 @@ void ieee802154_free_device(struct ieee802154_dev *hw)
224 224
225 BUG_ON(!list_empty(&priv->slaves)); 225 BUG_ON(!list_empty(&priv->slaves));
226 226
227 wpan_phy_free(priv->phy);
228
229 mutex_destroy(&priv->slaves_mtx); 227 mutex_destroy(&priv->slaves_mtx);
228
229 wpan_phy_free(priv->phy);
230} 230}
231EXPORT_SYMBOL(ieee802154_free_device); 231EXPORT_SYMBOL(ieee802154_free_device);
232 232
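[Note: the reordering in ieee802154_free_device() matters because priv — and with it slaves_mtx — lives inside the allocation released by wpan_phy_free(); destroying the mutex after freeing the phy would touch freed memory. A runnable userspace model of the rule (tear down embedded members before freeing their container):

#include <pthread.h>
#include <stdlib.h>

struct priv {
        pthread_mutex_t slaves_mtx;
        /* ... */
};

static void free_device(struct priv *p)
{
        pthread_mutex_destroy(&p->slaves_mtx);  /* first: still uses *p */
        free(p);                                /* then release the memory */
}

int main(void)
{
        struct priv *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        pthread_mutex_init(&p->slaves_mtx, NULL);
        free_device(p);
        return 0;
}
]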
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 1191039c2b1b..d20c6d3c247d 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -41,7 +41,7 @@ static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
41 return -EINVAL; 41 return -EINVAL;
42 42
43 *val = skb->data[0]; 43 *val = skb->data[0];
44 skb_pull(skb, 1); 44 skb_pull(skb, 1);
45 45
46 return 0; 46 return 0;
47} 47}
@@ -137,16 +137,12 @@ static int mac802154_header_create(struct sk_buff *skb,
137 struct ieee802154_addr dev_addr; 137 struct ieee802154_addr dev_addr;
138 struct mac802154_sub_if_data *priv = netdev_priv(dev); 138 struct mac802154_sub_if_data *priv = netdev_priv(dev);
139 int pos = 2; 139 int pos = 2;
140 u8 *head; 140 u8 head[MAC802154_FRAME_HARD_HEADER_LEN];
141 u16 fc; 141 u16 fc;
142 142
143 if (!daddr) 143 if (!daddr)
144 return -EINVAL; 144 return -EINVAL;
145 145
146 head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
147 if (head == NULL)
148 return -ENOMEM;
149
150 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */ 146 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
151 fc = mac_cb_type(skb); 147 fc = mac_cb_type(skb);
152 148
@@ -210,7 +206,6 @@ static int mac802154_header_create(struct sk_buff *skb,
 210 head[1] = fc >> 8; 206 head[1] = fc >> 8;
211 207
212 memcpy(skb_push(skb, pos), head, pos); 208 memcpy(skb_push(skb, pos), head, pos);
213 kfree(head);
214 209
215 return pos; 210 return pos;
216} 211}
@@ -389,7 +384,7 @@ void mac802154_wpan_setup(struct net_device *dev)
389 384
390static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb) 385static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
391{ 386{
392 return netif_rx(skb); 387 return netif_rx_ni(skb);
393} 388}
394 389
395static int 390static int
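[Note: two small fixes in wpan.c above. The header scratch buffer has a small, fixed maximum size, so a stack array replaces kzalloc()/kfree(), removing the allocation-failure path and any chance of leaking the buffer on error returns; and netif_rx() becomes netif_rx_ni(), the variant for callers in process context. A sketch of the stack-buffer pattern, sizes illustrative:

#include <string.h>
#include <stdio.h>

#define HDR_LEN 24      /* illustrative; the patch uses
                           MAC802154_FRAME_HARD_HEADER_LEN */

static int build_header(char *out)
{
        char head[HDR_LEN];     /* stack scratch: no alloc failure, no leak */
        int pos = 0;

        head[pos++] = 0x41;     /* frame control, low byte (example) */
        head[pos++] = 0x88;     /* frame control, high byte (example) */
        memcpy(out, head, pos);
        return pos;
}

int main(void)
{
        char buf[HDR_LEN];

        printf("header bytes: %d\n", build_header(buf));
        return 0;
}
]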
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index fefa514b9917..56d22cae5906 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -124,9 +124,14 @@ config NF_CONNTRACK_TIMESTAMP
124 124
125 If unsure, say `N'. 125 If unsure, say `N'.
126 126
127config NF_CONNTRACK_LABELS
128 bool
129 help
130 This option enables support for assigning user-defined flag bits
 131 to connection tracking entries. It is selected by the connlabel match.
132
127config NF_CT_PROTO_DCCP 133config NF_CT_PROTO_DCCP
128 tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)' 134 tristate 'DCCP protocol connection tracking support'
129 depends on EXPERIMENTAL
130 depends on NETFILTER_ADVANCED 135 depends on NETFILTER_ADVANCED
131 default IP_DCCP 136 default IP_DCCP
132 help 137 help
@@ -139,8 +144,7 @@ config NF_CT_PROTO_GRE
139 tristate 144 tristate
140 145
141config NF_CT_PROTO_SCTP 146config NF_CT_PROTO_SCTP
142 tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' 147 tristate 'SCTP protocol connection tracking support'
143 depends on EXPERIMENTAL
144 depends on NETFILTER_ADVANCED 148 depends on NETFILTER_ADVANCED
145 default IP_SCTP 149 default IP_SCTP
146 help 150 help
@@ -281,8 +285,7 @@ config NF_CONNTRACK_PPTP
281 To compile it as a module, choose M here. If unsure, say N. 285 To compile it as a module, choose M here. If unsure, say N.
282 286
283config NF_CONNTRACK_SANE 287config NF_CONNTRACK_SANE
284 tristate "SANE protocol support (EXPERIMENTAL)" 288 tristate "SANE protocol support"
285 depends on EXPERIMENTAL
286 depends on NETFILTER_ADVANCED 289 depends on NETFILTER_ADVANCED
287 help 290 help
288 SANE is a protocol for remote access to scanners as implemented 291 SANE is a protocol for remote access to scanners as implemented
@@ -409,8 +412,7 @@ endif # NF_CONNTRACK
409 412
410# transparent proxy support 413# transparent proxy support
411config NETFILTER_TPROXY 414config NETFILTER_TPROXY
412 tristate "Transparent proxying support (EXPERIMENTAL)" 415 tristate "Transparent proxying support"
413 depends on EXPERIMENTAL
414 depends on IP_NF_MANGLE 416 depends on IP_NF_MANGLE
415 depends on NETFILTER_ADVANCED 417 depends on NETFILTER_ADVANCED
416 help 418 help
@@ -680,6 +682,13 @@ config NETFILTER_XT_TARGET_NFQUEUE
680 682
681 To compile it as a module, choose M here. If unsure, say N. 683 To compile it as a module, choose M here. If unsure, say N.
682 684
685config NETFILTER_XT_TARGET_NOTRACK
686 tristate '"NOTRACK" target support (DEPRECATED)'
687 depends on NF_CONNTRACK
688 depends on IP_NF_RAW || IP6_NF_RAW
689 depends on NETFILTER_ADVANCED
690 select NETFILTER_XT_TARGET_CT
691
683config NETFILTER_XT_TARGET_RATEEST 692config NETFILTER_XT_TARGET_RATEEST
684 tristate '"RATEEST" target support' 693 tristate '"RATEEST" target support'
685 depends on NETFILTER_ADVANCED 694 depends on NETFILTER_ADVANCED
@@ -711,8 +720,7 @@ config NETFILTER_XT_TARGET_TEE
711 this clone be rerouted to another nexthop. 720 this clone be rerouted to another nexthop.
712 721
713config NETFILTER_XT_TARGET_TPROXY 722config NETFILTER_XT_TARGET_TPROXY
714 tristate '"TPROXY" target support (EXPERIMENTAL)' 723 tristate '"TPROXY" target support'
715 depends on EXPERIMENTAL
716 depends on NETFILTER_TPROXY 724 depends on NETFILTER_TPROXY
717 depends on NETFILTER_XTABLES 725 depends on NETFILTER_XTABLES
718 depends on NETFILTER_ADVANCED 726 depends on NETFILTER_ADVANCED
@@ -776,8 +784,7 @@ config NETFILTER_XT_TARGET_TCPMSS
776 To compile it as a module, choose M here. If unsure, say N. 784 To compile it as a module, choose M here. If unsure, say N.
777 785
778config NETFILTER_XT_TARGET_TCPOPTSTRIP 786config NETFILTER_XT_TARGET_TCPOPTSTRIP
779 tristate '"TCPOPTSTRIP" target support (EXPERIMENTAL)' 787 tristate '"TCPOPTSTRIP" target support'
780 depends on EXPERIMENTAL
781 depends on IP_NF_MANGLE || IP6_NF_MANGLE 788 depends on IP_NF_MANGLE || IP6_NF_MANGLE
782 depends on NETFILTER_ADVANCED 789 depends on NETFILTER_ADVANCED
783 help 790 help
@@ -798,6 +805,15 @@ config NETFILTER_XT_MATCH_ADDRTYPE
798 If you want to compile it as a module, say M here and read 805 If you want to compile it as a module, say M here and read
799 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 806 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
800 807
808config NETFILTER_XT_MATCH_BPF
809 tristate '"bpf" match support'
810 depends on NETFILTER_ADVANCED
811 help
 812 BPF matching applies a Linux socket filter to each packet and
813 accepts those for which the filter returns non-zero.
814
815 To compile it as a module, choose M here. If unsure, say N.
816
801config NETFILTER_XT_MATCH_CLUSTER 817config NETFILTER_XT_MATCH_CLUSTER
802 tristate '"cluster" match support' 818 tristate '"cluster" match support'
803 depends on NF_CONNTRACK 819 depends on NF_CONNTRACK
@@ -835,6 +851,19 @@ config NETFILTER_XT_MATCH_CONNBYTES
835 If you want to compile it as a module, say M here and read 851 If you want to compile it as a module, say M here and read
836 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 852 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
837 853
854config NETFILTER_XT_MATCH_CONNLABEL
855 tristate '"connlabel" match support'
856 select NF_CONNTRACK_LABELS
857 depends on NF_CONNTRACK
858 depends on NETFILTER_ADVANCED
859 ---help---
 860 This match allows you to test and assign userspace-defined label names
861 to a connection. The kernel only stores bit values - mapping
862 names to bits is done by userspace.
863
864 Unlike connmark, more than 32 flag bits may be assigned to a
865 connection simultaneously.
866
838config NETFILTER_XT_MATCH_CONNLIMIT 867config NETFILTER_XT_MATCH_CONNLIMIT
839 tristate '"connlimit" match support"' 868 tristate '"connlimit" match support"'
840 depends on NF_CONNTRACK 869 depends on NF_CONNTRACK
@@ -1138,8 +1167,7 @@ config NETFILTER_XT_MATCH_RECENT
1138 Official Website: <http://snowman.net/projects/ipt_recent/> 1167 Official Website: <http://snowman.net/projects/ipt_recent/>
1139 1168
1140config NETFILTER_XT_MATCH_SCTP 1169config NETFILTER_XT_MATCH_SCTP
1141 tristate '"sctp" protocol match support (EXPERIMENTAL)' 1170 tristate '"sctp" protocol match support'
1142 depends on EXPERIMENTAL
1143 depends on NETFILTER_ADVANCED 1171 depends on NETFILTER_ADVANCED
1144 default IP_SCTP 1172 default IP_SCTP
1145 help 1173 help
@@ -1151,8 +1179,7 @@ config NETFILTER_XT_MATCH_SCTP
1151 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 1179 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
1152 1180
1153config NETFILTER_XT_MATCH_SOCKET 1181config NETFILTER_XT_MATCH_SOCKET
1154 tristate '"socket" match support (EXPERIMENTAL)' 1182 tristate '"socket" match support'
1155 depends on EXPERIMENTAL
1156 depends on NETFILTER_TPROXY 1183 depends on NETFILTER_TPROXY
1157 depends on NETFILTER_XTABLES 1184 depends on NETFILTER_XTABLES
1158 depends on NETFILTER_ADVANCED 1185 depends on NETFILTER_ADVANCED
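[Note: the new xt_bpf match runs a classic BPF (socket-filter) program over each packet, starting at the network header; a packet matches when the program returns non-zero. The sketch below builds such a program in C — "match TCP over IPv4" — and prints it in roughly the 'len,code jt jf k,...' textual form userspace passes to the match; this is an illustration, not the module's own code:

#include <linux/filter.h>
#include <stdio.h>

int main(void)
{
        struct sock_filter prog[] = {
                /* A = byte at offset 9 of the IPv4 header (protocol) */
                BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 9),
                /* if A == IPPROTO_TCP (6): fall through, else skip 1 */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 6, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, 0xffff),      /* match */
                BPF_STMT(BPF_RET | BPF_K, 0),           /* no match */
        };
        size_t i, n = sizeof(prog) / sizeof(prog[0]);

        printf("%zu", n);
        for (i = 0; i < n; i++)
                printf(",%u %u %u %u", (unsigned)prog[i].code,
                       (unsigned)prog[i].jt, (unsigned)prog[i].jf,
                       (unsigned)prog[i].k);
        printf("\n");
        return 0;
}
]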
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 32596978df1d..a1abf87d43bf 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -4,6 +4,7 @@ nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_exp
4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o 4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
5nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o 5nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
6nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 6nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
7nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
7 8
8obj-$(CONFIG_NETFILTER) = netfilter.o 9obj-$(CONFIG_NETFILTER) = netfilter.o
9 10
@@ -98,9 +99,11 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
98 99
99# matches 100# matches
100obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o 101obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
102obj-$(CONFIG_NETFILTER_XT_MATCH_BPF) += xt_bpf.o
101obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o 103obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
102obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o 104obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
103obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o 105obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
106obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLABEL) += xt_connlabel.o
104obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o 107obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
105obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o 108obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
106obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o 109obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 6d6d8f2b033e..f82b2e606cfd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -88,14 +88,14 @@ find_set_type(const char *name, u8 family, u8 revision)
88static bool 88static bool
89load_settype(const char *name) 89load_settype(const char *name)
90{ 90{
91 nfnl_unlock(); 91 nfnl_unlock(NFNL_SUBSYS_IPSET);
92 pr_debug("try to load ip_set_%s\n", name); 92 pr_debug("try to load ip_set_%s\n", name);
93 if (request_module("ip_set_%s", name) < 0) { 93 if (request_module("ip_set_%s", name) < 0) {
94 pr_warning("Can't find ip_set type %s\n", name); 94 pr_warning("Can't find ip_set type %s\n", name);
95 nfnl_lock(); 95 nfnl_lock(NFNL_SUBSYS_IPSET);
96 return false; 96 return false;
97 } 97 }
98 nfnl_lock(); 98 nfnl_lock(NFNL_SUBSYS_IPSET);
99 return true; 99 return true;
100} 100}
101 101
@@ -532,7 +532,7 @@ ip_set_nfnl_get(const char *name)
532 ip_set_id_t i, index = IPSET_INVALID_ID; 532 ip_set_id_t i, index = IPSET_INVALID_ID;
533 struct ip_set *s; 533 struct ip_set *s;
534 534
535 nfnl_lock(); 535 nfnl_lock(NFNL_SUBSYS_IPSET);
536 for (i = 0; i < ip_set_max; i++) { 536 for (i = 0; i < ip_set_max; i++) {
537 s = nfnl_set(i); 537 s = nfnl_set(i);
538 if (s != NULL && STREQ(s->name, name)) { 538 if (s != NULL && STREQ(s->name, name)) {
@@ -541,7 +541,7 @@ ip_set_nfnl_get(const char *name)
541 break; 541 break;
542 } 542 }
543 } 543 }
544 nfnl_unlock(); 544 nfnl_unlock(NFNL_SUBSYS_IPSET);
545 545
546 return index; 546 return index;
547} 547}
@@ -561,13 +561,13 @@ ip_set_nfnl_get_byindex(ip_set_id_t index)
561 if (index > ip_set_max) 561 if (index > ip_set_max)
562 return IPSET_INVALID_ID; 562 return IPSET_INVALID_ID;
563 563
564 nfnl_lock(); 564 nfnl_lock(NFNL_SUBSYS_IPSET);
565 set = nfnl_set(index); 565 set = nfnl_set(index);
566 if (set) 566 if (set)
567 __ip_set_get(set); 567 __ip_set_get(set);
568 else 568 else
569 index = IPSET_INVALID_ID; 569 index = IPSET_INVALID_ID;
570 nfnl_unlock(); 570 nfnl_unlock(NFNL_SUBSYS_IPSET);
571 571
572 return index; 572 return index;
573} 573}
@@ -584,11 +584,11 @@ void
584ip_set_nfnl_put(ip_set_id_t index) 584ip_set_nfnl_put(ip_set_id_t index)
585{ 585{
586 struct ip_set *set; 586 struct ip_set *set;
587 nfnl_lock(); 587 nfnl_lock(NFNL_SUBSYS_IPSET);
588 set = nfnl_set(index); 588 set = nfnl_set(index);
589 if (set != NULL) 589 if (set != NULL)
590 __ip_set_put(set); 590 __ip_set_put(set);
591 nfnl_unlock(); 591 nfnl_unlock(NFNL_SUBSYS_IPSET);
592} 592}
593EXPORT_SYMBOL_GPL(ip_set_nfnl_put); 593EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
594 594
@@ -1763,10 +1763,10 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1763 goto done; 1763 goto done;
1764 } 1764 }
1765 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; 1765 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
1766 nfnl_lock(); 1766 nfnl_lock(NFNL_SUBSYS_IPSET);
1767 find_set_and_id(req_get->set.name, &id); 1767 find_set_and_id(req_get->set.name, &id);
1768 req_get->set.index = id; 1768 req_get->set.index = id;
1769 nfnl_unlock(); 1769 nfnl_unlock(NFNL_SUBSYS_IPSET);
1770 goto copy; 1770 goto copy;
1771 } 1771 }
1772 case IP_SET_OP_GET_BYINDEX: { 1772 case IP_SET_OP_GET_BYINDEX: {
@@ -1778,11 +1778,11 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1778 ret = -EINVAL; 1778 ret = -EINVAL;
1779 goto done; 1779 goto done;
1780 } 1780 }
1781 nfnl_lock(); 1781 nfnl_lock(NFNL_SUBSYS_IPSET);
1782 set = nfnl_set(req_get->set.index); 1782 set = nfnl_set(req_get->set.index);
1783 strncpy(req_get->set.name, set ? set->name : "", 1783 strncpy(req_get->set.name, set ? set->name : "",
1784 IPSET_MAXNAMELEN); 1784 IPSET_MAXNAMELEN);
1785 nfnl_unlock(); 1785 nfnl_unlock(NFNL_SUBSYS_IPSET);
1786 goto copy; 1786 goto copy;
1787 } 1787 }
1788 default: 1788 default:
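The ip_set hunks above track an nfnetlink API change: the single global nfnl mutex has been split into one mutex per subsystem, so nfnl_lock()/nfnl_unlock() now take a subsystem identifier. The signature change, sketched from <linux/netfilter/nfnetlink.h> of this era:

	/* before: one mutex for all of nfnetlink */
	void nfnl_lock(void);
	void nfnl_unlock(void);

	/* after: one mutex per subsystem */
	void nfnl_lock(__u8 subsys_id);
	void nfnl_unlock(__u8 subsys_id);

Dropping the lock around request_module() in load_settype() is still required: the autoloaded ip_set type module registers with nfnetlink from its init path, which would deadlock against a mutex held by the requester. The difference is that only ipset's own mutex is released and reacquired now, so type autoloading no longer serializes unrelated subsystems such as ctnetlink.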
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5c0b78528e55..b7d4cb475ae6 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -234,7 +234,7 @@ hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
234 const struct hash_ip6_elem *ip2, 234 const struct hash_ip6_elem *ip2,
235 u32 *multi) 235 u32 *multi)
236{ 236{
237 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0; 237 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6);
238} 238}
239 239
240static inline bool 240static inline bool
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 6283351f4eeb..d8f77bacae86 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -284,7 +284,7 @@ hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
284 const struct hash_ipport6_elem *ip2, 284 const struct hash_ipport6_elem *ip2,
285 u32 *multi) 285 u32 *multi)
286{ 286{
287 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 287 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
288 ip1->port == ip2->port && 288 ip1->port == ip2->port &&
289 ip1->proto == ip2->proto; 289 ip1->proto == ip2->proto;
290} 290}
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6a21271c8d5a..1da1e955f38b 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -294,8 +294,8 @@ hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
294 const struct hash_ipportip6_elem *ip2, 294 const struct hash_ipportip6_elem *ip2,
295 u32 *multi) 295 u32 *multi)
296{ 296{
297 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 297 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
298 ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && 298 ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) &&
299 ip1->port == ip2->port && 299 ip1->port == ip2->port &&
300 ip1->proto == ip2->proto; 300 ip1->proto == ip2->proto;
301} 301}
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 2d5cd4ee30eb..f2627226a087 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -388,8 +388,8 @@ hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
388 const struct hash_ipportnet6_elem *ip2, 388 const struct hash_ipportnet6_elem *ip2,
389 u32 *multi) 389 u32 *multi)
390{ 390{
391 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 391 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
392 ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 && 392 ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) &&
393 ip1->cidr == ip2->cidr && 393 ip1->cidr == ip2->cidr &&
394 ip1->port == ip2->port && 394 ip1->port == ip2->port &&
395 ip1->proto == ip2->proto; 395 ip1->proto == ip2->proto;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 29e94b981f3f..4b677cf6bf7d 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -286,7 +286,7 @@ hash_net6_data_equal(const struct hash_net6_elem *ip1,
286 const struct hash_net6_elem *ip2, 286 const struct hash_net6_elem *ip2,
287 u32 *multi) 287 u32 *multi)
288{ 288{
289 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 289 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
290 ip1->cidr == ip2->cidr; 290 ip1->cidr == ip2->cidr;
291} 291}
292 292
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 45a101439bc5..6ba985f1c96f 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -471,7 +471,7 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
471 const struct hash_netiface6_elem *ip2, 471 const struct hash_netiface6_elem *ip2,
472 u32 *multi) 472 u32 *multi)
473{ 473{
474 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 474 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
475 ip1->cidr == ip2->cidr && 475 ip1->cidr == ip2->cidr &&
476 (++*multi) && 476 (++*multi) &&
477 ip1->physdev == ip2->physdev && 477 ip1->physdev == ip2->physdev &&
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 7ef700de596c..af20c0c5ced2 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -350,7 +350,7 @@ hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
350 const struct hash_netport6_elem *ip2, 350 const struct hash_netport6_elem *ip2,
351 u32 *multi) 351 u32 *multi)
352{ 352{
353 return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && 353 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
354 ip1->port == ip2->port && 354 ip1->port == ip2->port &&
355 ip1->proto == ip2->proto && 355 ip1->proto == ip2->proto &&
356 ip1->cidr == ip2->cidr; 356 ip1->cidr == ip2->cidr;
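All six hash-type conversions above are the same cleanup: ipv6_addr_cmp() is a three-way, memcmp-style comparison, so the "== 0" tests were computing an ordering only to throw it away. ipv6_addr_equal() answers the yes/no question directly and returns bool, matching the _data_equal return type. A sketch modeled on the <net/ipv6.h> implementation of this era (word layout assumed):

	static inline bool sketch_ipv6_addr_equal(const struct in6_addr *a1,
						  const struct in6_addr *a2)
	{
		/* four 32-bit XORs ORed together; zero iff all words match */
		return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
			(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
			(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
			(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
	}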
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 9713e6e86d47..0b779d7df881 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -605,12 +605,12 @@ int __net_init ip_vs_app_net_init(struct net *net)
605 struct netns_ipvs *ipvs = net_ipvs(net); 605 struct netns_ipvs *ipvs = net_ipvs(net);
606 606
607 INIT_LIST_HEAD(&ipvs->app_list); 607 INIT_LIST_HEAD(&ipvs->app_list);
608 proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops); 608 proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
609 return 0; 609 return 0;
610} 610}
611 611
612void __net_exit ip_vs_app_net_cleanup(struct net *net) 612void __net_exit ip_vs_app_net_cleanup(struct net *net)
613{ 613{
614 unregister_ip_vs_app(net, NULL /* all */); 614 unregister_ip_vs_app(net, NULL /* all */);
615 proc_net_remove(net, "ip_vs_app"); 615 remove_proc_entry("ip_vs_app", net->proc_net);
616} 616}
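proc_net_fops_create() and proc_net_remove() were removed from the procfs API; the replacements take the per-namespace parent directory explicitly, which is what the IPVS conversions above (and the later ip_vs_conn.c and ip_vs_ctl.c hunks) do. The resulting per-net shape, sketched with hypothetical "example" names:

	static int __net_init example_net_init(struct net *net)
	{
		/* net->proc_net is this namespace's /proc/net directory */
		if (!proc_create("example", 0, net->proc_net, &example_fops))
			return -ENOMEM;
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		remove_proc_entry("example", net->proc_net);
	}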
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 30e764ad021f..9f00db7e03f2 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -796,8 +796,7 @@ static void ip_vs_conn_expire(unsigned long data)
796 */ 796 */
797 if (likely(atomic_read(&cp->refcnt) == 1)) { 797 if (likely(atomic_read(&cp->refcnt) == 1)) {
798 /* delete the timer if it is activated by other users */ 798 /* delete the timer if it is activated by other users */
799 if (timer_pending(&cp->timer)) 799 del_timer(&cp->timer);
800 del_timer(&cp->timer);
801 800
802 /* does anybody control me? */ 801 /* does anybody control me? */
803 if (cp->control) 802 if (cp->control)
@@ -1292,8 +1291,8 @@ int __net_init ip_vs_conn_net_init(struct net *net)
1292 1291
1293 atomic_set(&ipvs->conn_count, 0); 1292 atomic_set(&ipvs->conn_count, 0);
1294 1293
1295 proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops); 1294 proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
1296 proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops); 1295 proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
1297 return 0; 1296 return 0;
1298} 1297}
1299 1298
@@ -1301,8 +1300,8 @@ void __net_exit ip_vs_conn_net_cleanup(struct net *net)
1301{ 1300{
1302 /* flush all the connection entries first */ 1301 /* flush all the connection entries first */
1303 ip_vs_conn_flush(net); 1302 ip_vs_conn_flush(net);
1304 proc_net_remove(net, "ip_vs_conn"); 1303 remove_proc_entry("ip_vs_conn", net->proc_net);
1305 proc_net_remove(net, "ip_vs_conn_sync"); 1304 remove_proc_entry("ip_vs_conn_sync", net->proc_net);
1306} 1305}
1307 1306
1308int __init ip_vs_conn_init(void) 1307int __init ip_vs_conn_init(void)
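The timer_pending() guard dropped from ip_vs_conn_expire() was redundant: del_timer() already returns immediately for an inactive timer, and its return value reports whether the timer was pending, so the open-coded pre-check only added a racy double test. Equivalent idiom, sketched:

	if (del_timer(&cp->timer))
		;	/* timer was pending and has now been deactivated */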
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ec664cbb119f..c68198bf9128 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3800,10 +3800,10 @@ int __net_init ip_vs_control_net_init(struct net *net)
3800 3800
3801 spin_lock_init(&ipvs->tot_stats.lock); 3801 spin_lock_init(&ipvs->tot_stats.lock);
3802 3802
3803 proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops); 3803 proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
3804 proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops); 3804 proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
3805 proc_net_fops_create(net, "ip_vs_stats_percpu", 0, 3805 proc_create("ip_vs_stats_percpu", 0, net->proc_net,
3806 &ip_vs_stats_percpu_fops); 3806 &ip_vs_stats_percpu_fops);
3807 3807
3808 if (ip_vs_control_net_init_sysctl(net)) 3808 if (ip_vs_control_net_init_sysctl(net))
3809 goto err; 3809 goto err;
@@ -3822,9 +3822,9 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3822 ip_vs_trash_cleanup(net); 3822 ip_vs_trash_cleanup(net);
3823 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3823 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3824 ip_vs_control_net_cleanup_sysctl(net); 3824 ip_vs_control_net_cleanup_sysctl(net);
3825 proc_net_remove(net, "ip_vs_stats_percpu"); 3825 remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
3826 proc_net_remove(net, "ip_vs_stats"); 3826 remove_proc_entry("ip_vs_stats", net->proc_net);
3827 proc_net_remove(net, "ip_vs"); 3827 remove_proc_entry("ip_vs", net->proc_net);
3828 free_percpu(ipvs->tot_stats.cpustats); 3828 free_percpu(ipvs->tot_stats.cpustats);
3829} 3829}
3830 3830
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 746048b13ef3..ae8ec6f27688 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
61 return 1; 61 return 1;
62} 62}
63 63
64static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
65 unsigned int sctphoff)
66{
67 __u32 crc32;
68 struct sk_buff *iter;
69
70 crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
71 skb_walk_frags(skb, iter)
72 crc32 = sctp_update_cksum((u8 *) iter->data,
73 skb_headlen(iter), crc32);
74 sctph->checksum = sctp_end_cksum(crc32);
75
76 skb->ip_summed = CHECKSUM_UNNECESSARY;
77}
78
64static int 79static int
65sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 80sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
66 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 81 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
67{ 82{
68 sctp_sctphdr_t *sctph; 83 sctp_sctphdr_t *sctph;
69 unsigned int sctphoff = iph->len; 84 unsigned int sctphoff = iph->len;
70 struct sk_buff *iter;
71 __be32 crc32;
72 85
73#ifdef CONFIG_IP_VS_IPV6 86#ifdef CONFIG_IP_VS_IPV6
74 if (cp->af == AF_INET6 && iph->fragoffs) 87 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
92 sctph = (void *) skb_network_header(skb) + sctphoff; 105 sctph = (void *) skb_network_header(skb) + sctphoff;
93 sctph->source = cp->vport; 106 sctph->source = cp->vport;
94 107
95 /* Calculate the checksum */ 108 sctp_nat_csum(skb, sctph, sctphoff);
96 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
97 skb_walk_frags(skb, iter)
98 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
99 crc32);
100 crc32 = sctp_end_cksum(crc32);
101 sctph->checksum = crc32;
102 109
103 return 1; 110 return 1;
104} 111}
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
109{ 116{
110 sctp_sctphdr_t *sctph; 117 sctp_sctphdr_t *sctph;
111 unsigned int sctphoff = iph->len; 118 unsigned int sctphoff = iph->len;
112 struct sk_buff *iter;
113 __be32 crc32;
114 119
115#ifdef CONFIG_IP_VS_IPV6 120#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6 && iph->fragoffs) 121 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
134 sctph = (void *) skb_network_header(skb) + sctphoff; 139 sctph = (void *) skb_network_header(skb) + sctphoff;
135 sctph->dest = cp->dport; 140 sctph->dest = cp->dport;
136 141
137 /* Calculate the checksum */ 142 sctp_nat_csum(skb, sctph, sctphoff);
138 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
139 skb_walk_frags(skb, iter)
140 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
141 crc32);
142 crc32 = sctp_end_cksum(crc32);
143 sctph->checksum = crc32;
144 143
145 return 1; 144 return 1;
146} 145}
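sctp_nat_csum() folds the two identical open-coded checksum blocks of the SNAT and DNAT handlers into one helper: the CRC32c is started over the linear region beginning at the SCTP header, updated across each skb on the frag list, then finalized into the header. The one behavioral addition is the skb->ip_summed assignment, which the open-coded copies did not make; it tells the rest of the stack the checksum is already valid. Annotated restatement (a sketch of the helper body above):

	crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
	skb_walk_frags(skb, iter)		/* walk the frag_list, if any */
		crc32 = sctp_update_cksum((u8 *)iter->data,
					  skb_headlen(iter), crc32);
	sctph->checksum = sctp_end_cksum(crc32);
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* skip later verification */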
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index effa10c9e4e3..44fd10c539ac 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1795 GFP_KERNEL); 1795 GFP_KERNEL);
1796 if (!tinfo->buf) 1796 if (!tinfo->buf)
1797 goto outtinfo; 1797 goto outtinfo;
1798 } else {
1799 tinfo->buf = NULL;
1798 } 1800 }
1799 tinfo->id = id; 1801 tinfo->id = id;
1800 1802
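This start_sync_thread() fix matters on the error path: tinfo is allocated with kmalloc(), so in the branch that takes no buffer, tinfo->buf previously held uninitialized memory and the shared cleanup label could kfree() a garbage pointer if a later step (such as starting the kthread) failed. Explicitly NULLing the field makes the unconditional kfree(tinfo->buf) safe. An equivalent defensive shape, sketched (not the approach this patch takes):

	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);	/* ->buf starts NULL */
	if (!tinfo)
		goto out;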
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 7df424e2d10c..2d3030ab5b61 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -106,36 +106,26 @@ static void nf_conntrack_acct_fini_sysctl(struct net *net)
106} 106}
107#endif 107#endif
108 108
109int nf_conntrack_acct_init(struct net *net) 109int nf_conntrack_acct_pernet_init(struct net *net)
110{ 110{
111 int ret;
112
113 net->ct.sysctl_acct = nf_ct_acct; 111 net->ct.sysctl_acct = nf_ct_acct;
112 return nf_conntrack_acct_init_sysctl(net);
113}
114 114
115 if (net_eq(net, &init_net)) { 115void nf_conntrack_acct_pernet_fini(struct net *net)
116 ret = nf_ct_extend_register(&acct_extend); 116{
117 if (ret < 0) { 117 nf_conntrack_acct_fini_sysctl(net);
118 printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n"); 118}
119 goto out_extend_register;
120 }
121 }
122 119
123 ret = nf_conntrack_acct_init_sysctl(net); 120int nf_conntrack_acct_init(void)
121{
122 int ret = nf_ct_extend_register(&acct_extend);
124 if (ret < 0) 123 if (ret < 0)
125 goto out_sysctl; 124 pr_err("nf_conntrack_acct: Unable to register extension\n");
126
127 return 0;
128
129out_sysctl:
130 if (net_eq(net, &init_net))
131 nf_ct_extend_unregister(&acct_extend);
132out_extend_register:
133 return ret; 125 return ret;
134} 126}
135 127
136void nf_conntrack_acct_fini(struct net *net) 128void nf_conntrack_acct_fini(void)
137{ 129{
138 nf_conntrack_acct_fini_sysctl(net); 130 nf_ct_extend_unregister(&acct_extend);
139 if (net_eq(net, &init_net))
140 nf_ct_extend_unregister(&acct_extend);
141} 131}
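This is the template for the whole conntrack series that follows: each subsystem's init is split into a global part that registers the conntrack extension exactly once (nf_conntrack_acct_init/_fini, no net argument) and a per-namespace part that only touches netns state such as sysctls (nf_conntrack_acct_pernet_init/_fini). That removes the fragile net_eq(net, &init_net) special-casing. How the two halves are meant to be driven, sketched with a hypothetical module (in the tree they are actually invoked from nf_conntrack_init_start()/nf_conntrack_init_net(), shown later in this series):

	static struct pernet_operations example_net_ops = {
		.init = nf_conntrack_acct_pernet_init,	/* per-namespace sysctl */
		.exit = nf_conntrack_acct_pernet_fini,
	};

	static int __init example_module_init(void)
	{
		int ret = nf_conntrack_acct_init();	/* global: extension */
		if (ret < 0)
			return ret;
		ret = register_pernet_subsys(&example_net_ops);
		if (ret < 0)
			nf_conntrack_acct_fini();
		return ret;
	}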
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index c514fe6033d2..dbdaa1149260 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -145,6 +145,7 @@ static int amanda_help(struct sk_buff *skb,
145 145
146 exp = nf_ct_expect_alloc(ct); 146 exp = nf_ct_expect_alloc(ct);
147 if (exp == NULL) { 147 if (exp == NULL) {
148 nf_ct_helper_log(skb, ct, "cannot alloc expectation");
148 ret = NF_DROP; 149 ret = NF_DROP;
149 goto out; 150 goto out;
150 } 151 }
@@ -158,8 +159,10 @@ static int amanda_help(struct sk_buff *skb,
158 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) 159 if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
159 ret = nf_nat_amanda(skb, ctinfo, protoff, 160 ret = nf_nat_amanda(skb, ctinfo, protoff,
160 off - dataoff, len, exp); 161 off - dataoff, len, exp);
161 else if (nf_ct_expect_related(exp) != 0) 162 else if (nf_ct_expect_related(exp) != 0) {
163 nf_ct_helper_log(skb, ct, "cannot add expectation");
162 ret = NF_DROP; 164 ret = NF_DROP;
165 }
163 nf_ct_expect_put(exp); 166 nf_ct_expect_put(exp);
164 } 167 }
165 168
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 08cdc71d8e87..c8e001a9c45b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -45,6 +45,7 @@
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_timeout.h> 47#include <net/netfilter/nf_conntrack_timeout.h>
48#include <net/netfilter/nf_conntrack_labels.h>
48#include <net/netfilter/nf_nat.h> 49#include <net/netfilter/nf_nat.h>
49#include <net/netfilter/nf_nat_core.h> 50#include <net/netfilter/nf_nat_core.h>
50 51
@@ -763,6 +764,7 @@ void nf_conntrack_free(struct nf_conn *ct)
763} 764}
764EXPORT_SYMBOL_GPL(nf_conntrack_free); 765EXPORT_SYMBOL_GPL(nf_conntrack_free);
765 766
767
766/* Allocate a new conntrack: we return -ENOMEM if classification 768/* Allocate a new conntrack: we return -ENOMEM if classification
767 failed due to stress. Otherwise it really is unclassifiable. */ 769 failed due to stress. Otherwise it really is unclassifiable. */
768static struct nf_conntrack_tuple_hash * 770static struct nf_conntrack_tuple_hash *
@@ -809,6 +811,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
809 811
810 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 812 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
811 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 813 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
814 nf_ct_labels_ext_add(ct);
812 815
813 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; 816 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
814 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, 817 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -1331,18 +1334,42 @@ static int untrack_refs(void)
1331 return cnt; 1334 return cnt;
1332} 1335}
1333 1336
1334static void nf_conntrack_cleanup_init_net(void) 1337void nf_conntrack_cleanup_start(void)
1335{ 1338{
1339 RCU_INIT_POINTER(ip_ct_attach, NULL);
1340}
1341
1342void nf_conntrack_cleanup_end(void)
1343{
1344 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1336 while (untrack_refs() > 0) 1345 while (untrack_refs() > 0)
1337 schedule(); 1346 schedule();
1338 1347
1339#ifdef CONFIG_NF_CONNTRACK_ZONES 1348#ifdef CONFIG_NF_CONNTRACK_ZONES
1340 nf_ct_extend_unregister(&nf_ct_zone_extend); 1349 nf_ct_extend_unregister(&nf_ct_zone_extend);
1341#endif 1350#endif
1351 nf_conntrack_proto_fini();
1352 nf_conntrack_labels_fini();
1353 nf_conntrack_helper_fini();
1354 nf_conntrack_timeout_fini();
1355 nf_conntrack_ecache_fini();
1356 nf_conntrack_tstamp_fini();
1357 nf_conntrack_acct_fini();
1358 nf_conntrack_expect_fini();
1342} 1359}
1343 1360
1344static void nf_conntrack_cleanup_net(struct net *net) 1361/*
1362 * Mishearing the voices in his head, our hero wonders how he's
1363 * supposed to kill the mall.
1364 */
1365void nf_conntrack_cleanup_net(struct net *net)
1345{ 1366{
1367 /*
1368 * This makes sure all current packets have passed through
1369 * netfilter framework. Roll on, two-stage module
1370 * delete...
1371 */
1372 synchronize_net();
1346 i_see_dead_people: 1373 i_see_dead_people:
1347 nf_ct_iterate_cleanup(net, kill_all, NULL); 1374 nf_ct_iterate_cleanup(net, kill_all, NULL);
1348 nf_ct_release_dying_list(net); 1375 nf_ct_release_dying_list(net);
@@ -1352,37 +1379,17 @@ static void nf_conntrack_cleanup_net(struct net *net)
1352 } 1379 }
1353 1380
1354 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1381 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1355 nf_conntrack_helper_fini(net); 1382 nf_conntrack_proto_pernet_fini(net);
1356 nf_conntrack_timeout_fini(net); 1383 nf_conntrack_helper_pernet_fini(net);
1357 nf_conntrack_ecache_fini(net); 1384 nf_conntrack_ecache_pernet_fini(net);
1358 nf_conntrack_tstamp_fini(net); 1385 nf_conntrack_tstamp_pernet_fini(net);
1359 nf_conntrack_acct_fini(net); 1386 nf_conntrack_acct_pernet_fini(net);
1360 nf_conntrack_expect_fini(net); 1387 nf_conntrack_expect_pernet_fini(net);
1361 kmem_cache_destroy(net->ct.nf_conntrack_cachep); 1388 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1362 kfree(net->ct.slabname); 1389 kfree(net->ct.slabname);
1363 free_percpu(net->ct.stat); 1390 free_percpu(net->ct.stat);
1364} 1391}
1365 1392
1366/* Mishearing the voices in his head, our hero wonders how he's
1367 supposed to kill the mall. */
1368void nf_conntrack_cleanup(struct net *net)
1369{
1370 if (net_eq(net, &init_net))
1371 RCU_INIT_POINTER(ip_ct_attach, NULL);
1372
1373 /* This makes sure all current packets have passed through
1374 netfilter framework. Roll on, two-stage module
1375 delete... */
1376 synchronize_net();
1377 nf_conntrack_proto_fini(net);
1378 nf_conntrack_cleanup_net(net);
1379
1380 if (net_eq(net, &init_net)) {
1381 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1382 nf_conntrack_cleanup_init_net();
1383 }
1384}
1385
1386void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) 1393void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1387{ 1394{
1388 struct hlist_nulls_head *hash; 1395 struct hlist_nulls_head *hash;
@@ -1473,7 +1480,7 @@ void nf_ct_untracked_status_or(unsigned long bits)
1473} 1480}
1474EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); 1481EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1475 1482
1476static int nf_conntrack_init_init_net(void) 1483int nf_conntrack_init_start(void)
1477{ 1484{
1478 int max_factor = 8; 1485 int max_factor = 8;
1479 int ret, cpu; 1486 int ret, cpu;
@@ -1500,11 +1507,44 @@ static int nf_conntrack_init_init_net(void)
1500 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", 1507 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1501 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1508 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1502 nf_conntrack_max); 1509 nf_conntrack_max);
1510
1511 ret = nf_conntrack_expect_init();
1512 if (ret < 0)
1513 goto err_expect;
1514
1515 ret = nf_conntrack_acct_init();
1516 if (ret < 0)
1517 goto err_acct;
1518
1519 ret = nf_conntrack_tstamp_init();
1520 if (ret < 0)
1521 goto err_tstamp;
1522
1523 ret = nf_conntrack_ecache_init();
1524 if (ret < 0)
1525 goto err_ecache;
1526
1527 ret = nf_conntrack_timeout_init();
1528 if (ret < 0)
1529 goto err_timeout;
1530
1531 ret = nf_conntrack_helper_init();
1532 if (ret < 0)
1533 goto err_helper;
1534
1535 ret = nf_conntrack_labels_init();
1536 if (ret < 0)
1537 goto err_labels;
1538
1503#ifdef CONFIG_NF_CONNTRACK_ZONES 1539#ifdef CONFIG_NF_CONNTRACK_ZONES
1504 ret = nf_ct_extend_register(&nf_ct_zone_extend); 1540 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1505 if (ret < 0) 1541 if (ret < 0)
1506 goto err_extend; 1542 goto err_extend;
1507#endif 1543#endif
1544 ret = nf_conntrack_proto_init();
1545 if (ret < 0)
1546 goto err_proto;
1547
1508 /* Set up fake conntrack: to never be deleted, not in any hashes */ 1548 /* Set up fake conntrack: to never be deleted, not in any hashes */
1509 for_each_possible_cpu(cpu) { 1549 for_each_possible_cpu(cpu) {
1510 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); 1550 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
@@ -1515,25 +1555,53 @@ static int nf_conntrack_init_init_net(void)
1515 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1555 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1516 return 0; 1556 return 0;
1517 1557
1558err_proto:
1518#ifdef CONFIG_NF_CONNTRACK_ZONES 1559#ifdef CONFIG_NF_CONNTRACK_ZONES
1560 nf_ct_extend_unregister(&nf_ct_zone_extend);
1519err_extend: 1561err_extend:
1520#endif 1562#endif
1563 nf_conntrack_labels_fini();
1564err_labels:
1565 nf_conntrack_helper_fini();
1566err_helper:
1567 nf_conntrack_timeout_fini();
1568err_timeout:
1569 nf_conntrack_ecache_fini();
1570err_ecache:
1571 nf_conntrack_tstamp_fini();
1572err_tstamp:
1573 nf_conntrack_acct_fini();
1574err_acct:
1575 nf_conntrack_expect_fini();
1576err_expect:
1521 return ret; 1577 return ret;
1522} 1578}
1523 1579
1580void nf_conntrack_init_end(void)
1581{
1582 /* For use by REJECT target */
1583 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1584 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1585
1586 /* Howto get NAT offsets */
1587 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1588}
1589
1524/* 1590/*
1525 * We need to use special "null" values, not used in hash table 1591 * We need to use special "null" values, not used in hash table
1526 */ 1592 */
1527#define UNCONFIRMED_NULLS_VAL ((1<<30)+0) 1593#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
1528#define DYING_NULLS_VAL ((1<<30)+1) 1594#define DYING_NULLS_VAL ((1<<30)+1)
1595#define TEMPLATE_NULLS_VAL ((1<<30)+2)
1529 1596
1530static int nf_conntrack_init_net(struct net *net) 1597int nf_conntrack_init_net(struct net *net)
1531{ 1598{
1532 int ret; 1599 int ret;
1533 1600
1534 atomic_set(&net->ct.count, 0); 1601 atomic_set(&net->ct.count, 0);
1535 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); 1602 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1536 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); 1603 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1604 INIT_HLIST_NULLS_HEAD(&net->ct.tmpl, TEMPLATE_NULLS_VAL);
1537 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1605 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1538 if (!net->ct.stat) { 1606 if (!net->ct.stat) {
1539 ret = -ENOMEM; 1607 ret = -ENOMEM;
@@ -1562,35 +1630,36 @@ static int nf_conntrack_init_net(struct net *net)
1562 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); 1630 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1563 goto err_hash; 1631 goto err_hash;
1564 } 1632 }
1565 ret = nf_conntrack_expect_init(net); 1633 ret = nf_conntrack_expect_pernet_init(net);
1566 if (ret < 0) 1634 if (ret < 0)
1567 goto err_expect; 1635 goto err_expect;
1568 ret = nf_conntrack_acct_init(net); 1636 ret = nf_conntrack_acct_pernet_init(net);
1569 if (ret < 0) 1637 if (ret < 0)
1570 goto err_acct; 1638 goto err_acct;
1571 ret = nf_conntrack_tstamp_init(net); 1639 ret = nf_conntrack_tstamp_pernet_init(net);
1572 if (ret < 0) 1640 if (ret < 0)
1573 goto err_tstamp; 1641 goto err_tstamp;
1574 ret = nf_conntrack_ecache_init(net); 1642 ret = nf_conntrack_ecache_pernet_init(net);
1575 if (ret < 0) 1643 if (ret < 0)
1576 goto err_ecache; 1644 goto err_ecache;
1577 ret = nf_conntrack_timeout_init(net); 1645 ret = nf_conntrack_helper_pernet_init(net);
1578 if (ret < 0)
1579 goto err_timeout;
1580 ret = nf_conntrack_helper_init(net);
1581 if (ret < 0) 1646 if (ret < 0)
1582 goto err_helper; 1647 goto err_helper;
1648 ret = nf_conntrack_proto_pernet_init(net);
1649 if (ret < 0)
1650 goto err_proto;
1583 return 0; 1651 return 0;
1652
1653err_proto:
1654 nf_conntrack_helper_pernet_fini(net);
1584err_helper: 1655err_helper:
1585 nf_conntrack_timeout_fini(net); 1656 nf_conntrack_ecache_pernet_fini(net);
1586err_timeout:
1587 nf_conntrack_ecache_fini(net);
1588err_ecache: 1657err_ecache:
1589 nf_conntrack_tstamp_fini(net); 1658 nf_conntrack_tstamp_pernet_fini(net);
1590err_tstamp: 1659err_tstamp:
1591 nf_conntrack_acct_fini(net); 1660 nf_conntrack_acct_pernet_fini(net);
1592err_acct: 1661err_acct:
1593 nf_conntrack_expect_fini(net); 1662 nf_conntrack_expect_pernet_fini(net);
1594err_expect: 1663err_expect:
1595 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1664 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1596err_hash: 1665err_hash:
@@ -1607,38 +1676,3 @@ s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1607 enum ip_conntrack_dir dir, 1676 enum ip_conntrack_dir dir,
1608 u32 seq); 1677 u32 seq);
1609EXPORT_SYMBOL_GPL(nf_ct_nat_offset); 1678EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
1610
1611int nf_conntrack_init(struct net *net)
1612{
1613 int ret;
1614
1615 if (net_eq(net, &init_net)) {
1616 ret = nf_conntrack_init_init_net();
1617 if (ret < 0)
1618 goto out_init_net;
1619 }
1620 ret = nf_conntrack_proto_init(net);
1621 if (ret < 0)
1622 goto out_proto;
1623 ret = nf_conntrack_init_net(net);
1624 if (ret < 0)
1625 goto out_net;
1626
1627 if (net_eq(net, &init_net)) {
1628 /* For use by REJECT target */
1629 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1630 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1631
1632 /* Howto get NAT offsets */
1633 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1634 }
1635 return 0;
1636
1637out_net:
1638 nf_conntrack_proto_fini(net);
1639out_proto:
1640 if (net_eq(net, &init_net))
1641 nf_conntrack_cleanup_init_net();
1642out_init_net:
1643 return ret;
1644}
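The net effect of the core.c rework: nf_conntrack_init(net), with its net_eq(&init_net) branches, dissolves into explicit phases. nf_conntrack_init_start() registers the global extensions, caches and protocols exactly once; nf_conntrack_init_net() builds per-namespace state; nf_conntrack_init_end() publishes the attach/destroy hooks only once everything is ready. Cleanup mirrors this with cleanup_start/cleanup_net/cleanup_end. A sketch of how a caller sequences the phases (the real call sites live in nf_conntrack_standalone.c, outside this excerpt; sketch_net_ops is a hypothetical pernet_operations whose .init calls nf_conntrack_init_net()):

	static int __init sketch_init(void)
	{
		int ret = nf_conntrack_init_start();	/* global, exactly once */
		if (ret < 0)
			return ret;
		ret = register_pernet_subsys(&sketch_net_ops);
		if (ret < 0) {
			nf_conntrack_cleanup_end();	/* undo the global part */
			return ret;
		}
		nf_conntrack_init_end();	/* publish ip_ct_attach etc. */
		return 0;
	}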
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index faa978f1714b..b5d2eb8bf0d5 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -233,38 +233,27 @@ static void nf_conntrack_event_fini_sysctl(struct net *net)
233} 233}
234#endif /* CONFIG_SYSCTL */ 234#endif /* CONFIG_SYSCTL */
235 235
236int nf_conntrack_ecache_init(struct net *net) 236int nf_conntrack_ecache_pernet_init(struct net *net)
237{ 237{
238 int ret;
239
240 net->ct.sysctl_events = nf_ct_events; 238 net->ct.sysctl_events = nf_ct_events;
241 net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout; 239 net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
240 return nf_conntrack_event_init_sysctl(net);
241}
242 242
243 if (net_eq(net, &init_net)) { 243void nf_conntrack_ecache_pernet_fini(struct net *net)
244 ret = nf_ct_extend_register(&event_extend); 244{
245 if (ret < 0) { 245 nf_conntrack_event_fini_sysctl(net);
246 printk(KERN_ERR "nf_ct_event: Unable to register " 246}
247 "event extension.\n");
248 goto out_extend_register;
249 }
250 }
251 247
252 ret = nf_conntrack_event_init_sysctl(net); 248int nf_conntrack_ecache_init(void)
249{
250 int ret = nf_ct_extend_register(&event_extend);
253 if (ret < 0) 251 if (ret < 0)
254 goto out_sysctl; 252 pr_err("nf_ct_event: Unable to register event extension.\n");
255
256 return 0;
257
258out_sysctl:
259 if (net_eq(net, &init_net))
260 nf_ct_extend_unregister(&event_extend);
261out_extend_register:
262 return ret; 253 return ret;
263} 254}
264 255
265void nf_conntrack_ecache_fini(struct net *net) 256void nf_conntrack_ecache_fini(void)
266{ 257{
267 nf_conntrack_event_fini_sysctl(net); 258 nf_ct_extend_unregister(&event_extend);
268 if (net_eq(net, &init_net))
269 nf_ct_extend_unregister(&event_extend);
270} 259}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 527651a53a45..3921e5bc1235 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -571,7 +571,8 @@ static int exp_proc_init(struct net *net)
571#ifdef CONFIG_NF_CONNTRACK_PROCFS 571#ifdef CONFIG_NF_CONNTRACK_PROCFS
572 struct proc_dir_entry *proc; 572 struct proc_dir_entry *proc;
573 573
574 proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops); 574 proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
575 &exp_file_ops);
575 if (!proc) 576 if (!proc)
576 return -ENOMEM; 577 return -ENOMEM;
577#endif /* CONFIG_NF_CONNTRACK_PROCFS */ 578#endif /* CONFIG_NF_CONNTRACK_PROCFS */
@@ -581,59 +582,56 @@ static int exp_proc_init(struct net *net)
581static void exp_proc_remove(struct net *net) 582static void exp_proc_remove(struct net *net)
582{ 583{
583#ifdef CONFIG_NF_CONNTRACK_PROCFS 584#ifdef CONFIG_NF_CONNTRACK_PROCFS
584 proc_net_remove(net, "nf_conntrack_expect"); 585 remove_proc_entry("nf_conntrack_expect", net->proc_net);
585#endif /* CONFIG_NF_CONNTRACK_PROCFS */ 586#endif /* CONFIG_NF_CONNTRACK_PROCFS */
586} 587}
587 588
588module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); 589module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
589 590
590int nf_conntrack_expect_init(struct net *net) 591int nf_conntrack_expect_pernet_init(struct net *net)
591{ 592{
592 int err = -ENOMEM; 593 int err = -ENOMEM;
593 594
594 if (net_eq(net, &init_net)) {
595 if (!nf_ct_expect_hsize) {
596 nf_ct_expect_hsize = net->ct.htable_size / 256;
597 if (!nf_ct_expect_hsize)
598 nf_ct_expect_hsize = 1;
599 }
600 nf_ct_expect_max = nf_ct_expect_hsize * 4;
601 }
602
603 net->ct.expect_count = 0; 595 net->ct.expect_count = 0;
604 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); 596 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
605 if (net->ct.expect_hash == NULL) 597 if (net->ct.expect_hash == NULL)
606 goto err1; 598 goto err1;
607 599
608 if (net_eq(net, &init_net)) {
609 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
610 sizeof(struct nf_conntrack_expect),
611 0, 0, NULL);
612 if (!nf_ct_expect_cachep)
613 goto err2;
614 }
615
616 err = exp_proc_init(net); 600 err = exp_proc_init(net);
617 if (err < 0) 601 if (err < 0)
618 goto err3; 602 goto err2;
619 603
620 return 0; 604 return 0;
621
622err3:
623 if (net_eq(net, &init_net))
624 kmem_cache_destroy(nf_ct_expect_cachep);
625err2: 605err2:
626 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); 606 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
627err1: 607err1:
628 return err; 608 return err;
629} 609}
630 610
631void nf_conntrack_expect_fini(struct net *net) 611void nf_conntrack_expect_pernet_fini(struct net *net)
632{ 612{
633 exp_proc_remove(net); 613 exp_proc_remove(net);
634 if (net_eq(net, &init_net)) {
635 rcu_barrier(); /* Wait for call_rcu() before destroy */
636 kmem_cache_destroy(nf_ct_expect_cachep);
637 }
638 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); 614 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
639} 615}
616
617int nf_conntrack_expect_init(void)
618{
619 if (!nf_ct_expect_hsize) {
620 nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
621 if (!nf_ct_expect_hsize)
622 nf_ct_expect_hsize = 1;
623 }
624 nf_ct_expect_max = nf_ct_expect_hsize * 4;
625 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
626 sizeof(struct nf_conntrack_expect),
627 0, 0, NULL);
628 if (!nf_ct_expect_cachep)
629 return -ENOMEM;
630 return 0;
631}
632
633void nf_conntrack_expect_fini(void)
634{
635 rcu_barrier(); /* Wait for call_rcu() before destroy */
636 kmem_cache_destroy(nf_ct_expect_cachep);
637}
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 1ce3befb7c8a..62fb8faedb80 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -435,8 +435,8 @@ skip_nl_seq:
435 connection tracking, not packet filtering. 435 connection tracking, not packet filtering.
436 However, it is necessary for accurate tracking in 436 However, it is necessary for accurate tracking in
437 this case. */ 437 this case. */
438 pr_debug("conntrack_ftp: partial %s %u+%u\n", 438 nf_ct_helper_log(skb, ct, "partial matching of `%s'",
439 search[dir][i].pattern, ntohl(th->seq), datalen); 439 search[dir][i].pattern);
440 ret = NF_DROP; 440 ret = NF_DROP;
441 goto out; 441 goto out;
442 } else if (found == 0) { /* No match */ 442 } else if (found == 0) { /* No match */
@@ -450,6 +450,7 @@ skip_nl_seq:
450 450
451 exp = nf_ct_expect_alloc(ct); 451 exp = nf_ct_expect_alloc(ct);
452 if (exp == NULL) { 452 if (exp == NULL) {
453 nf_ct_helper_log(skb, ct, "cannot alloc expectation");
453 ret = NF_DROP; 454 ret = NF_DROP;
454 goto out; 455 goto out;
455 } 456 }
@@ -500,9 +501,10 @@ skip_nl_seq:
500 protoff, matchoff, matchlen, exp); 501 protoff, matchoff, matchlen, exp);
501 else { 502 else {
502 /* Can't expect this? Best to drop packet now. */ 503 /* Can't expect this? Best to drop packet now. */
503 if (nf_ct_expect_related(exp) != 0) 504 if (nf_ct_expect_related(exp) != 0) {
505 nf_ct_helper_log(skb, ct, "cannot add expectation");
504 ret = NF_DROP; 506 ret = NF_DROP;
505 else 507 } else
506 ret = NF_ACCEPT; 508 ret = NF_ACCEPT;
507 } 509 }
508 510
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 962795e839ab..7df7b36d2e24 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -623,7 +623,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
623 623
624 drop: 624 drop:
625 spin_unlock_bh(&nf_h323_lock); 625 spin_unlock_bh(&nf_h323_lock);
626 net_info_ratelimited("nf_ct_h245: packet dropped\n"); 626 nf_ct_helper_log(skb, ct, "cannot process H.245 message");
627 return NF_DROP; 627 return NF_DROP;
628} 628}
629 629
@@ -1197,7 +1197,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1197 1197
1198 drop: 1198 drop:
1199 spin_unlock_bh(&nf_h323_lock); 1199 spin_unlock_bh(&nf_h323_lock);
1200 net_info_ratelimited("nf_ct_q931: packet dropped\n"); 1200 nf_ct_helper_log(skb, ct, "cannot process Q.931 message");
1201 return NF_DROP; 1201 return NF_DROP;
1202} 1202}
1203 1203
@@ -1795,7 +1795,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
1795 1795
1796 drop: 1796 drop:
1797 spin_unlock_bh(&nf_h323_lock); 1797 spin_unlock_bh(&nf_h323_lock);
1798 net_info_ratelimited("nf_ct_ras: packet dropped\n"); 1798 nf_ct_helper_log(skb, ct, "cannot process RAS message");
1799 return NF_DROP; 1799 return NF_DROP;
1800} 1800}
1801 1801
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 884f2b39319a..013cdf69fe29 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack_helper.h> 28#include <net/netfilter/nf_conntrack_helper.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_extend.h> 30#include <net/netfilter/nf_conntrack_extend.h>
31#include <net/netfilter/nf_log.h>
31 32
32static DEFINE_MUTEX(nf_ct_helper_mutex); 33static DEFINE_MUTEX(nf_ct_helper_mutex);
33struct hlist_head *nf_ct_helper_hash __read_mostly; 34struct hlist_head *nf_ct_helper_hash __read_mostly;
@@ -236,7 +237,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
236 /* We only allow helper re-assignment of the same sort since 237 /* We only allow helper re-assignment of the same sort since
237 * we cannot reallocate the helper extension area. 238 * we cannot reallocate the helper extension area.
238 */ 239 */
239 if (help->helper != helper) { 240 struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
241
242 if (tmp && tmp->help != helper->help) {
240 RCU_INIT_POINTER(help->helper, NULL); 243 RCU_INIT_POINTER(help->helper, NULL);
241 goto out; 244 goto out;
242 } 245 }
@@ -332,6 +335,24 @@ nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
332} 335}
333EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); 336EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
334 337
338__printf(3, 4)
339void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
340 const char *fmt, ...)
341{
342 const struct nf_conn_help *help;
343 const struct nf_conntrack_helper *helper;
344
345 /* Called from the helper function, this call never fails */
346 help = nfct_help(ct);
347
348 /* rcu_read_lock()ed by nf_hook_slow */
349 helper = rcu_dereference(help->helper);
350
351 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
352 "nf_ct_%s: dropping packet: %s ", helper->name, fmt);
353}
354EXPORT_SYMBOL_GPL(nf_ct_helper_log);
355
335int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 356int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
336{ 357{
337 int ret = 0; 358 int ret = 0;
@@ -423,44 +444,41 @@ static struct nf_ct_ext_type helper_extend __read_mostly = {
423 .id = NF_CT_EXT_HELPER, 444 .id = NF_CT_EXT_HELPER,
424}; 445};
425 446
426int nf_conntrack_helper_init(struct net *net) 447int nf_conntrack_helper_pernet_init(struct net *net)
427{ 448{
428 int err;
429
430 net->ct.auto_assign_helper_warned = false; 449 net->ct.auto_assign_helper_warned = false;
431 net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper; 450 net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
451 return nf_conntrack_helper_init_sysctl(net);
452}
432 453
433 if (net_eq(net, &init_net)) { 454void nf_conntrack_helper_pernet_fini(struct net *net)
434 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ 455{
435 nf_ct_helper_hash = 456 nf_conntrack_helper_fini_sysctl(net);
436 nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); 457}
437 if (!nf_ct_helper_hash)
438 return -ENOMEM;
439 458
440 err = nf_ct_extend_register(&helper_extend); 459int nf_conntrack_helper_init(void)
441 if (err < 0) 460{
442 goto err1; 461 int ret;
462 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
463 nf_ct_helper_hash =
464 nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
465 if (!nf_ct_helper_hash)
466 return -ENOMEM;
467
468 ret = nf_ct_extend_register(&helper_extend);
469 if (ret < 0) {
470 pr_err("nf_ct_helper: Unable to register helper extension.\n");
471 goto out_extend;
443 } 472 }
444 473
445 err = nf_conntrack_helper_init_sysctl(net);
446 if (err < 0)
447 goto out_sysctl;
448
449 return 0; 474 return 0;
450 475out_extend:
451out_sysctl:
452 if (net_eq(net, &init_net))
453 nf_ct_extend_unregister(&helper_extend);
454err1:
455 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); 476 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
456 return err; 477 return ret;
457} 478}
458 479
459void nf_conntrack_helper_fini(struct net *net) 480void nf_conntrack_helper_fini(void)
460{ 481{
461 nf_conntrack_helper_fini_sysctl(net); 482 nf_ct_extend_unregister(&helper_extend);
462 if (net_eq(net, &init_net)) { 483 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
463 nf_ct_extend_unregister(&helper_extend);
464 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
465 }
466} 484}
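Two independent improvements in helper.c. First, the re-assignment check in __nf_ct_try_assign_helper() now compares help functions rather than helper structs: after a helper module is reloaded, the struct address changes even though it is the "same sort" of helper, so help->helper != helper wrongly severed conntracks from their helpers; tmp->help != helper->help rejects only genuinely different helpers. Second, nf_ct_helper_log() gives every helper a single logging entry point that prefixes the helper's name and routes through nf_log_packet(), replacing the assorted pr_debug()/net_info_ratelimited() call sites seen in the amanda, ftp and h323 hunks above. The usage shape (as in those hunks):

	if (nf_ct_expect_related(exp) != 0) {
		nf_ct_helper_log(skb, ct, "cannot add expectation");
		ret = NF_DROP;
	}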
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 3b20aa77cfc8..70985c5d0ffa 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -194,6 +194,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
194 194
195 exp = nf_ct_expect_alloc(ct); 195 exp = nf_ct_expect_alloc(ct);
196 if (exp == NULL) { 196 if (exp == NULL) {
197 nf_ct_helper_log(skb, ct,
198 "cannot alloc expectation");
197 ret = NF_DROP; 199 ret = NF_DROP;
198 goto out; 200 goto out;
199 } 201 }
@@ -210,8 +212,11 @@ static int help(struct sk_buff *skb, unsigned int protoff,
210 addr_beg_p - ib_ptr, 212 addr_beg_p - ib_ptr,
211 addr_end_p - addr_beg_p, 213 addr_end_p - addr_beg_p,
212 exp); 214 exp);
213 else if (nf_ct_expect_related(exp) != 0) 215 else if (nf_ct_expect_related(exp) != 0) {
216 nf_ct_helper_log(skb, ct,
217 "cannot add expectation");
214 ret = NF_DROP; 218 ret = NF_DROP;
219 }
215 nf_ct_expect_put(exp); 220 nf_ct_expect_put(exp);
216 goto out; 221 goto out;
217 } 222 }
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
new file mode 100644
index 000000000000..8fe2e99428b7
--- /dev/null
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -0,0 +1,112 @@
1/*
2 * test/set flag bits stored in conntrack extension area.
3 *
4 * (C) 2013 Astaro GmbH & Co KG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/ctype.h>
12#include <linux/export.h>
13#include <linux/jhash.h>
14#include <linux/spinlock.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17
18#include <net/netfilter/nf_conntrack_ecache.h>
19#include <net/netfilter/nf_conntrack_labels.h>
20
21static unsigned int label_bits(const struct nf_conn_labels *l)
22{
23 unsigned int longs = l->words;
24 return longs * BITS_PER_LONG;
25}
26
27bool nf_connlabel_match(const struct nf_conn *ct, u16 bit)
28{
29 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
30
31 if (!labels)
32 return false;
33
34 return bit < label_bits(labels) && test_bit(bit, labels->bits);
35}
36EXPORT_SYMBOL_GPL(nf_connlabel_match);
37
38int nf_connlabel_set(struct nf_conn *ct, u16 bit)
39{
40 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
41
42 if (!labels || bit >= label_bits(labels))
43 return -ENOSPC;
44
45 if (test_bit(bit, labels->bits))
46 return 0;
47
 48 if (!test_and_set_bit(bit, labels->bits))
49 nf_conntrack_event_cache(IPCT_LABEL, ct);
50
51 return 0;
52}
53EXPORT_SYMBOL_GPL(nf_connlabel_set);
54
55#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
56static void replace_u32(u32 *address, u32 mask, u32 new)
57{
58 u32 old, tmp;
59
60 do {
61 old = *address;
62 tmp = (old & mask) ^ new;
63 } while (cmpxchg(address, old, tmp) != old);
64}
65
66int nf_connlabels_replace(struct nf_conn *ct,
67 const u32 *data,
68 const u32 *mask, unsigned int words32)
69{
70 struct nf_conn_labels *labels;
71 unsigned int size, i;
72 u32 *dst;
73
74 labels = nf_ct_labels_find(ct);
75 if (!labels)
76 return -ENOSPC;
77
78 size = labels->words * sizeof(long);
79 if (size < (words32 * sizeof(u32)))
80 words32 = size / sizeof(u32);
81
82 dst = (u32 *) labels->bits;
83 if (words32) {
84 for (i = 0; i < words32; i++)
85 replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
86 }
87
88 size /= sizeof(u32);
89 for (i = words32; i < size; i++) /* pad */
90 replace_u32(&dst[i], 0, 0);
91
92 nf_conntrack_event_cache(IPCT_LABEL, ct);
93 return 0;
94}
95EXPORT_SYMBOL_GPL(nf_connlabels_replace);
96#endif
97
98static struct nf_ct_ext_type labels_extend __read_mostly = {
99 .len = sizeof(struct nf_conn_labels),
100 .align = __alignof__(struct nf_conn_labels),
101 .id = NF_CT_EXT_LABELS,
102};
103
104int nf_conntrack_labels_init(void)
105{
106 return nf_ct_extend_register(&labels_extend);
107}
108
109void nf_conntrack_labels_fini(void)
110{
111 nf_ct_extend_unregister(&labels_extend);
112}
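nf_conntrack_labels.c introduces a per-conntrack bitmap extension. nf_connlabel_set()/nf_connlabel_match() operate on single bits (note the negated test_and_set_bit() above: the change event fires only when the bit was newly set). nf_connlabels_replace() applies a (value, mask) pair 32 bits at a time, where replace_u32() retries a cmpxchg() loop so concurrent updaters never take a lock: each stored word becomes (old & ~mask) ^ value, i.e. the masked bits are cleared and the new bits folded in. A hypothetical in-kernel consumer, sketched (bit numbers are arbitrary):

	/* tag a flow, e.g. from a match/target extension */
	if (nf_connlabel_set(ct, 3) < 0)
		pr_debug("no label extension or bit out of range\n");

	/* later, classify packets of flows tagged earlier */
	if (nf_connlabel_match(ct, 3))
		do_something();		/* hypothetical action */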
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4e078cd84d83..5d60e04f9679 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -43,6 +43,7 @@
43#include <net/netfilter/nf_conntrack_acct.h> 43#include <net/netfilter/nf_conntrack_acct.h>
44#include <net/netfilter/nf_conntrack_zones.h> 44#include <net/netfilter/nf_conntrack_zones.h>
45#include <net/netfilter/nf_conntrack_timestamp.h> 45#include <net/netfilter/nf_conntrack_timestamp.h>
46#include <net/netfilter/nf_conntrack_labels.h>
46#ifdef CONFIG_NF_NAT_NEEDED 47#ifdef CONFIG_NF_NAT_NEEDED
47#include <net/netfilter/nf_nat_core.h> 48#include <net/netfilter/nf_nat_core.h>
48#include <net/netfilter/nf_nat_l4proto.h> 49#include <net/netfilter/nf_nat_l4proto.h>
@@ -323,6 +324,40 @@ nla_put_failure:
323#define ctnetlink_dump_secctx(a, b) (0) 324#define ctnetlink_dump_secctx(a, b) (0)
324#endif 325#endif
325 326
327#ifdef CONFIG_NF_CONNTRACK_LABELS
328static int ctnetlink_label_size(const struct nf_conn *ct)
329{
330 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
331
332 if (!labels)
333 return 0;
334 return nla_total_size(labels->words * sizeof(long));
335}
336
337static int
338ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
339{
340 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
341 unsigned int len, i;
342
343 if (!labels)
344 return 0;
345
346 len = labels->words * sizeof(long);
347 i = 0;
348 do {
349 if (labels->bits[i] != 0)
350 return nla_put(skb, CTA_LABELS, len, labels->bits);
351 i++;
352 } while (i < labels->words);
353
354 return 0;
355}
356#else
357#define ctnetlink_dump_labels(a, b) (0)
358#define ctnetlink_label_size(a) (0)
359#endif
360
326#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) 361#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
327 362
328static inline int 363static inline int
@@ -463,6 +498,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
463 ctnetlink_dump_helpinfo(skb, ct) < 0 || 498 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
464 ctnetlink_dump_mark(skb, ct) < 0 || 499 ctnetlink_dump_mark(skb, ct) < 0 ||
465 ctnetlink_dump_secctx(skb, ct) < 0 || 500 ctnetlink_dump_secctx(skb, ct) < 0 ||
501 ctnetlink_dump_labels(skb, ct) < 0 ||
466 ctnetlink_dump_id(skb, ct) < 0 || 502 ctnetlink_dump_id(skb, ct) < 0 ||
467 ctnetlink_dump_use(skb, ct) < 0 || 503 ctnetlink_dump_use(skb, ct) < 0 ||
468 ctnetlink_dump_master(skb, ct) < 0 || 504 ctnetlink_dump_master(skb, ct) < 0 ||
@@ -561,6 +597,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
561 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 597 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
562#endif 598#endif
563 + ctnetlink_proto_size(ct) 599 + ctnetlink_proto_size(ct)
600 + ctnetlink_label_size(ct)
564 ; 601 ;
565} 602}
566 603
@@ -662,6 +699,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
662 && ctnetlink_dump_secctx(skb, ct) < 0) 699 && ctnetlink_dump_secctx(skb, ct) < 0)
663 goto nla_put_failure; 700 goto nla_put_failure;
664#endif 701#endif
702 if (events & (1 << IPCT_LABEL) &&
703 ctnetlink_dump_labels(skb, ct) < 0)
704 goto nla_put_failure;
665 705
666 if (events & (1 << IPCT_RELATED) && 706 if (events & (1 << IPCT_RELATED) &&
667 ctnetlink_dump_master(skb, ct) < 0) 707 ctnetlink_dump_master(skb, ct) < 0)
@@ -921,6 +961,7 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
921 return 0; 961 return 0;
922} 962}
923 963
964#define __CTA_LABELS_MAX_LENGTH ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
924static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { 965static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
925 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED }, 966 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
926 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED }, 967 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
@@ -937,6 +978,10 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
937 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED }, 978 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
938 [CTA_ZONE] = { .type = NLA_U16 }, 979 [CTA_ZONE] = { .type = NLA_U16 },
939 [CTA_MARK_MASK] = { .type = NLA_U32 }, 980 [CTA_MARK_MASK] = { .type = NLA_U32 },
981 [CTA_LABELS] = { .type = NLA_BINARY,
982 .len = __CTA_LABELS_MAX_LENGTH },
983 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
984 .len = __CTA_LABELS_MAX_LENGTH },
940}; 985};
941 986
942static int 987static int
@@ -1211,13 +1256,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1211 if (!parse_nat_setup) { 1256 if (!parse_nat_setup) {
1212#ifdef CONFIG_MODULES 1257#ifdef CONFIG_MODULES
1213 rcu_read_unlock(); 1258 rcu_read_unlock();
1214 nfnl_unlock(); 1259 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1215 if (request_module("nf-nat") < 0) { 1260 if (request_module("nf-nat") < 0) {
1216 nfnl_lock(); 1261 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1217 rcu_read_lock(); 1262 rcu_read_lock();
1218 return -EOPNOTSUPP; 1263 return -EOPNOTSUPP;
1219 } 1264 }
1220 nfnl_lock(); 1265 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1221 rcu_read_lock(); 1266 rcu_read_lock();
1222 if (nfnetlink_parse_nat_setup_hook) 1267 if (nfnetlink_parse_nat_setup_hook)
1223 return -EAGAIN; 1268 return -EAGAIN;
@@ -1229,13 +1274,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1229 if (err == -EAGAIN) { 1274 if (err == -EAGAIN) {
1230#ifdef CONFIG_MODULES 1275#ifdef CONFIG_MODULES
1231 rcu_read_unlock(); 1276 rcu_read_unlock();
1232 nfnl_unlock(); 1277 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1233 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) { 1278 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1234 nfnl_lock(); 1279 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1235 rcu_read_lock(); 1280 rcu_read_lock();
1236 return -EOPNOTSUPP; 1281 return -EOPNOTSUPP;
1237 } 1282 }
1238 nfnl_lock(); 1283 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1239 rcu_read_lock(); 1284 rcu_read_lock();
1240#else 1285#else
1241 err = -EOPNOTSUPP; 1286 err = -EOPNOTSUPP;
@@ -1465,6 +1510,31 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1465#endif 1510#endif
1466 1511
1467static int 1512static int
1513ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1514{
1515#ifdef CONFIG_NF_CONNTRACK_LABELS
1516 size_t len = nla_len(cda[CTA_LABELS]);
1517 const void *mask = cda[CTA_LABELS_MASK];
1518
1519 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1520 return -EINVAL;
1521
1522 if (mask) {
1523 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1524 nla_len(cda[CTA_LABELS_MASK]) != len)
1525 return -EINVAL;
1526 mask = nla_data(cda[CTA_LABELS_MASK]);
1527 }
1528
1529 len /= sizeof(u32);
1530
1531 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1532#else
1533 return -EOPNOTSUPP;
1534#endif
1535}
1536
1537static int
1468ctnetlink_change_conntrack(struct nf_conn *ct, 1538ctnetlink_change_conntrack(struct nf_conn *ct,
1469 const struct nlattr * const cda[]) 1539 const struct nlattr * const cda[])
1470{ 1540{
@@ -1510,6 +1580,11 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
1510 return err; 1580 return err;
1511 } 1581 }
1512#endif 1582#endif
1583 if (cda[CTA_LABELS]) {
1584 err = ctnetlink_attach_labels(ct, cda);
1585 if (err < 0)
1586 return err;
1587 }
1513 1588
1514 return 0; 1589 return 0;
1515} 1590}
@@ -1598,6 +1673,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1598 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 1673 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1599 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 1674 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1600 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 1675 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1676 nf_ct_labels_ext_add(ct);
1677
1601 /* we must add conntrack extensions before confirmation. */ 1678 /* we must add conntrack extensions before confirmation. */
1602 ct->status |= IPS_CONFIRMED; 1679 ct->status |= IPS_CONFIRMED;
1603 1680
@@ -1705,6 +1782,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1705 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1782 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1706 enum ip_conntrack_events events; 1783 enum ip_conntrack_events events;
1707 1784
1785 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1786 return -EINVAL;
1787
1708 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, 1788 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1709 &rtuple, u3); 1789 &rtuple, u3);
1710 if (IS_ERR(ct)) 1790 if (IS_ERR(ct))
@@ -1716,6 +1796,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1716 else 1796 else
1717 events = IPCT_NEW; 1797 events = IPCT_NEW;
1718 1798
1799 if (cda[CTA_LABELS] &&
1800 ctnetlink_attach_labels(ct, cda) == 0)
1801 events |= (1 << IPCT_LABEL);
1802
1719 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1803 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1720 (1 << IPCT_ASSURED) | 1804 (1 << IPCT_ASSURED) |
1721 (1 << IPCT_HELPER) | 1805 (1 << IPCT_HELPER) |
@@ -1983,6 +2067,8 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
1983 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) 2067 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
1984 goto nla_put_failure; 2068 goto nla_put_failure;
1985#endif 2069#endif
2070 if (ctnetlink_dump_labels(skb, ct) < 0)
2071 goto nla_put_failure;
1986 rcu_read_unlock(); 2072 rcu_read_unlock();
1987 return 0; 2073 return 0;
1988 2074
@@ -2011,6 +2097,11 @@ ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2011 if (err < 0) 2097 if (err < 0)
2012 return err; 2098 return err;
2013 } 2099 }
2100 if (cda[CTA_LABELS]) {
2101 err = ctnetlink_attach_labels(ct, cda);
2102 if (err < 0)
2103 return err;
2104 }
2014#if defined(CONFIG_NF_CONNTRACK_MARK) 2105#if defined(CONFIG_NF_CONNTRACK_MARK)
2015 if (cda[CTA_MARK]) 2106 if (cda[CTA_MARK])
2016 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 2107 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
@@ -2624,7 +2715,7 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2624 if (!help) { 2715 if (!help) {
2625 if (!cda[CTA_EXPECT_TIMEOUT]) { 2716 if (!cda[CTA_EXPECT_TIMEOUT]) {
2626 err = -EINVAL; 2717 err = -EINVAL;
2627 goto out; 2718 goto err_out;
2628 } 2719 }
2629 exp->timeout.expires = 2720 exp->timeout.expires =
2630 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; 2721 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
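On the ctnetlink side, labels are exposed as the CTA_LABELS/CTA_LABELS_MASK binary attributes, capped by policy at (XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE bytes and validated in ctnetlink_attach_labels(): the payload length must be a multiple of 4, and a mask, if supplied, must match it exactly. From userspace the attributes are plain byte arrays; a libmnl-flavoured sketch of building them (assumed usage, not taken from this series):

	uint32_t bits[4] = { 1u << 3, 0, 0, 0 };	/* set label bit 3 */
	uint32_t mask[4] = { 0xff,    0, 0, 0 };	/* only touch bits 0..7 */

	mnl_attr_put(nlh, CTA_LABELS, sizeof(bits), bits);
	mnl_attr_put(nlh, CTA_LABELS_MASK, sizeof(mask), mask);

Separately, note the ctnetlink_create_expect() hunk: the missing-timeout error path now jumps to err_out rather than out, so the freshly allocated expectation is released instead of leaked.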
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index cc7669ef0b95..e6678d2b624e 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -14,7 +14,7 @@
14 * Limitations: 14 * Limitations:
15 * - We blindly assume that control connections are always 15 * - We blindly assume that control connections are always
16 * established in PNS->PAC direction. This is a violation 16 * established in PNS->PAC direction. This is a violation
17 * of RFFC2673 17 * of RFC 2637
18 * - We can only support one single call within each session 18 * - We can only support one single call within each session
19 * TODO: 19 * TODO:
20 * - testing of incoming PPTP calls 20 * - testing of incoming PPTP calls
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 51e928db48c8..58ab4050830c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -212,8 +212,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct net *net,
212#endif 212#endif
213} 213}
214 214
215static int 215int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto)
216nf_conntrack_l3proto_register_net(struct nf_conntrack_l3proto *proto)
217{ 216{
218 int ret = 0; 217 int ret = 0;
219 struct nf_conntrack_l3proto *old; 218 struct nf_conntrack_l3proto *old;
@@ -242,8 +241,9 @@ out_unlock:
242 return ret; 241 return ret;
243 242
244} 243}
244EXPORT_SYMBOL_GPL(nf_ct_l3proto_register);
245 245
246int nf_conntrack_l3proto_register(struct net *net, 246int nf_ct_l3proto_pernet_register(struct net *net,
247 struct nf_conntrack_l3proto *proto) 247 struct nf_conntrack_l3proto *proto)
248{ 248{
249 int ret = 0; 249 int ret = 0;
@@ -254,22 +254,11 @@ int nf_conntrack_l3proto_register(struct net *net,
254 return ret; 254 return ret;
255 } 255 }
256 256
257 ret = nf_ct_l3proto_register_sysctl(net, proto); 257 return nf_ct_l3proto_register_sysctl(net, proto);
258 if (ret < 0)
259 return ret;
260
261 if (net == &init_net) {
262 ret = nf_conntrack_l3proto_register_net(proto);
263 if (ret < 0)
264 nf_ct_l3proto_unregister_sysctl(net, proto);
265 }
266
267 return ret;
268} 258}
269EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register); 259EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_register);
270 260
271static void 261void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto)
272nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
273{ 262{
274 BUG_ON(proto->l3proto >= AF_MAX); 263 BUG_ON(proto->l3proto >= AF_MAX);
275 264
@@ -283,19 +272,17 @@ nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
283 272
284 synchronize_rcu(); 273 synchronize_rcu();
285} 274}
275EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister);
286 276
287void nf_conntrack_l3proto_unregister(struct net *net, 277void nf_ct_l3proto_pernet_unregister(struct net *net,
288 struct nf_conntrack_l3proto *proto) 278 struct nf_conntrack_l3proto *proto)
289{ 279{
290 if (net == &init_net)
291 nf_conntrack_l3proto_unregister_net(proto);
292
293 nf_ct_l3proto_unregister_sysctl(net, proto); 280 nf_ct_l3proto_unregister_sysctl(net, proto);
294 281
295 /* Remove all conntrack entries for this protocol */ 282 /* Remove all conntrack entries for this protocol */
296 nf_ct_iterate_cleanup(net, kill_l3proto, proto); 283 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
297} 284}
298EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 285EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
299 286
300static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, 287static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
301 struct nf_conntrack_l4proto *l4proto) 288 struct nf_conntrack_l4proto *l4proto)
@@ -376,8 +363,7 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net,
376 363
377/* FIXME: Allow NULL functions and sub in pointers to generic for 364/* FIXME: Allow NULL functions and sub in pointers to generic for
378 them. --RR */ 365 them. --RR */
379static int 366int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto)
380nf_conntrack_l4proto_register_net(struct nf_conntrack_l4proto *l4proto)
381{ 367{
382 int ret = 0; 368 int ret = 0;
383 369
@@ -431,8 +417,9 @@ out_unlock:
431 mutex_unlock(&nf_ct_proto_mutex); 417 mutex_unlock(&nf_ct_proto_mutex);
432 return ret; 418 return ret;
433} 419}
420EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
434 421
435int nf_conntrack_l4proto_register(struct net *net, 422int nf_ct_l4proto_pernet_register(struct net *net,
436 struct nf_conntrack_l4proto *l4proto) 423 struct nf_conntrack_l4proto *l4proto)
437{ 424{
438 int ret = 0; 425 int ret = 0;
@@ -452,22 +439,13 @@ int nf_conntrack_l4proto_register(struct net *net,
452 if (ret < 0) 439 if (ret < 0)
453 goto out; 440 goto out;
454 441
455 if (net == &init_net) {
456 ret = nf_conntrack_l4proto_register_net(l4proto);
457 if (ret < 0) {
458 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
459 goto out;
460 }
461 }
462
463 pn->users++; 442 pn->users++;
464out: 443out:
465 return ret; 444 return ret;
466} 445}
467EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register); 446EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
468 447
469static void 448void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
470nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
471{ 449{
472 BUG_ON(l4proto->l3proto >= PF_MAX); 450 BUG_ON(l4proto->l3proto >= PF_MAX);
473 451
@@ -482,15 +460,13 @@ nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
482 460
483 synchronize_rcu(); 461 synchronize_rcu();
484} 462}
463EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
485 464
486void nf_conntrack_l4proto_unregister(struct net *net, 465void nf_ct_l4proto_pernet_unregister(struct net *net,
487 struct nf_conntrack_l4proto *l4proto) 466 struct nf_conntrack_l4proto *l4proto)
488{ 467{
489 struct nf_proto_net *pn = NULL; 468 struct nf_proto_net *pn = NULL;
490 469
491 if (net == &init_net)
492 nf_conntrack_l4proto_unregister_net(l4proto);
493
494 pn = nf_ct_l4proto_net(net, l4proto); 470 pn = nf_ct_l4proto_net(net, l4proto);
495 if (pn == NULL) 471 if (pn == NULL)
496 return; 472 return;
@@ -501,11 +477,10 @@ void nf_conntrack_l4proto_unregister(struct net *net,
501 /* Remove all conntrack entries for this protocol */ 477 /* Remove all conntrack entries for this protocol */
502 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); 478 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
503} 479}
504EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 480EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
505 481
506int nf_conntrack_proto_init(struct net *net) 482int nf_conntrack_proto_pernet_init(struct net *net)
507{ 483{
508 unsigned int i;
509 int err; 484 int err;
510 struct nf_proto_net *pn = nf_ct_l4proto_net(net, 485 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
511 &nf_conntrack_l4proto_generic); 486 &nf_conntrack_l4proto_generic);
@@ -520,19 +495,12 @@ int nf_conntrack_proto_init(struct net *net)
520 if (err < 0) 495 if (err < 0)
521 return err; 496 return err;
522 497
523 if (net == &init_net) {
524 for (i = 0; i < AF_MAX; i++)
525 rcu_assign_pointer(nf_ct_l3protos[i],
526 &nf_conntrack_l3proto_generic);
527 }
528
529 pn->users++; 498 pn->users++;
530 return 0; 499 return 0;
531} 500}
532 501
533void nf_conntrack_proto_fini(struct net *net) 502void nf_conntrack_proto_pernet_fini(struct net *net)
534{ 503{
535 unsigned int i;
536 struct nf_proto_net *pn = nf_ct_l4proto_net(net, 504 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
537 &nf_conntrack_l4proto_generic); 505 &nf_conntrack_l4proto_generic);
538 506
@@ -540,9 +508,21 @@ void nf_conntrack_proto_fini(struct net *net)
540 nf_ct_l4proto_unregister_sysctl(net, 508 nf_ct_l4proto_unregister_sysctl(net,
541 pn, 509 pn,
542 &nf_conntrack_l4proto_generic); 510 &nf_conntrack_l4proto_generic);
543 if (net == &init_net) { 511}
544 /* free l3proto protocol tables */ 512
545 for (i = 0; i < PF_MAX; i++) 513int nf_conntrack_proto_init(void)
546 kfree(nf_ct_protos[i]); 514{
547 } 515 unsigned int i;
516 for (i = 0; i < AF_MAX; i++)
517 rcu_assign_pointer(nf_ct_l3protos[i],
518 &nf_conntrack_l3proto_generic);
519 return 0;
520}
521
522void nf_conntrack_proto_fini(void)
523{
524 unsigned int i;
525 /* free l3proto protocol tables */
526 for (i = 0; i < PF_MAX; i++)
527 kfree(nf_ct_protos[i]);
548} 528}
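
The nf_conntrack_proto.c rework splits each registration into a global half and a per-namespace half: nf_ct_l3proto_register()/nf_ct_l4proto_register() touch the shared protocol tables once per module, while the *_pernet_* variants only manage per-net sysctl state, so the old net == &init_net special cases disappear. The contract a tracker module now follows, mirroring the dccp/gre/sctp/udplite conversions below (my_proto4 and my_net_ops are placeholders; my_net_ops' .init/.exit would call nf_ct_l4proto_pernet_register()/_unregister()):

	/* Sketch of the post-patch registration contract. */
	static int __init my_tracker_init(void)
	{
		int ret;

		ret = nf_ct_l4proto_register(&my_proto4);	/* global, once */
		if (ret < 0)
			return ret;

		ret = register_pernet_subsys(&my_net_ops);	/* per netns */
		if (ret < 0)
			nf_ct_l4proto_unregister(&my_proto4);	/* unwind */
		return ret;
	}

	static void __exit my_tracker_exit(void)
	{
		nf_ct_l4proto_unregister(&my_proto4);
		unregister_pernet_subsys(&my_net_ops);
	}
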
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a8ae287bc7af..432f95780003 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -935,32 +935,27 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
935static __net_init int dccp_net_init(struct net *net) 935static __net_init int dccp_net_init(struct net *net)
936{ 936{
937 int ret = 0; 937 int ret = 0;
938 ret = nf_conntrack_l4proto_register(net, 938 ret = nf_ct_l4proto_pernet_register(net, &dccp_proto4);
939 &dccp_proto4);
940 if (ret < 0) { 939 if (ret < 0) {
941 pr_err("nf_conntrack_l4proto_dccp4 :protocol register failed.\n"); 940 pr_err("nf_conntrack_dccp4: pernet registration failed.\n");
942 goto out; 941 goto out;
943 } 942 }
944 ret = nf_conntrack_l4proto_register(net, 943 ret = nf_ct_l4proto_pernet_register(net, &dccp_proto6);
945 &dccp_proto6);
946 if (ret < 0) { 944 if (ret < 0) {
947 pr_err("nf_conntrack_l4proto_dccp6 :protocol register failed.\n"); 945 pr_err("nf_conntrack_dccp6: pernet registration failed.\n");
948 goto cleanup_dccp4; 946 goto cleanup_dccp4;
949 } 947 }
950 return 0; 948 return 0;
951cleanup_dccp4: 949cleanup_dccp4:
952 nf_conntrack_l4proto_unregister(net, 950 nf_ct_l4proto_pernet_unregister(net, &dccp_proto4);
953 &dccp_proto4);
954out: 951out:
955 return ret; 952 return ret;
956} 953}
957 954
958static __net_exit void dccp_net_exit(struct net *net) 955static __net_exit void dccp_net_exit(struct net *net)
959{ 956{
960 nf_conntrack_l4proto_unregister(net, 957 nf_ct_l4proto_pernet_unregister(net, &dccp_proto6);
961 &dccp_proto6); 958 nf_ct_l4proto_pernet_unregister(net, &dccp_proto4);
962 nf_conntrack_l4proto_unregister(net,
963 &dccp_proto4);
964} 959}
965 960
966static struct pernet_operations dccp_net_ops = { 961static struct pernet_operations dccp_net_ops = {
@@ -972,11 +967,33 @@ static struct pernet_operations dccp_net_ops = {
972 967
973static int __init nf_conntrack_proto_dccp_init(void) 968static int __init nf_conntrack_proto_dccp_init(void)
974{ 969{
975 return register_pernet_subsys(&dccp_net_ops); 970 int ret;
971
972 ret = nf_ct_l4proto_register(&dccp_proto4);
973 if (ret < 0)
974 goto out_dccp4;
975
976 ret = nf_ct_l4proto_register(&dccp_proto6);
977 if (ret < 0)
978 goto out_dccp6;
979
980 ret = register_pernet_subsys(&dccp_net_ops);
981 if (ret < 0)
982 goto out_pernet;
983
984 return 0;
985out_pernet:
986 nf_ct_l4proto_unregister(&dccp_proto6);
987out_dccp6:
988 nf_ct_l4proto_unregister(&dccp_proto4);
989out_dccp4:
990 return ret;
976} 991}
977 992
978static void __exit nf_conntrack_proto_dccp_fini(void) 993static void __exit nf_conntrack_proto_dccp_fini(void)
979{ 994{
995 nf_ct_l4proto_unregister(&dccp_proto6);
996 nf_ct_l4proto_unregister(&dccp_proto4);
980 unregister_pernet_subsys(&dccp_net_ops); 997 unregister_pernet_subsys(&dccp_net_ops);
981} 998}
982 999
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index b09b7af7f6f8..bd7d01d9c7e7 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -397,15 +397,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
397static int proto_gre_net_init(struct net *net) 397static int proto_gre_net_init(struct net *net)
398{ 398{
399 int ret = 0; 399 int ret = 0;
400 ret = nf_conntrack_l4proto_register(net, &nf_conntrack_l4proto_gre4); 400 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_gre4);
401 if (ret < 0) 401 if (ret < 0)
402 pr_err("nf_conntrack_l4proto_gre4 :protocol register failed.\n"); 402 pr_err("nf_conntrack_gre4: pernet registration failed.\n");
403 return ret; 403 return ret;
404} 404}
405 405
406static void proto_gre_net_exit(struct net *net) 406static void proto_gre_net_exit(struct net *net)
407{ 407{
408 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_gre4); 408 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_gre4);
409 nf_ct_gre_keymap_flush(net); 409 nf_ct_gre_keymap_flush(net);
410} 410}
411 411
@@ -418,11 +418,26 @@ static struct pernet_operations proto_gre_net_ops = {
418 418
419static int __init nf_ct_proto_gre_init(void) 419static int __init nf_ct_proto_gre_init(void)
420{ 420{
421 return register_pernet_subsys(&proto_gre_net_ops); 421 int ret;
422
423 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
424 if (ret < 0)
425 goto out_gre4;
426
427 ret = register_pernet_subsys(&proto_gre_net_ops);
428 if (ret < 0)
429 goto out_pernet;
430
431 return 0;
432out_pernet:
433 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
434out_gre4:
435 return ret;
422} 436}
423 437
424static void __exit nf_ct_proto_gre_fini(void) 438static void __exit nf_ct_proto_gre_fini(void)
425{ 439{
440 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
426 unregister_pernet_subsys(&proto_gre_net_ops); 441 unregister_pernet_subsys(&proto_gre_net_ops);
427} 442}
428 443
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c746d61f83ed..480f616d5936 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -853,33 +853,28 @@ static int sctp_net_init(struct net *net)
853{ 853{
854 int ret = 0; 854 int ret = 0;
855 855
856 ret = nf_conntrack_l4proto_register(net, 856 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_sctp4);
857 &nf_conntrack_l4proto_sctp4);
858 if (ret < 0) { 857 if (ret < 0) {
859 pr_err("nf_conntrack_l4proto_sctp4 :protocol register failed.\n"); 858 pr_err("nf_conntrack_sctp4: pernet registration failed.\n");
860 goto out; 859 goto out;
861 } 860 }
862 ret = nf_conntrack_l4proto_register(net, 861 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_sctp6);
863 &nf_conntrack_l4proto_sctp6);
864 if (ret < 0) { 862 if (ret < 0) {
865 pr_err("nf_conntrack_l4proto_sctp6 :protocol register failed.\n"); 863 pr_err("nf_conntrack_sctp6: pernet registration failed.\n");
866 goto cleanup_sctp4; 864 goto cleanup_sctp4;
867 } 865 }
868 return 0; 866 return 0;
869 867
870cleanup_sctp4: 868cleanup_sctp4:
871 nf_conntrack_l4proto_unregister(net, 869 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_sctp4);
872 &nf_conntrack_l4proto_sctp4);
873out: 870out:
874 return ret; 871 return ret;
875} 872}
876 873
877static void sctp_net_exit(struct net *net) 874static void sctp_net_exit(struct net *net)
878{ 875{
879 nf_conntrack_l4proto_unregister(net, 876 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_sctp6);
880 &nf_conntrack_l4proto_sctp6); 877 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_sctp4);
881 nf_conntrack_l4proto_unregister(net,
882 &nf_conntrack_l4proto_sctp4);
883} 878}
884 879
885static struct pernet_operations sctp_net_ops = { 880static struct pernet_operations sctp_net_ops = {
@@ -891,11 +886,33 @@ static struct pernet_operations sctp_net_ops = {
891 886
892static int __init nf_conntrack_proto_sctp_init(void) 887static int __init nf_conntrack_proto_sctp_init(void)
893{ 888{
894 return register_pernet_subsys(&sctp_net_ops); 889 int ret;
890
891 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
892 if (ret < 0)
893 goto out_sctp4;
894
895 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp6);
896 if (ret < 0)
897 goto out_sctp6;
898
899 ret = register_pernet_subsys(&sctp_net_ops);
900 if (ret < 0)
901 goto out_pernet;
902
903 return 0;
904out_pernet:
905 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
906out_sctp6:
907 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
908out_sctp4:
909 return ret;
895} 910}
896 911
897static void __exit nf_conntrack_proto_sctp_fini(void) 912static void __exit nf_conntrack_proto_sctp_fini(void)
898{ 913{
914 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
915 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
899 unregister_pernet_subsys(&sctp_net_ops); 916 unregister_pernet_subsys(&sctp_net_ops);
900} 917}
901 918
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4b66df209286..157489581c31 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -336,30 +336,28 @@ static int udplite_net_init(struct net *net)
336{ 336{
337 int ret = 0; 337 int ret = 0;
338 338
339 ret = nf_conntrack_l4proto_register(net, 339 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite4);
340 &nf_conntrack_l4proto_udplite4);
341 if (ret < 0) { 340 if (ret < 0) {
342 pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n"); 341 pr_err("nf_conntrack_udplite4: pernet registration failed.\n");
343 goto out; 342 goto out;
344 } 343 }
345 ret = nf_conntrack_l4proto_register(net, 344 ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite6);
346 &nf_conntrack_l4proto_udplite6);
347 if (ret < 0) { 345 if (ret < 0) {
348 pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n"); 346 pr_err("nf_conntrack_udplite6: pernet registration failed.\n");
349 goto cleanup_udplite4; 347 goto cleanup_udplite4;
350 } 348 }
351 return 0; 349 return 0;
352 350
353cleanup_udplite4: 351cleanup_udplite4:
354 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4); 352 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4);
355out: 353out:
356 return ret; 354 return ret;
357} 355}
358 356
359static void udplite_net_exit(struct net *net) 357static void udplite_net_exit(struct net *net)
360{ 358{
361 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite6); 359 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite6);
362 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4); 360 nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4);
363} 361}
364 362
365static struct pernet_operations udplite_net_ops = { 363static struct pernet_operations udplite_net_ops = {
@@ -371,11 +369,33 @@ static struct pernet_operations udplite_net_ops = {
371 369
372static int __init nf_conntrack_proto_udplite_init(void) 370static int __init nf_conntrack_proto_udplite_init(void)
373{ 371{
374 return register_pernet_subsys(&udplite_net_ops); 372 int ret;
373
374 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
375 if (ret < 0)
376 goto out_udplite4;
377
378 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite6);
379 if (ret < 0)
380 goto out_udplite6;
381
382 ret = register_pernet_subsys(&udplite_net_ops);
383 if (ret < 0)
384 goto out_pernet;
385
386 return 0;
387out_pernet:
388 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
389out_udplite6:
390 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
391out_udplite4:
392 return ret;
375} 393}
376 394
377static void __exit nf_conntrack_proto_udplite_exit(void) 395static void __exit nf_conntrack_proto_udplite_exit(void)
378{ 396{
397 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
398 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
379 unregister_pernet_subsys(&udplite_net_ops); 399 unregister_pernet_subsys(&udplite_net_ops);
380} 400}
381 401
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 295429f39088..4a2134fd3fcb 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -138,6 +138,7 @@ static int help(struct sk_buff *skb,
138 138
139 exp = nf_ct_expect_alloc(ct); 139 exp = nf_ct_expect_alloc(ct);
140 if (exp == NULL) { 140 if (exp == NULL) {
141 nf_ct_helper_log(skb, ct, "cannot alloc expectation");
141 ret = NF_DROP; 142 ret = NF_DROP;
142 goto out; 143 goto out;
143 } 144 }
@@ -151,8 +152,10 @@ static int help(struct sk_buff *skb,
151 nf_ct_dump_tuple(&exp->tuple); 152 nf_ct_dump_tuple(&exp->tuple);
152 153
153 /* Can't expect this? Best to drop packet now. */ 154 /* Can't expect this? Best to drop packet now. */
154 if (nf_ct_expect_related(exp) != 0) 155 if (nf_ct_expect_related(exp) != 0) {
156 nf_ct_helper_log(skb, ct, "cannot add expectation");
155 ret = NF_DROP; 157 ret = NF_DROP;
158 }
156 159
157 nf_ct_expect_put(exp); 160 nf_ct_expect_put(exp);
158 161
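
This sane.c hunk is the template for most of the helper changes in this series: every NF_DROP a helper issues is now preceded by an nf_ct_helper_log() call naming the reason, instead of a silent drop. The recurring shape, isolated into a stand-alone sketch (the helper body is illustrative; nf_ct_helper_log() and the expectation calls are the real APIs used above):

	/* Log-before-drop pattern used throughout this series. */
	static int my_helper_step(struct sk_buff *skb, struct nf_conn *ct)
	{
		struct nf_conntrack_expect *exp;

		exp = nf_ct_expect_alloc(ct);
		if (exp == NULL) {
			nf_ct_helper_log(skb, ct, "cannot alloc expectation");
			return NF_DROP;
		}
		/* ... nf_ct_expect_init(exp, ...) would go here ... */
		if (nf_ct_expect_related(exp) != 0) {
			nf_ct_helper_log(skb, ct, "cannot add expectation");
			nf_ct_expect_put(exp);
			return NF_DROP;
		}
		nf_ct_expect_put(exp);
		return NF_ACCEPT;
	}
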
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index df8f4f284481..069229d919b6 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1095,8 +1095,10 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
1095 port = simple_strtoul(*dptr + mediaoff, NULL, 10); 1095 port = simple_strtoul(*dptr + mediaoff, NULL, 10);
1096 if (port == 0) 1096 if (port == 0)
1097 continue; 1097 continue;
1098 if (port < 1024 || port > 65535) 1098 if (port < 1024 || port > 65535) {
1099 nf_ct_helper_log(skb, ct, "wrong port %u", port);
1099 return NF_DROP; 1100 return NF_DROP;
1101 }
1100 1102
1101 /* The media description overrides the session description. */ 1103 /* The media description overrides the session description. */
1102 maddr_len = 0; 1104 maddr_len = 0;
@@ -1107,15 +1109,20 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
1107 memcpy(&rtp_addr, &maddr, sizeof(rtp_addr)); 1109 memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
1108 } else if (caddr_len) 1110 } else if (caddr_len)
1109 memcpy(&rtp_addr, &caddr, sizeof(rtp_addr)); 1111 memcpy(&rtp_addr, &caddr, sizeof(rtp_addr));
1110 else 1112 else {
1113 nf_ct_helper_log(skb, ct, "cannot parse SDP message");
1111 return NF_DROP; 1114 return NF_DROP;
1115 }
1112 1116
1113 ret = set_expected_rtp_rtcp(skb, protoff, dataoff, 1117 ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
1114 dptr, datalen, 1118 dptr, datalen,
1115 &rtp_addr, htons(port), t->class, 1119 &rtp_addr, htons(port), t->class,
1116 mediaoff, medialen); 1120 mediaoff, medialen);
1117 if (ret != NF_ACCEPT) 1121 if (ret != NF_ACCEPT) {
1122 nf_ct_helper_log(skb, ct,
1123 "cannot add expectation for voice");
1118 return ret; 1124 return ret;
1125 }
1119 1126
1120 /* Update media connection address if present */ 1127 /* Update media connection address if present */
1121 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { 1128 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
@@ -1123,8 +1130,10 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
1123 dptr, datalen, mediaoff, 1130 dptr, datalen, mediaoff,
1124 SDP_HDR_CONNECTION, SDP_HDR_MEDIA, 1131 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
1125 &rtp_addr); 1132 &rtp_addr);
1126 if (ret != NF_ACCEPT) 1133 if (ret != NF_ACCEPT) {
1134 nf_ct_helper_log(skb, ct, "cannot mangle SDP");
1127 return ret; 1135 return ret;
1136 }
1128 } 1137 }
1129 i++; 1138 i++;
1130 } 1139 }
@@ -1258,9 +1267,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1258 ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, 1267 ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
1259 SIP_HDR_CONTACT, NULL, 1268 SIP_HDR_CONTACT, NULL,
1260 &matchoff, &matchlen, &daddr, &port); 1269 &matchoff, &matchlen, &daddr, &port);
1261 if (ret < 0) 1270 if (ret < 0) {
1271 nf_ct_helper_log(skb, ct, "cannot parse contact");
1262 return NF_DROP; 1272 return NF_DROP;
1263 else if (ret == 0) 1273 } else if (ret == 0)
1264 return NF_ACCEPT; 1274 return NF_ACCEPT;
1265 1275
1266 /* We don't support third-party registrations */ 1276 /* We don't support third-party registrations */
@@ -1273,8 +1283,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1273 1283
1274 if (ct_sip_parse_numerical_param(ct, *dptr, 1284 if (ct_sip_parse_numerical_param(ct, *dptr,
1275 matchoff + matchlen, *datalen, 1285 matchoff + matchlen, *datalen,
1276 "expires=", NULL, NULL, &expires) < 0) 1286 "expires=", NULL, NULL, &expires) < 0) {
1287 nf_ct_helper_log(skb, ct, "cannot parse expires");
1277 return NF_DROP; 1288 return NF_DROP;
1289 }
1278 1290
1279 if (expires == 0) { 1291 if (expires == 0) {
1280 ret = NF_ACCEPT; 1292 ret = NF_ACCEPT;
@@ -1282,8 +1294,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1282 } 1294 }
1283 1295
1284 exp = nf_ct_expect_alloc(ct); 1296 exp = nf_ct_expect_alloc(ct);
1285 if (!exp) 1297 if (!exp) {
1298 nf_ct_helper_log(skb, ct, "cannot alloc expectation");
1286 return NF_DROP; 1299 return NF_DROP;
1300 }
1287 1301
1288 saddr = NULL; 1302 saddr = NULL;
1289 if (sip_direct_signalling) 1303 if (sip_direct_signalling)
@@ -1300,9 +1314,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1300 ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen, 1314 ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
1301 exp, matchoff, matchlen); 1315 exp, matchoff, matchlen);
1302 else { 1316 else {
1303 if (nf_ct_expect_related(exp) != 0) 1317 if (nf_ct_expect_related(exp) != 0) {
1318 nf_ct_helper_log(skb, ct, "cannot add expectation");
1304 ret = NF_DROP; 1319 ret = NF_DROP;
1305 else 1320 } else
1306 ret = NF_ACCEPT; 1321 ret = NF_ACCEPT;
1307 } 1322 }
1308 nf_ct_expect_put(exp); 1323 nf_ct_expect_put(exp);
@@ -1356,9 +1371,10 @@ static int process_register_response(struct sk_buff *skb, unsigned int protoff,
1356 SIP_HDR_CONTACT, &in_contact, 1371 SIP_HDR_CONTACT, &in_contact,
1357 &matchoff, &matchlen, 1372 &matchoff, &matchlen,
1358 &addr, &port); 1373 &addr, &port);
1359 if (ret < 0) 1374 if (ret < 0) {
1375 nf_ct_helper_log(skb, ct, "cannot parse contact");
1360 return NF_DROP; 1376 return NF_DROP;
1361 else if (ret == 0) 1377 } else if (ret == 0)
1362 break; 1378 break;
1363 1379
1364 /* We don't support third-party registrations */ 1380 /* We don't support third-party registrations */
@@ -1373,8 +1389,10 @@ static int process_register_response(struct sk_buff *skb, unsigned int protoff,
1373 matchoff + matchlen, 1389 matchoff + matchlen,
1374 *datalen, "expires=", 1390 *datalen, "expires=",
1375 NULL, NULL, &c_expires); 1391 NULL, NULL, &c_expires);
1376 if (ret < 0) 1392 if (ret < 0) {
1393 nf_ct_helper_log(skb, ct, "cannot parse expires");
1377 return NF_DROP; 1394 return NF_DROP;
1395 }
1378 if (c_expires == 0) 1396 if (c_expires == 0)
1379 break; 1397 break;
1380 if (refresh_signalling_expectation(ct, &addr, proto, port, 1398 if (refresh_signalling_expectation(ct, &addr, proto, port,
@@ -1408,15 +1426,21 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
1408 if (*datalen < strlen("SIP/2.0 200")) 1426 if (*datalen < strlen("SIP/2.0 200"))
1409 return NF_ACCEPT; 1427 return NF_ACCEPT;
1410 code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10); 1428 code = simple_strtoul(*dptr + strlen("SIP/2.0 "), NULL, 10);
1411 if (!code) 1429 if (!code) {
1430 nf_ct_helper_log(skb, ct, "cannot get code");
1412 return NF_DROP; 1431 return NF_DROP;
1432 }
1413 1433
1414 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, 1434 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1415 &matchoff, &matchlen) <= 0) 1435 &matchoff, &matchlen) <= 0) {
1436 nf_ct_helper_log(skb, ct, "cannot parse cseq");
1416 return NF_DROP; 1437 return NF_DROP;
1438 }
1417 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1439 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1418 if (!cseq) 1440 if (!cseq) {
1441 nf_ct_helper_log(skb, ct, "cannot get cseq");
1419 return NF_DROP; 1442 return NF_DROP;
1443 }
1420 matchend = matchoff + matchlen + 1; 1444 matchend = matchoff + matchlen + 1;
1421 1445
1422 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1446 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
@@ -1440,8 +1464,25 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1440{ 1464{
1441 enum ip_conntrack_info ctinfo; 1465 enum ip_conntrack_info ctinfo;
1442 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1466 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1467 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1468 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1443 unsigned int matchoff, matchlen; 1469 unsigned int matchoff, matchlen;
1444 unsigned int cseq, i; 1470 unsigned int cseq, i;
1471 union nf_inet_addr addr;
1472 __be16 port;
1473
1474 /* Many Cisco IP phones use a high source port for SIP requests, but
1475 * listen for the response on port 5060. If we are the local
1476 * router for one of these phones, save the port number from the
1477 * Via: header so that nf_nat_sip can redirect the responses to
1478 * the correct port.
1479 */
1480 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
1481 SIP_HDR_VIA_UDP, NULL, &matchoff,
1482 &matchlen, &addr, &port) > 0 &&
1483 port != ct->tuplehash[dir].tuple.src.u.udp.port &&
1484 nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
1485 ct_sip_info->forced_dport = port;
1445 1486
1446 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1487 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1447 const struct sip_handler *handler; 1488 const struct sip_handler *handler;
@@ -1454,11 +1495,15 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1454 continue; 1495 continue;
1455 1496
1456 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, 1497 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1457 &matchoff, &matchlen) <= 0) 1498 &matchoff, &matchlen) <= 0) {
1499 nf_ct_helper_log(skb, ct, "cannot parse cseq");
1458 return NF_DROP; 1500 return NF_DROP;
1501 }
1459 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1502 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1460 if (!cseq) 1503 if (!cseq) {
1504 nf_ct_helper_log(skb, ct, "cannot get cseq");
1461 return NF_DROP; 1505 return NF_DROP;
1506 }
1462 1507
1463 return handler->request(skb, protoff, dataoff, dptr, datalen, 1508 return handler->request(skb, protoff, dataoff, dptr, datalen,
1464 cseq); 1509 cseq);
@@ -1481,8 +1526,10 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
1481 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1526 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1482 nf_nat_sip = rcu_dereference(nf_nat_sip_hook); 1527 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
1483 if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff, 1528 if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
1484 dptr, datalen)) 1529 dptr, datalen)) {
1530 nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
1485 ret = NF_DROP; 1531 ret = NF_DROP;
1532 }
1486 } 1533 }
1487 1534
1488 return ret; 1535 return ret;
@@ -1546,11 +1593,14 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1546 end += strlen("\r\n\r\n") + clen; 1593 end += strlen("\r\n\r\n") + clen;
1547 1594
1548 msglen = origlen = end - dptr; 1595 msglen = origlen = end - dptr;
1549 if (msglen > datalen) 1596 if (msglen > datalen) {
1597 nf_ct_helper_log(skb, ct, "incomplete/bad SIP message");
1550 return NF_DROP; 1598 return NF_DROP;
1599 }
1551 1600
1552 ret = process_sip_msg(skb, ct, protoff, dataoff, 1601 ret = process_sip_msg(skb, ct, protoff, dataoff,
1553 &dptr, &msglen); 1602 &dptr, &msglen);
1603 /* process_sip_* functions report why this packet is dropped */
1554 if (ret != NF_ACCEPT) 1604 if (ret != NF_ACCEPT)
1555 break; 1605 break;
1556 diff = msglen - origlen; 1606 diff = msglen - origlen;
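
The new block in process_sip_request() is the detection side of the Cisco workaround: when the Via: header advertises the same address the request came from but a different port, that advertised port is remembered in ct_sip_info->forced_dport for nf_nat_sip to use when translating replies. The heuristic as a stand-alone predicate (the function name is hypothetical; the fields and helpers are the patch's):

	/* True when responses must go to the port advertised in Via:
	 * rather than the port the request was sent from. */
	static bool sip_wants_forced_dport(const struct nf_conn *ct,
					   enum ip_conntrack_dir dir,
					   const union nf_inet_addr *addr,
					   __be16 via_port)
	{
		return via_port != ct->tuplehash[dir].tuple.src.u.udp.port &&
		       nf_inet_addr_cmp(addr, &ct->tuplehash[dir].tuple.src.u3);
	}
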
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
index 6e545e26289e..87b95a2c270c 100644
--- a/net/netfilter/nf_conntrack_snmp.c
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -16,6 +16,7 @@
16#include <net/netfilter/nf_conntrack.h> 16#include <net/netfilter/nf_conntrack.h>
17#include <net/netfilter/nf_conntrack_helper.h> 17#include <net/netfilter/nf_conntrack_helper.h>
18#include <net/netfilter/nf_conntrack_expect.h> 18#include <net/netfilter/nf_conntrack_expect.h>
19#include <linux/netfilter/nf_conntrack_snmp.h>
19 20
20#define SNMP_PORT 161 21#define SNMP_PORT 161
21 22
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 363285d544a1..6bcce401fd1c 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -366,7 +366,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
366{ 366{
367 struct proc_dir_entry *pde; 367 struct proc_dir_entry *pde;
368 368
369 pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops); 369 pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
370 if (!pde) 370 if (!pde)
371 goto out_nf_conntrack; 371 goto out_nf_conntrack;
372 372
@@ -377,7 +377,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
377 return 0; 377 return 0;
378 378
379out_stat_nf_conntrack: 379out_stat_nf_conntrack:
380 proc_net_remove(net, "nf_conntrack"); 380 remove_proc_entry("nf_conntrack", net->proc_net);
381out_nf_conntrack: 381out_nf_conntrack:
382 return -ENOMEM; 382 return -ENOMEM;
383} 383}
@@ -385,7 +385,7 @@ out_nf_conntrack:
385static void nf_conntrack_standalone_fini_proc(struct net *net) 385static void nf_conntrack_standalone_fini_proc(struct net *net)
386{ 386{
387 remove_proc_entry("nf_conntrack", net->proc_net_stat); 387 remove_proc_entry("nf_conntrack", net->proc_net_stat);
388 proc_net_remove(net, "nf_conntrack"); 388 remove_proc_entry("nf_conntrack", net->proc_net);
389} 389}
390#else 390#else
391static int nf_conntrack_standalone_init_proc(struct net *net) 391static int nf_conntrack_standalone_init_proc(struct net *net)
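
Two mechanical procfs API conversions ride along in this file: proc_net_fops_create(net, name, mode, fops) becomes proc_create(name, mode, net->proc_net, fops), and proc_net_remove(net, name) becomes remove_proc_entry(name, net->proc_net). The parent directory is unchanged; it is simply named explicitly through net->proc_net now. Side by side, assuming the same ct_file_ops as above:

	/* before */
	pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
	proc_net_remove(net, "nf_conntrack");

	/* after */
	pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
	remove_proc_entry("nf_conntrack", net->proc_net);
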
@@ -472,13 +472,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
472{ 472{
473 struct ctl_table *table; 473 struct ctl_table *table;
474 474
475 if (net_eq(net, &init_net)) {
476 nf_ct_netfilter_header =
477 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
478 if (!nf_ct_netfilter_header)
479 goto out;
480 }
481
482 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), 475 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
483 GFP_KERNEL); 476 GFP_KERNEL);
484 if (!table) 477 if (!table)
@@ -502,10 +495,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
502out_unregister_netfilter: 495out_unregister_netfilter:
503 kfree(table); 496 kfree(table);
504out_kmemdup: 497out_kmemdup:
505 if (net_eq(net, &init_net))
506 unregister_net_sysctl_table(nf_ct_netfilter_header);
507out:
508 printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
509 return -ENOMEM; 498 return -ENOMEM;
510} 499}
511 500
@@ -513,8 +502,6 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
513{ 502{
514 struct ctl_table *table; 503 struct ctl_table *table;
515 504
516 if (net_eq(net, &init_net))
517 unregister_net_sysctl_table(nf_ct_netfilter_header);
518 table = net->ct.sysctl_header->ctl_table_arg; 505 table = net->ct.sysctl_header->ctl_table_arg;
519 unregister_net_sysctl_table(net->ct.sysctl_header); 506 unregister_net_sysctl_table(net->ct.sysctl_header);
520 kfree(table); 507 kfree(table);
@@ -530,51 +517,86 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
530} 517}
531#endif /* CONFIG_SYSCTL */ 518#endif /* CONFIG_SYSCTL */
532 519
533static int nf_conntrack_net_init(struct net *net) 520static int nf_conntrack_pernet_init(struct net *net)
534{ 521{
535 int ret; 522 int ret;
536 523
537 ret = nf_conntrack_init(net); 524 ret = nf_conntrack_init_net(net);
538 if (ret < 0) 525 if (ret < 0)
539 goto out_init; 526 goto out_init;
527
540 ret = nf_conntrack_standalone_init_proc(net); 528 ret = nf_conntrack_standalone_init_proc(net);
541 if (ret < 0) 529 if (ret < 0)
542 goto out_proc; 530 goto out_proc;
531
543 net->ct.sysctl_checksum = 1; 532 net->ct.sysctl_checksum = 1;
544 net->ct.sysctl_log_invalid = 0; 533 net->ct.sysctl_log_invalid = 0;
545 ret = nf_conntrack_standalone_init_sysctl(net); 534 ret = nf_conntrack_standalone_init_sysctl(net);
546 if (ret < 0) 535 if (ret < 0)
547 goto out_sysctl; 536 goto out_sysctl;
537
548 return 0; 538 return 0;
549 539
550out_sysctl: 540out_sysctl:
551 nf_conntrack_standalone_fini_proc(net); 541 nf_conntrack_standalone_fini_proc(net);
552out_proc: 542out_proc:
553 nf_conntrack_cleanup(net); 543 nf_conntrack_cleanup_net(net);
554out_init: 544out_init:
555 return ret; 545 return ret;
556} 546}
557 547
558static void nf_conntrack_net_exit(struct net *net) 548static void nf_conntrack_pernet_exit(struct net *net)
559{ 549{
560 nf_conntrack_standalone_fini_sysctl(net); 550 nf_conntrack_standalone_fini_sysctl(net);
561 nf_conntrack_standalone_fini_proc(net); 551 nf_conntrack_standalone_fini_proc(net);
562 nf_conntrack_cleanup(net); 552 nf_conntrack_cleanup_net(net);
563} 553}
564 554
565static struct pernet_operations nf_conntrack_net_ops = { 555static struct pernet_operations nf_conntrack_net_ops = {
566 .init = nf_conntrack_net_init, 556 .init = nf_conntrack_pernet_init,
567 .exit = nf_conntrack_net_exit, 557 .exit = nf_conntrack_pernet_exit,
568}; 558};
569 559
570static int __init nf_conntrack_standalone_init(void) 560static int __init nf_conntrack_standalone_init(void)
571{ 561{
572 return register_pernet_subsys(&nf_conntrack_net_ops); 562 int ret = nf_conntrack_init_start();
563 if (ret < 0)
564 goto out_start;
565
566#ifdef CONFIG_SYSCTL
567 nf_ct_netfilter_header =
568 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
569 if (!nf_ct_netfilter_header) {
570 pr_err("nf_conntrack: can't register to sysctl.\n");
571 goto out_sysctl;
572 }
573#endif
574
575 ret = register_pernet_subsys(&nf_conntrack_net_ops);
576 if (ret < 0)
577 goto out_pernet;
578
579 nf_conntrack_init_end();
580 return 0;
581
582out_pernet:
583#ifdef CONFIG_SYSCTL
584 unregister_net_sysctl_table(nf_ct_netfilter_header);
585out_sysctl:
586#endif
587 nf_conntrack_cleanup_end();
588out_start:
589 return ret;
573} 590}
574 591
575static void __exit nf_conntrack_standalone_fini(void) 592static void __exit nf_conntrack_standalone_fini(void)
576{ 593{
594 nf_conntrack_cleanup_start();
577 unregister_pernet_subsys(&nf_conntrack_net_ops); 595 unregister_pernet_subsys(&nf_conntrack_net_ops);
596#ifdef CONFIG_SYSCTL
597 unregister_net_sysctl_table(nf_ct_netfilter_header);
598#endif
599 nf_conntrack_cleanup_end();
578} 600}
579 601
580module_init(nf_conntrack_standalone_init); 602module_init(nf_conntrack_standalone_init);
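
nf_conntrack_standalone_init() is now a staged bring-up: nf_conntrack_init_start() builds the global state, the net/netfilter sysctl root is registered exactly once from module init (it used to hide behind a net_eq(net, &init_net) test in the per-net path), the pernet subsystem is registered, and nf_conntrack_init_end() finishes the job; every error label unwinds strictly in reverse. The skeleton without the sysctl step, for the shape alone:

	/* Staged bring-up, unwound in reverse on failure. */
	static int __init bringup(void)
	{
		int ret = nf_conntrack_init_start();	/* global state first */
		if (ret < 0)
			return ret;

		ret = register_pernet_subsys(&nf_conntrack_net_ops);
		if (ret < 0) {
			nf_conntrack_cleanup_end();	/* undo init_start() */
			return ret;
		}

		nf_conntrack_init_end();		/* publish last */
		return 0;
	}
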
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 81fc61c05263..e9936c830208 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -60,8 +60,10 @@ static int tftp_help(struct sk_buff *skb,
60 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 60 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
61 61
62 exp = nf_ct_expect_alloc(ct); 62 exp = nf_ct_expect_alloc(ct);
63 if (exp == NULL) 63 if (exp == NULL) {
64 nf_ct_helper_log(skb, ct, "cannot alloc expectation");
64 return NF_DROP; 65 return NF_DROP;
66 }
65 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 67 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
66 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, 68 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
67 nf_ct_l3num(ct), 69 nf_ct_l3num(ct),
@@ -74,8 +76,10 @@ static int tftp_help(struct sk_buff *skb,
74 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); 76 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
75 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) 77 if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
76 ret = nf_nat_tftp(skb, ctinfo, exp); 78 ret = nf_nat_tftp(skb, ctinfo, exp);
77 else if (nf_ct_expect_related(exp) != 0) 79 else if (nf_ct_expect_related(exp) != 0) {
80 nf_ct_helper_log(skb, ct, "cannot add expectation");
78 ret = NF_DROP; 81 ret = NF_DROP;
82 }
79 nf_ct_expect_put(exp); 83 nf_ct_expect_put(exp);
80 break; 84 break;
81 case TFTP_OPCODE_DATA: 85 case TFTP_OPCODE_DATA:
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
index a878ce5b252c..93da609d9d29 100644
--- a/net/netfilter/nf_conntrack_timeout.c
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -37,24 +37,15 @@ static struct nf_ct_ext_type timeout_extend __read_mostly = {
37 .id = NF_CT_EXT_TIMEOUT, 37 .id = NF_CT_EXT_TIMEOUT,
38}; 38};
39 39
40int nf_conntrack_timeout_init(struct net *net) 40int nf_conntrack_timeout_init(void)
41{ 41{
42 int ret = 0; 42 int ret = nf_ct_extend_register(&timeout_extend);
43 43 if (ret < 0)
44 if (net_eq(net, &init_net)) { 44 pr_err("nf_ct_timeout: Unable to register timeout extension.\n");
45 ret = nf_ct_extend_register(&timeout_extend); 45 return ret;
46 if (ret < 0) {
47 printk(KERN_ERR "nf_ct_timeout: Unable to register "
48 "timeout extension.\n");
49 return ret;
50 }
51 }
52
53 return 0;
54} 46}
55 47
56void nf_conntrack_timeout_fini(struct net *net) 48void nf_conntrack_timeout_fini(void)
57{ 49{
58 if (net_eq(net, &init_net)) 50 nf_ct_extend_unregister(&timeout_extend);
59 nf_ct_extend_unregister(&timeout_extend);
60} 51}
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 7ea8026f07c9..902fb0a6b38a 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -88,37 +88,28 @@ static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
88} 88}
89#endif 89#endif
90 90
91int nf_conntrack_tstamp_init(struct net *net) 91int nf_conntrack_tstamp_pernet_init(struct net *net)
92{ 92{
93 int ret;
94
95 net->ct.sysctl_tstamp = nf_ct_tstamp; 93 net->ct.sysctl_tstamp = nf_ct_tstamp;
94 return nf_conntrack_tstamp_init_sysctl(net);
95}
96 96
97 if (net_eq(net, &init_net)) { 97void nf_conntrack_tstamp_pernet_fini(struct net *net)
98 ret = nf_ct_extend_register(&tstamp_extend); 98{
99 if (ret < 0) { 99 nf_conntrack_tstamp_fini_sysctl(net);
100 printk(KERN_ERR "nf_ct_tstamp: Unable to register " 100 nf_ct_extend_unregister(&tstamp_extend);
101 "extension\n"); 101}
102 goto out_extend_register;
103 }
104 }
105 102
106 ret = nf_conntrack_tstamp_init_sysctl(net); 103int nf_conntrack_tstamp_init(void)
104{
105 int ret;
106 ret = nf_ct_extend_register(&tstamp_extend);
107 if (ret < 0) 107 if (ret < 0)
108 goto out_sysctl; 108 pr_err("nf_ct_tstamp: Unable to register extension\n");
109
110 return 0;
111
112out_sysctl:
113 if (net_eq(net, &init_net))
114 nf_ct_extend_unregister(&tstamp_extend);
115out_extend_register:
116 return ret; 109 return ret;
117} 110}
118 111
119void nf_conntrack_tstamp_fini(struct net *net) 112void nf_conntrack_tstamp_fini(void)
120{ 113{
121 nf_conntrack_tstamp_fini_sysctl(net); 114 nf_ct_extend_unregister(&tstamp_extend);
122 if (net_eq(net, &init_net))
123 nf_ct_extend_unregister(&tstamp_extend);
124} 115}
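
nf_conntrack_timeout and nf_conntrack_timestamp get the same treatment as the protocol trackers: nf_ct_extend_register() is inherently global, so the net_eq(net, &init_net) guards disappear and the extension is registered once from global init, leaving only the sysctl knobs in the per-net path. In miniature (the my_* names are placeholders; the calls are the patch's):

	int my_tstamp_init(void)			/* once, globally */
	{
		return nf_ct_extend_register(&tstamp_extend);
	}

	int my_tstamp_pernet_init(struct net *net)	/* per namespace */
	{
		net->ct.sysctl_tstamp = nf_ct_tstamp;
		return nf_conntrack_tstamp_init_sysctl(net);
	}
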
diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c
index 42d337881171..3b67c9d11273 100644
--- a/net/netfilter/nf_nat_amanda.c
+++ b/net/netfilter/nf_nat_amanda.c
@@ -56,15 +56,19 @@ static unsigned int help(struct sk_buff *skb,
56 } 56 }
57 } 57 }
58 58
59 if (port == 0) 59 if (port == 0) {
60 nf_ct_helper_log(skb, exp->master, "all ports in use");
60 return NF_DROP; 61 return NF_DROP;
62 }
61 63
62 sprintf(buffer, "%u", port); 64 sprintf(buffer, "%u", port);
63 ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, 65 ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
64 protoff, matchoff, matchlen, 66 protoff, matchoff, matchlen,
65 buffer, strlen(buffer)); 67 buffer, strlen(buffer));
66 if (ret != NF_ACCEPT) 68 if (ret != NF_ACCEPT) {
69 nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
67 nf_ct_unexpect_related(exp); 70 nf_ct_unexpect_related(exp);
71 }
68 return ret; 72 return ret;
69} 73}
70 74
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index e839b97b2863..e84a578dbe35 100644
--- a/net/netfilter/nf_nat_ftp.c
+++ b/net/netfilter/nf_nat_ftp.c
@@ -96,8 +96,10 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
96 } 96 }
97 } 97 }
98 98
99 if (port == 0) 99 if (port == 0) {
100 nf_ct_helper_log(skb, ct, "all ports in use");
100 return NF_DROP; 101 return NF_DROP;
102 }
101 103
102 buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer), 104 buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer),
103 &newaddr, port); 105 &newaddr, port);
@@ -113,6 +115,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
113 return NF_ACCEPT; 115 return NF_ACCEPT;
114 116
115out: 117out:
118 nf_ct_helper_log(skb, ct, "cannot mangle packet");
116 nf_ct_unexpect_related(exp); 119 nf_ct_unexpect_related(exp);
117 return NF_DROP; 120 return NF_DROP;
118} 121}
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index 1fedee6e7fb6..f02b3605823e 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -56,14 +56,18 @@ static unsigned int help(struct sk_buff *skb,
56 } 56 }
57 } 57 }
58 58
59 if (port == 0) 59 if (port == 0) {
60 nf_ct_helper_log(skb, exp->master, "all ports in use");
60 return NF_DROP; 61 return NF_DROP;
62 }
61 63
62 ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, 64 ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
63 protoff, matchoff, matchlen, buffer, 65 protoff, matchoff, matchlen, buffer,
64 strlen(buffer)); 66 strlen(buffer));
65 if (ret != NF_ACCEPT) 67 if (ret != NF_ACCEPT) {
68 nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
66 nf_ct_unexpect_related(exp); 69 nf_ct_unexpect_related(exp);
70 }
67 return ret; 71 return ret;
68} 72}
69 73
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 16303c752213..96ccdf78a29f 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -95,6 +95,7 @@ static int map_addr(struct sk_buff *skb, unsigned int protoff,
95 enum ip_conntrack_info ctinfo; 95 enum ip_conntrack_info ctinfo;
96 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 96 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
97 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 97 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
98 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
98 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")]; 99 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
99 unsigned int buflen; 100 unsigned int buflen;
100 union nf_inet_addr newaddr; 101 union nf_inet_addr newaddr;
@@ -107,7 +108,8 @@ static int map_addr(struct sk_buff *skb, unsigned int protoff,
107 } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) && 108 } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
108 ct->tuplehash[dir].tuple.dst.u.udp.port == port) { 109 ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
109 newaddr = ct->tuplehash[!dir].tuple.src.u3; 110 newaddr = ct->tuplehash[!dir].tuple.src.u3;
110 newport = ct->tuplehash[!dir].tuple.src.u.udp.port; 111 newport = ct_sip_info->forced_dport ? :
112 ct->tuplehash[!dir].tuple.src.u.udp.port;
111 } else 113 } else
112 return 1; 114 return 1;
113 115
@@ -144,6 +146,7 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
144 enum ip_conntrack_info ctinfo; 146 enum ip_conntrack_info ctinfo;
145 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 147 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
146 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 148 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
149 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
147 unsigned int coff, matchoff, matchlen; 150 unsigned int coff, matchoff, matchlen;
148 enum sip_header_types hdr; 151 enum sip_header_types hdr;
149 union nf_inet_addr addr; 152 union nf_inet_addr addr;
@@ -156,8 +159,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
156 &matchoff, &matchlen, 159 &matchoff, &matchlen,
157 &addr, &port) > 0 && 160 &addr, &port) > 0 &&
158 !map_addr(skb, protoff, dataoff, dptr, datalen, 161 !map_addr(skb, protoff, dataoff, dptr, datalen,
159 matchoff, matchlen, &addr, port)) 162 matchoff, matchlen, &addr, port)) {
163 nf_ct_helper_log(skb, ct, "cannot mangle SIP message");
160 return NF_DROP; 164 return NF_DROP;
165 }
161 request = 1; 166 request = 1;
162 } else 167 } else
163 request = 0; 168 request = 0;
@@ -190,8 +195,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
190 195
191 olen = *datalen; 196 olen = *datalen;
192 if (!map_addr(skb, protoff, dataoff, dptr, datalen, 197 if (!map_addr(skb, protoff, dataoff, dptr, datalen,
193 matchoff, matchlen, &addr, port)) 198 matchoff, matchlen, &addr, port)) {
199 nf_ct_helper_log(skb, ct, "cannot mangle Via header");
194 return NF_DROP; 200 return NF_DROP;
201 }
195 202
196 matchend = matchoff + matchlen + *datalen - olen; 203 matchend = matchoff + matchlen + *datalen - olen;
197 204
@@ -206,8 +213,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
206 &ct->tuplehash[!dir].tuple.dst.u3, 213 &ct->tuplehash[!dir].tuple.dst.u3,
207 true); 214 true);
208 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 215 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
209 poff, plen, buffer, buflen)) 216 poff, plen, buffer, buflen)) {
217 nf_ct_helper_log(skb, ct, "cannot mangle maddr");
210 return NF_DROP; 218 return NF_DROP;
219 }
211 } 220 }
212 221
213 /* The received= parameter (RFC 3261) contains the address 222 /* The received= parameter (RFC 3261) contains the address
@@ -222,6 +231,8 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
222 false); 231 false);
223 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
224 poff, plen, buffer, buflen)) 233 poff, plen, buffer, buflen)) {
234 nf_ct_helper_log(skb, ct, "cannot mangle received");
225 return NF_DROP; 235 return NF_DROP;
236 }
226 } 237 }
227 238
@@ -235,8 +245,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
235 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; 245 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
236 buflen = sprintf(buffer, "%u", ntohs(p)); 246 buflen = sprintf(buffer, "%u", ntohs(p));
237 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 247 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
238 poff, plen, buffer, buflen)) 248 poff, plen, buffer, buflen)) {
249 nf_ct_helper_log(skb, ct, "cannot mangle rport");
239 return NF_DROP; 250 return NF_DROP;
251 }
240 } 252 }
241 } 253 }
242 254
@@ -250,13 +262,36 @@ next:
250 &addr, &port) > 0) { 262 &addr, &port) > 0) {
251 if (!map_addr(skb, protoff, dataoff, dptr, datalen, 263 if (!map_addr(skb, protoff, dataoff, dptr, datalen,
252 matchoff, matchlen, 264 matchoff, matchlen,
253 &addr, port)) 265 &addr, port)) {
266 nf_ct_helper_log(skb, ct, "cannot mangle contact");
254 return NF_DROP; 267 return NF_DROP;
268 }
255 } 269 }
256 270
257 if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) || 271 if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
258 !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO)) 272 !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO)) {
273 nf_ct_helper_log(skb, ct, "cannot mangle SIP from/to");
259 return NF_DROP; 274 return NF_DROP;
275 }
276
277 /* Mangle destination port for Cisco phones, then fix up checksums */
278 if (dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport) {
279 struct udphdr *uh;
280
281 if (!skb_make_writable(skb, skb->len)) {
282 nf_ct_helper_log(skb, ct, "cannot mangle packet");
283 return NF_DROP;
284 }
285
286 uh = (void *)skb->data + protoff;
287 uh->dest = ct_sip_info->forced_dport;
288
289 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff,
290 0, 0, NULL, 0)) {
291 nf_ct_helper_log(skb, ct, "cannot mangle packet");
292 return NF_DROP;
293 }
294 }
260 295
261 return NF_ACCEPT; 296 return NF_ACCEPT;
262} 297}
@@ -311,8 +346,10 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
311 enum ip_conntrack_info ctinfo; 346 enum ip_conntrack_info ctinfo;
312 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 347 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
313 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 348 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
349 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
314 union nf_inet_addr newaddr; 350 union nf_inet_addr newaddr;
315 u_int16_t port; 351 u_int16_t port;
352 __be16 srcport;
316 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")]; 353 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
317 unsigned int buflen; 354 unsigned int buflen;
318 355
@@ -326,8 +363,9 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
326 /* If the signalling port matches the connection's source port in the 363 /* If the signalling port matches the connection's source port in the
327 * original direction, try to use the destination port in the opposite 364 * original direction, try to use the destination port in the opposite
328 * direction. */ 365 * direction. */
329 if (exp->tuple.dst.u.udp.port == 366 srcport = ct_sip_info->forced_dport ? :
330 ct->tuplehash[dir].tuple.src.u.udp.port) 367 ct->tuplehash[dir].tuple.src.u.udp.port;
368 if (exp->tuple.dst.u.udp.port == srcport)
331 port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port); 369 port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
332 else 370 else
333 port = ntohs(exp->tuple.dst.u.udp.port); 371 port = ntohs(exp->tuple.dst.u.udp.port);
@@ -351,15 +389,19 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
351 } 389 }
352 } 390 }
353 391
354 if (port == 0) 392 if (port == 0) {
393 nf_ct_helper_log(skb, ct, "all ports in use for SIP");
355 return NF_DROP; 394 return NF_DROP;
395 }
356 396
357 if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) || 397 if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) ||
358 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { 398 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
359 buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port); 399 buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port);
360 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 400 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
361 matchoff, matchlen, buffer, buflen)) 401 matchoff, matchlen, buffer, buflen)) {
402 nf_ct_helper_log(skb, ct, "cannot mangle packet");
362 goto err; 403 goto err;
404 }
363 } 405 }
364 return NF_ACCEPT; 406 return NF_ACCEPT;
365 407
@@ -552,14 +594,18 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
552 } 594 }
553 } 595 }
554 596
555 if (port == 0) 597 if (port == 0) {
598 nf_ct_helper_log(skb, ct, "all ports in use for SDP media");
556 goto err1; 599 goto err1;
600 }
557 601
558 /* Update media port. */ 602 /* Update media port. */
559 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && 603 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
560 !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen, 604 !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
561 mediaoff, medialen, port)) 605 mediaoff, medialen, port)) {
606 nf_ct_helper_log(skb, ct, "cannot mangle SDP message");
562 goto err2; 607 goto err2;
608 }
563 609
564 return NF_ACCEPT; 610 return NF_ACCEPT;
565 611
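
Both forced_dport consumers above use GCC's conditional-with-omitted-operand extension: "x ? : y" evaluates x once and yields it when non-zero, otherwise y. A forced_dport of 0 therefore means no override, and the tuple's genuine source port is used. The equivalent long form:

	/* "a ? : b" is GNU C for "a ? a : b" with a evaluated once. */
	__be16 srcport;

	if (ct_sip_info->forced_dport)
		srcport = ct_sip_info->forced_dport;
	else
		srcport = ct->tuplehash[dir].tuple.src.u.udp.port;
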
diff --git a/net/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c
index ccabbda71a3e..7f67e1d5310d 100644
--- a/net/netfilter/nf_nat_tftp.c
+++ b/net/netfilter/nf_nat_tftp.c
@@ -28,8 +28,10 @@ static unsigned int help(struct sk_buff *skb,
28 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; 28 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
29 exp->dir = IP_CT_DIR_REPLY; 29 exp->dir = IP_CT_DIR_REPLY;
30 exp->expectfn = nf_nat_follow_master; 30 exp->expectfn = nf_nat_follow_master;
31 if (nf_ct_expect_related(exp) != 0) 31 if (nf_ct_expect_related(exp) != 0) {
32 nf_ct_helper_log(skb, exp->master, "cannot add expectation");
32 return NF_DROP; 33 return NF_DROP;
34 }
33 return NF_ACCEPT; 35 return NF_ACCEPT;
34} 36}
35 37
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 58a09b7c3f6d..d578ec251712 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -36,8 +36,10 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
36 36
37static char __initdata nfversion[] = "0.30"; 37static char __initdata nfversion[] = "0.30";
38 38
39static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT]; 39static struct {
40static DEFINE_MUTEX(nfnl_mutex); 40 struct mutex mutex;
41 const struct nfnetlink_subsystem __rcu *subsys;
42} table[NFNL_SUBSYS_COUNT];
41 43
42static const int nfnl_group2type[NFNLGRP_MAX+1] = { 44static const int nfnl_group2type[NFNLGRP_MAX+1] = {
43 [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK, 45 [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
@@ -48,27 +50,32 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = {
48 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, 50 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
49}; 51};
50 52
51void nfnl_lock(void) 53void nfnl_lock(__u8 subsys_id)
52{ 54{
53 mutex_lock(&nfnl_mutex); 55 mutex_lock(&table[subsys_id].mutex);
54} 56}
55EXPORT_SYMBOL_GPL(nfnl_lock); 57EXPORT_SYMBOL_GPL(nfnl_lock);
56 58
57void nfnl_unlock(void) 59void nfnl_unlock(__u8 subsys_id)
58{ 60{
59 mutex_unlock(&nfnl_mutex); 61 mutex_unlock(&table[subsys_id].mutex);
60} 62}
61EXPORT_SYMBOL_GPL(nfnl_unlock); 63EXPORT_SYMBOL_GPL(nfnl_unlock);
62 64
65static struct mutex *nfnl_get_lock(__u8 subsys_id)
66{
67 return &table[subsys_id].mutex;
68}
69
63int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) 70int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
64{ 71{
65 nfnl_lock(); 72 nfnl_lock(n->subsys_id);
66 if (subsys_table[n->subsys_id]) { 73 if (table[n->subsys_id].subsys) {
67 nfnl_unlock(); 74 nfnl_unlock(n->subsys_id);
68 return -EBUSY; 75 return -EBUSY;
69 } 76 }
70 rcu_assign_pointer(subsys_table[n->subsys_id], n); 77 rcu_assign_pointer(table[n->subsys_id].subsys, n);
71 nfnl_unlock(); 78 nfnl_unlock(n->subsys_id);
72 79
73 return 0; 80 return 0;
74} 81}
@@ -76,9 +83,9 @@ EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
76 83
77int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) 84int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
78{ 85{
79 nfnl_lock(); 86 nfnl_lock(n->subsys_id);
80 subsys_table[n->subsys_id] = NULL; 87 table[n->subsys_id].subsys = NULL;
81 nfnl_unlock(); 88 nfnl_unlock(n->subsys_id);
82 synchronize_rcu(); 89 synchronize_rcu();
83 return 0; 90 return 0;
84} 91}
@@ -91,7 +98,7 @@ static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t t
91 if (subsys_id >= NFNL_SUBSYS_COUNT) 98 if (subsys_id >= NFNL_SUBSYS_COUNT)
92 return NULL; 99 return NULL;
93 100
94 return rcu_dereference(subsys_table[subsys_id]); 101 return rcu_dereference(table[subsys_id].subsys);
95} 102}
96 103
97static inline const struct nfnl_callback * 104static inline const struct nfnl_callback *
@@ -175,6 +182,7 @@ replay:
175 struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; 182 struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
176 struct nlattr *attr = (void *)nlh + min_len; 183 struct nlattr *attr = (void *)nlh + min_len;
177 int attrlen = nlh->nlmsg_len - min_len; 184 int attrlen = nlh->nlmsg_len - min_len;
185 __u8 subsys_id = NFNL_SUBSYS_ID(type);
178 186
179 err = nla_parse(cda, ss->cb[cb_id].attr_count, 187 err = nla_parse(cda, ss->cb[cb_id].attr_count,
180 attr, attrlen, ss->cb[cb_id].policy); 188 attr, attrlen, ss->cb[cb_id].policy);
@@ -189,10 +197,9 @@ replay:
189 rcu_read_unlock(); 197 rcu_read_unlock();
190 } else { 198 } else {
191 rcu_read_unlock(); 199 rcu_read_unlock();
192 nfnl_lock(); 200 nfnl_lock(subsys_id);
193 if (rcu_dereference_protected( 201 if (rcu_dereference_protected(table[subsys_id].subsys,
194 subsys_table[NFNL_SUBSYS_ID(type)], 202 lockdep_is_held(nfnl_get_lock(subsys_id))) != ss ||
195 lockdep_is_held(&nfnl_mutex)) != ss ||
196 nfnetlink_find_client(type, ss) != nc) 203 nfnetlink_find_client(type, ss) != nc)
197 err = -EAGAIN; 204 err = -EAGAIN;
198 else if (nc->call) 205 else if (nc->call)
@@ -200,7 +207,7 @@ replay:
200 (const struct nlattr **)cda); 207 (const struct nlattr **)cda);
201 else 208 else
202 err = -EINVAL; 209 err = -EINVAL;
203 nfnl_unlock(); 210 nfnl_unlock(subsys_id);
204 } 211 }
205 if (err == -EAGAIN) 212 if (err == -EAGAIN)
206 goto replay; 213 goto replay;
@@ -267,6 +274,11 @@ static struct pernet_operations nfnetlink_net_ops = {
267 274
268static int __init nfnetlink_init(void) 275static int __init nfnetlink_init(void)
269{ 276{
277 int i;
278
 279 	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
280 mutex_init(&table[i].mutex);
281
270 pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); 282 pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
271 return register_pernet_subsys(&nfnetlink_net_ops); 283 return register_pernet_subsys(&nfnetlink_net_ops);
272} 284}
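
The nfnetlink change replaces the single global nfnl_mutex with a per-subsystem {mutex, subsys pointer} pair, so registration and message handling for different subsystems no longer serialize on one lock. A self-contained pthread model of the same layout (SUBSYS_COUNT and the -1 return are stand-ins for NFNL_SUBSYS_COUNT and -EBUSY, not the kernel values):

#include <pthread.h>
#include <stdio.h>

#define SUBSYS_COUNT 3    /* stands in for NFNL_SUBSYS_COUNT */

/* One lock per subsystem instead of a single global mutex. */
static struct {
    pthread_mutex_t mutex;
    const void *subsys;
} table[SUBSYS_COUNT];

static void subsys_lock(unsigned int id)   { pthread_mutex_lock(&table[id].mutex); }
static void subsys_unlock(unsigned int id) { pthread_mutex_unlock(&table[id].mutex); }

static int subsys_register(unsigned int id, const void *ss)
{
    int ret = 0;

    subsys_lock(id);
    if (table[id].subsys)
        ret = -1;    /* -EBUSY in the kernel code */
    else
        table[id].subsys = ss;
    subsys_unlock(id);
    return ret;
}

int main(void)
{
    static const char ctnetlink[] = "ctnetlink";
    unsigned int i;

    for (i = 0; i < SUBSYS_COUNT; i++)
        pthread_mutex_init(&table[i].mutex, NULL);

    printf("first register:  %d\n", subsys_register(1, ctnetlink));
    printf("second register: %d\n", subsys_register(1, ctnetlink));
    return 0;
}
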
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9f199f2e31fa..92fd8eca0d31 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/if_arp.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/ip.h> 18#include <linux/ip.h>
18#include <linux/ipv6.h> 19#include <linux/ipv6.h>
@@ -384,6 +385,7 @@ __build_packet_message(struct nfulnl_instance *inst,
384 struct nfgenmsg *nfmsg; 385 struct nfgenmsg *nfmsg;
385 sk_buff_data_t old_tail = inst->skb->tail; 386 sk_buff_data_t old_tail = inst->skb->tail;
386 struct sock *sk; 387 struct sock *sk;
388 const unsigned char *hwhdrp;
387 389
388 nlh = nlmsg_put(inst->skb, 0, 0, 390 nlh = nlmsg_put(inst->skb, 0, 0,
389 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, 391 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
@@ -485,9 +487,17 @@ __build_packet_message(struct nfulnl_instance *inst,
485 if (indev && skb_mac_header_was_set(skb)) { 487 if (indev && skb_mac_header_was_set(skb)) {
486 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || 488 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
487 nla_put_be16(inst->skb, NFULA_HWLEN, 489 nla_put_be16(inst->skb, NFULA_HWLEN,
488 htons(skb->dev->hard_header_len)) || 490 htons(skb->dev->hard_header_len)))
489 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 491 goto nla_put_failure;
490 skb_mac_header(skb))) 492
493 hwhdrp = skb_mac_header(skb);
494
495 if (skb->dev->type == ARPHRD_SIT)
496 hwhdrp -= ETH_HLEN;
497
498 if (hwhdrp >= skb->head &&
499 nla_put(inst->skb, NFULA_HWHEADER,
500 skb->dev->hard_header_len, hwhdrp))
491 goto nla_put_failure; 501 goto nla_put_failure;
492 } 502 }
493 503
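
For NFULA_HWHEADER, the fix rewinds the mac-header pointer by ETH_HLEN on ARPHRD_SIT devices and only copies the header if the result still lies inside the skb. A small sketch of that bounds check, using a plain buffer in place of skb->head:

#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14    /* as in <linux/if_ether.h> */

/* Model of the NFULA_HWHEADER fix: on a SIT (IPv6-in-IPv4 tunnel)
 * device the mac-header pointer sits past the real Ethernet header,
 * so it is rewound by ETH_HLEN -- but only used if it still points
 * inside the buffer ("skb->head" here is just the start of pkt[]). */
static const unsigned char *hw_header(const unsigned char *head,
                                      const unsigned char *mac,
                                      int dev_is_sit)
{
    const unsigned char *hwhdrp = mac;

    if (dev_is_sit)
        hwhdrp -= ETH_HLEN;

    return hwhdrp >= head ? hwhdrp : NULL;
}

int main(void)
{
    unsigned char pkt[64];
    memset(pkt, 0, sizeof(pkt));

    /* mac header 20 bytes in: rewinding by 14 stays in bounds */
    printf("sit, room:    %s\n", hw_header(pkt, pkt + 20, 1) ? "copy" : "skip");
    /* mac header at the very start: rewinding would underflow */
    printf("sit, no room: %s\n", hw_header(pkt, pkt, 1) ? "copy" : "skip");
    return 0;
}
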
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8d987c3573fd..686c7715d777 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
345} 345}
346EXPORT_SYMBOL_GPL(xt_find_revision); 346EXPORT_SYMBOL_GPL(xt_find_revision);
347 347
348static char *textify_hooks(char *buf, size_t size, unsigned int mask) 348static char *
349textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
349{ 350{
350 static const char *const names[] = { 351 static const char *const inetbr_names[] = {
351 "PREROUTING", "INPUT", "FORWARD", 352 "PREROUTING", "INPUT", "FORWARD",
352 "OUTPUT", "POSTROUTING", "BROUTING", 353 "OUTPUT", "POSTROUTING", "BROUTING",
353 }; 354 };
354 unsigned int i; 355 static const char *const arp_names[] = {
356 "INPUT", "FORWARD", "OUTPUT",
357 };
358 const char *const *names;
359 unsigned int i, max;
355 char *p = buf; 360 char *p = buf;
356 bool np = false; 361 bool np = false;
357 int res; 362 int res;
358 363
364 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
365 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
366 ARRAY_SIZE(inetbr_names);
359 *p = '\0'; 367 *p = '\0';
360 for (i = 0; i < ARRAY_SIZE(names); ++i) { 368 for (i = 0; i < max; ++i) {
361 if (!(mask & (1 << i))) 369 if (!(mask & (1 << i)))
362 continue; 370 continue;
363 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); 371 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
@@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,
402 pr_err("%s_tables: %s match: used from hooks %s, but only " 410 pr_err("%s_tables: %s match: used from hooks %s, but only "
403 "valid from %s\n", 411 "valid from %s\n",
404 xt_prefix[par->family], par->match->name, 412 xt_prefix[par->family], par->match->name,
405 textify_hooks(used, sizeof(used), par->hook_mask), 413 textify_hooks(used, sizeof(used), par->hook_mask,
406 textify_hooks(allow, sizeof(allow), par->match->hooks)); 414 par->family),
415 textify_hooks(allow, sizeof(allow), par->match->hooks,
416 par->family));
407 return -EINVAL; 417 return -EINVAL;
408 } 418 }
409 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 419 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
@@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,
575 pr_err("%s_tables: %s target: used from hooks %s, but only " 585 pr_err("%s_tables: %s target: used from hooks %s, but only "
576 "usable from %s\n", 586 "usable from %s\n",
577 xt_prefix[par->family], par->target->name, 587 xt_prefix[par->family], par->target->name,
578 textify_hooks(used, sizeof(used), par->hook_mask), 588 textify_hooks(used, sizeof(used), par->hook_mask,
579 textify_hooks(allow, sizeof(allow), par->target->hooks)); 589 par->family),
590 textify_hooks(allow, sizeof(allow), par->target->hooks,
591 par->family));
580 return -EINVAL; 592 return -EINVAL;
581 } 593 }
582 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 594 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
@@ -1311,12 +1323,12 @@ int xt_proto_init(struct net *net, u_int8_t af)
1311out_remove_matches: 1323out_remove_matches:
1312 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1324 strlcpy(buf, xt_prefix[af], sizeof(buf));
1313 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1325 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1314 proc_net_remove(net, buf); 1326 remove_proc_entry(buf, net->proc_net);
1315 1327
1316out_remove_tables: 1328out_remove_tables:
1317 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1329 strlcpy(buf, xt_prefix[af], sizeof(buf));
1318 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1330 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1319 proc_net_remove(net, buf); 1331 remove_proc_entry(buf, net->proc_net);
1320out: 1332out:
1321 return -1; 1333 return -1;
1322#endif 1334#endif
@@ -1330,15 +1342,15 @@ void xt_proto_fini(struct net *net, u_int8_t af)
1330 1342
1331 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1343 strlcpy(buf, xt_prefix[af], sizeof(buf));
1332 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1344 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1333 proc_net_remove(net, buf); 1345 remove_proc_entry(buf, net->proc_net);
1334 1346
1335 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1347 strlcpy(buf, xt_prefix[af], sizeof(buf));
1336 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1348 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1337 proc_net_remove(net, buf); 1349 remove_proc_entry(buf, net->proc_net);
1338 1350
1339 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1351 strlcpy(buf, xt_prefix[af], sizeof(buf));
1340 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1352 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1341 proc_net_remove(net, buf); 1353 remove_proc_entry(buf, net->proc_net);
1342#endif /*CONFIG_PROC_FS*/ 1354#endif /*CONFIG_PROC_FS*/
1343} 1355}
1344EXPORT_SYMBOL_GPL(xt_proto_fini); 1356EXPORT_SYMBOL_GPL(xt_proto_fini);
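
textify_hooks() previously indexed one shared name table regardless of protocol, so an arptables hook mask printed inet/bridge hook names. A runnable model of the corrected per-protocol lookup (the masks in main() are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* arptables only has three hooks; pick the name table per protocol. */
static const char *const inetbr_names[] = {
    "PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING", "BROUTING",
};
static const char *const arp_names[] = {
    "INPUT", "FORWARD", "OUTPUT",
};

static void print_hooks(unsigned int mask, int is_arp)
{
    const char *const *names = is_arp ? arp_names : inetbr_names;
    unsigned int max = is_arp ? ARRAY_SIZE(arp_names)
                              : ARRAY_SIZE(inetbr_names);
    unsigned int i;
    int np = 0;

    for (i = 0; i < max; i++) {
        if (!(mask & (1u << i)))
            continue;
        printf("%s%s", np ? "/" : "", names[i]);
        np = 1;
    }
    printf("\n");
}

int main(void)
{
    print_hooks(0x3, 0);    /* iptables:  PREROUTING/INPUT */
    print_hooks(0x3, 1);    /* arptables: INPUT/FORWARD */
    return 0;
}
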
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index ae7f5daeee43..a60261cb0e80 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -20,12 +20,8 @@
20#include <net/netfilter/nf_conntrack_timeout.h> 20#include <net/netfilter/nf_conntrack_timeout.h>
21#include <net/netfilter/nf_conntrack_zones.h> 21#include <net/netfilter/nf_conntrack_zones.h>
22 22
23static unsigned int xt_ct_target_v0(struct sk_buff *skb, 23static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
24 const struct xt_action_param *par)
25{ 24{
26 const struct xt_ct_target_info *info = par->targinfo;
27 struct nf_conn *ct = info->ct;
28
29 /* Previously seen (loopback)? Ignore. */ 25 /* Previously seen (loopback)? Ignore. */
30 if (skb->nfct != NULL) 26 if (skb->nfct != NULL)
31 return XT_CONTINUE; 27 return XT_CONTINUE;
@@ -37,21 +33,22 @@ static unsigned int xt_ct_target_v0(struct sk_buff *skb,
37 return XT_CONTINUE; 33 return XT_CONTINUE;
38} 34}
39 35
40static unsigned int xt_ct_target_v1(struct sk_buff *skb, 36static unsigned int xt_ct_target_v0(struct sk_buff *skb,
41 const struct xt_action_param *par) 37 const struct xt_action_param *par)
42{ 38{
43 const struct xt_ct_target_info_v1 *info = par->targinfo; 39 const struct xt_ct_target_info *info = par->targinfo;
44 struct nf_conn *ct = info->ct; 40 struct nf_conn *ct = info->ct;
45 41
46 /* Previously seen (loopback)? Ignore. */ 42 return xt_ct_target(skb, ct);
47 if (skb->nfct != NULL) 43}
48 return XT_CONTINUE;
49 44
50 atomic_inc(&ct->ct_general.use); 45static unsigned int xt_ct_target_v1(struct sk_buff *skb,
51 skb->nfct = &ct->ct_general; 46 const struct xt_action_param *par)
52 skb->nfctinfo = IP_CT_NEW; 47{
48 const struct xt_ct_target_info_v1 *info = par->targinfo;
49 struct nf_conn *ct = info->ct;
53 50
54 return XT_CONTINUE; 51 return xt_ct_target(skb, ct);
55} 52}
56 53
57static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) 54static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
@@ -104,63 +101,6 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
104 return 0; 101 return 0;
105} 102}
106 103
107static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
108{
109 struct xt_ct_target_info *info = par->targinfo;
110 struct nf_conntrack_tuple t;
111 struct nf_conn *ct;
112 int ret;
113
114 if (info->flags & ~XT_CT_NOTRACK)
115 return -EINVAL;
116
117 if (info->flags & XT_CT_NOTRACK) {
118 ct = nf_ct_untracked_get();
119 atomic_inc(&ct->ct_general.use);
120 goto out;
121 }
122
123#ifndef CONFIG_NF_CONNTRACK_ZONES
124 if (info->zone)
125 goto err1;
126#endif
127
128 ret = nf_ct_l3proto_try_module_get(par->family);
129 if (ret < 0)
130 goto err1;
131
132 memset(&t, 0, sizeof(t));
133 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
134 ret = PTR_ERR(ct);
135 if (IS_ERR(ct))
136 goto err2;
137
138 ret = 0;
139 if ((info->ct_events || info->exp_events) &&
140 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
141 GFP_KERNEL))
142 goto err3;
143
144 if (info->helper[0]) {
145 ret = xt_ct_set_helper(ct, info->helper, par);
146 if (ret < 0)
147 goto err3;
148 }
149
150 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
151 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
152out:
153 info->ct = ct;
154 return 0;
155
156err3:
157 nf_conntrack_free(ct);
158err2:
159 nf_ct_l3proto_module_put(par->family);
160err1:
161 return ret;
162}
163
164#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 104#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
165static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout) 105static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
166{ 106{
@@ -238,15 +178,12 @@ out:
238#endif 178#endif
239} 179}
240 180
241static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) 181static int xt_ct_tg_check(const struct xt_tgchk_param *par,
182 struct xt_ct_target_info_v1 *info)
242{ 183{
243 struct xt_ct_target_info_v1 *info = par->targinfo;
244 struct nf_conntrack_tuple t; 184 struct nf_conntrack_tuple t;
245 struct nf_conn *ct; 185 struct nf_conn *ct;
246 int ret; 186 int ret = -EOPNOTSUPP;
247
248 if (info->flags & ~XT_CT_NOTRACK)
249 return -EINVAL;
250 187
251 if (info->flags & XT_CT_NOTRACK) { 188 if (info->flags & XT_CT_NOTRACK) {
252 ct = nf_ct_untracked_get(); 189 ct = nf_ct_untracked_get();
@@ -289,6 +226,10 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
289 226
290 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 227 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
291 __set_bit(IPS_CONFIRMED_BIT, &ct->status); 228 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
229
230 /* Overload tuple linked list to put us in template list. */
231 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
232 &par->net->ct.tmpl);
292out: 233out:
293 info->ct = ct; 234 info->ct = ct;
294 return 0; 235 return 0;
@@ -301,20 +242,49 @@ err1:
301 return ret; 242 return ret;
302} 243}
303 244
304static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par) 245static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
305{ 246{
306 struct xt_ct_target_info *info = par->targinfo; 247 struct xt_ct_target_info *info = par->targinfo;
307 struct nf_conn *ct = info->ct; 248 struct xt_ct_target_info_v1 info_v1 = {
308 struct nf_conn_help *help; 249 .flags = info->flags,
250 .zone = info->zone,
251 .ct_events = info->ct_events,
252 .exp_events = info->exp_events,
253 };
254 int ret;
309 255
310 if (!nf_ct_is_untracked(ct)) { 256 if (info->flags & ~XT_CT_NOTRACK)
311 help = nfct_help(ct); 257 return -EINVAL;
312 if (help)
313 module_put(help->helper->me);
314 258
315 nf_ct_l3proto_module_put(par->family); 259 memcpy(info_v1.helper, info->helper, sizeof(info->helper));
316 } 260
317 nf_ct_put(info->ct); 261 ret = xt_ct_tg_check(par, &info_v1);
262 if (ret < 0)
263 return ret;
264
265 info->ct = info_v1.ct;
266
267 return ret;
268}
269
270static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
271{
272 struct xt_ct_target_info_v1 *info = par->targinfo;
273
274 if (info->flags & ~XT_CT_NOTRACK)
275 return -EINVAL;
276
277 return xt_ct_tg_check(par, par->targinfo);
278}
279
280static int xt_ct_tg_check_v2(const struct xt_tgchk_param *par)
281{
282 struct xt_ct_target_info_v1 *info = par->targinfo;
283
284 if (info->flags & ~XT_CT_MASK)
285 return -EINVAL;
286
287 return xt_ct_tg_check(par, par->targinfo);
318} 288}
319 289
320static void xt_ct_destroy_timeout(struct nf_conn *ct) 290static void xt_ct_destroy_timeout(struct nf_conn *ct)
@@ -335,9 +305,9 @@ static void xt_ct_destroy_timeout(struct nf_conn *ct)
335#endif 305#endif
336} 306}
337 307
338static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par) 308static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
309 struct xt_ct_target_info_v1 *info)
339{ 310{
340 struct xt_ct_target_info_v1 *info = par->targinfo;
341 struct nf_conn *ct = info->ct; 311 struct nf_conn *ct = info->ct;
342 struct nf_conn_help *help; 312 struct nf_conn_help *help;
343 313
@@ -353,6 +323,26 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
353 nf_ct_put(info->ct); 323 nf_ct_put(info->ct);
354} 324}
355 325
326static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
327{
328 struct xt_ct_target_info *info = par->targinfo;
329 struct xt_ct_target_info_v1 info_v1 = {
330 .flags = info->flags,
331 .zone = info->zone,
332 .ct_events = info->ct_events,
333 .exp_events = info->exp_events,
334 .ct = info->ct,
335 };
336 memcpy(info_v1.helper, info->helper, sizeof(info->helper));
337
338 xt_ct_tg_destroy(par, &info_v1);
339}
340
341static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
342{
343 xt_ct_tg_destroy(par, par->targinfo);
344}
345
356static struct xt_target xt_ct_tg_reg[] __read_mostly = { 346static struct xt_target xt_ct_tg_reg[] __read_mostly = {
357 { 347 {
358 .name = "CT", 348 .name = "CT",
@@ -375,16 +365,73 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
375 .table = "raw", 365 .table = "raw",
376 .me = THIS_MODULE, 366 .me = THIS_MODULE,
377 }, 367 },
368 {
369 .name = "CT",
370 .family = NFPROTO_UNSPEC,
371 .revision = 2,
372 .targetsize = sizeof(struct xt_ct_target_info_v1),
373 .checkentry = xt_ct_tg_check_v2,
374 .destroy = xt_ct_tg_destroy_v1,
375 .target = xt_ct_target_v1,
376 .table = "raw",
377 .me = THIS_MODULE,
378 },
379};
380
381static unsigned int
382notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
383{
384 /* Previously seen (loopback)? Ignore. */
385 if (skb->nfct != NULL)
386 return XT_CONTINUE;
387
388 skb->nfct = &nf_ct_untracked_get()->ct_general;
389 skb->nfctinfo = IP_CT_NEW;
390 nf_conntrack_get(skb->nfct);
391
392 return XT_CONTINUE;
393}
394
395static int notrack_chk(const struct xt_tgchk_param *par)
396{
397 if (!par->net->xt.notrack_deprecated_warning) {
398 pr_info("netfilter: NOTRACK target is deprecated, "
399 "use CT instead or upgrade iptables\n");
400 par->net->xt.notrack_deprecated_warning = true;
401 }
402 return 0;
403}
404
405static struct xt_target notrack_tg_reg __read_mostly = {
406 .name = "NOTRACK",
407 .revision = 0,
408 .family = NFPROTO_UNSPEC,
409 .checkentry = notrack_chk,
410 .target = notrack_tg,
411 .table = "raw",
412 .me = THIS_MODULE,
378}; 413};
379 414
380static int __init xt_ct_tg_init(void) 415static int __init xt_ct_tg_init(void)
381{ 416{
382 return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg)); 417 int ret;
418
419 ret = xt_register_target(&notrack_tg_reg);
420 if (ret < 0)
421 return ret;
422
423 ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
424 if (ret < 0) {
425 xt_unregister_target(&notrack_tg_reg);
426 return ret;
427 }
428 return 0;
383} 429}
384 430
385static void __exit xt_ct_tg_exit(void) 431static void __exit xt_ct_tg_exit(void)
386{ 432{
387 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg)); 433 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
434 xt_unregister_target(&notrack_tg_reg);
388} 435}
389 436
390module_init(xt_ct_tg_init); 437module_init(xt_ct_tg_init);
@@ -394,3 +441,5 @@ MODULE_LICENSE("GPL");
394MODULE_DESCRIPTION("Xtables: connection tracking target"); 441MODULE_DESCRIPTION("Xtables: connection tracking target");
395MODULE_ALIAS("ipt_CT"); 442MODULE_ALIAS("ipt_CT");
396MODULE_ALIAS("ip6t_CT"); 443MODULE_ALIAS("ip6t_CT");
444MODULE_ALIAS("ipt_NOTRACK");
445MODULE_ALIAS("ip6t_NOTRACK");
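
The xt_CT rework funnels the v0 and v1 entry points through shared helpers by widening the v0 info into a v1 layout on the stack and copying the resulting ct pointer back afterwards. A compact model of that adapter, with illustrative struct fields rather than the full xt_ct_target_info layouts:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for xt_ct_target_info / _v1. */
struct info_v0 { unsigned int flags; char helper[16]; const void *ct; };
struct info_v1 { unsigned int flags; char helper[16]; const void *ct;
                 unsigned int timeout; };

static int check_common(struct info_v1 *info)
{
    info->ct = "template-ct";    /* stands in for nf_conntrack_alloc() */
    return 0;
}

/* v0 entry point: widen to v1, run the shared check, copy back. */
static int check_v0(struct info_v0 *info)
{
    struct info_v1 info_v1 = { .flags = info->flags };
    int ret;

    memcpy(info_v1.helper, info->helper, sizeof(info->helper));

    ret = check_common(&info_v1);
    if (ret < 0)
        return ret;

    info->ct = info_v1.ct;    /* propagate the result to the v0 copy */
    return 0;
}

int main(void)
{
    struct info_v0 info = { .flags = 0 };

    printf("check_v0: %d, ct=%s\n", check_v0(&info),
           (const char *)info.ct);
    return 0;
}
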
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
new file mode 100644
index 000000000000..12d4da8e6c77
--- /dev/null
+++ b/net/netfilter/xt_bpf.c
@@ -0,0 +1,73 @@
1/* Xtables module to match packets using a BPF filter.
2 * Copyright 2013 Google Inc.
3 * Written by Willem de Bruijn <willemb@google.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/skbuff.h>
12#include <linux/filter.h>
13
14#include <linux/netfilter/xt_bpf.h>
15#include <linux/netfilter/x_tables.h>
16
17MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>");
18MODULE_DESCRIPTION("Xtables: BPF filter match");
19MODULE_LICENSE("GPL");
20MODULE_ALIAS("ipt_bpf");
21MODULE_ALIAS("ip6t_bpf");
22
23static int bpf_mt_check(const struct xt_mtchk_param *par)
24{
25 struct xt_bpf_info *info = par->matchinfo;
26 struct sock_fprog program;
27
28 program.len = info->bpf_program_num_elem;
29 program.filter = (struct sock_filter __user *) info->bpf_program;
30 if (sk_unattached_filter_create(&info->filter, &program)) {
31 pr_info("bpf: check failed: parse error\n");
32 return -EINVAL;
33 }
34
35 return 0;
36}
37
38static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
39{
40 const struct xt_bpf_info *info = par->matchinfo;
41
42 return SK_RUN_FILTER(info->filter, skb);
43}
44
45static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
46{
47 const struct xt_bpf_info *info = par->matchinfo;
48 sk_unattached_filter_destroy(info->filter);
49}
50
51static struct xt_match bpf_mt_reg __read_mostly = {
52 .name = "bpf",
53 .revision = 0,
54 .family = NFPROTO_UNSPEC,
55 .checkentry = bpf_mt_check,
56 .match = bpf_mt,
57 .destroy = bpf_mt_destroy,
58 .matchsize = sizeof(struct xt_bpf_info),
59 .me = THIS_MODULE,
60};
61
62static int __init bpf_mt_init(void)
63{
64 return xt_register_match(&bpf_mt_reg);
65}
66
67static void __exit bpf_mt_exit(void)
68{
69 xt_unregister_match(&bpf_mt_reg);
70}
71
72module_init(bpf_mt_init);
73module_exit(bpf_mt_exit);
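
xt_bpf consumes a classic-BPF instruction array in the same format userspace already passes to SO_ATTACH_FILTER; sk_unattached_filter_create() just builds the filter without a socket. A userspace sketch of that sock_fprog plumbing, with the simplest possible program (unconditionally "match"):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
    /* One instruction: return non-zero, i.e. accept every packet. */
    struct sock_filter insns[] = {
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
    };
    struct sock_fprog prog = {
        .len = sizeof(insns) / sizeof(insns[0]),
        .filter = insns,
    };
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* Same program layout the xt_bpf match info carries. */
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                   &prog, sizeof(prog)) < 0) {
        perror("SO_ATTACH_FILTER");
        return 1;
    }
    printf("filter attached: %u instruction(s)\n", prog.len);
    return 0;
}
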
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
new file mode 100644
index 000000000000..9f8719df2001
--- /dev/null
+++ b/net/netfilter/xt_connlabel.c
@@ -0,0 +1,99 @@
1/*
2 * (C) 2013 Astaro GmbH & Co KG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <net/netfilter/nf_conntrack.h>
12#include <net/netfilter/nf_conntrack_labels.h>
13#include <linux/netfilter/x_tables.h>
14
15MODULE_LICENSE("GPL");
16MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 17MODULE_DESCRIPTION("Xtables: add/match connection tracking labels");
18MODULE_ALIAS("ipt_connlabel");
19MODULE_ALIAS("ip6t_connlabel");
20
21static bool
22connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
23{
24 const struct xt_connlabel_mtinfo *info = par->matchinfo;
25 enum ip_conntrack_info ctinfo;
26 struct nf_conn *ct;
27 bool invert = info->options & XT_CONNLABEL_OP_INVERT;
28
29 ct = nf_ct_get(skb, &ctinfo);
30 if (ct == NULL || nf_ct_is_untracked(ct))
31 return invert;
32
33 if (info->options & XT_CONNLABEL_OP_SET)
34 return (nf_connlabel_set(ct, info->bit) == 0) ^ invert;
35
36 return nf_connlabel_match(ct, info->bit) ^ invert;
37}
38
39static int connlabel_mt_check(const struct xt_mtchk_param *par)
40{
41 const int options = XT_CONNLABEL_OP_INVERT |
42 XT_CONNLABEL_OP_SET;
43 struct xt_connlabel_mtinfo *info = par->matchinfo;
44 int ret;
45 size_t words;
46
47 if (info->bit > XT_CONNLABEL_MAXBIT)
48 return -ERANGE;
49
50 if (info->options & ~options) {
51 pr_err("Unknown options in mask %x\n", info->options);
52 return -EINVAL;
53 }
54
55 ret = nf_ct_l3proto_try_module_get(par->family);
56 if (ret < 0) {
57 pr_info("cannot load conntrack support for proto=%u\n",
58 par->family);
59 return ret;
60 }
61
62 par->net->ct.labels_used++;
63 words = BITS_TO_LONGS(info->bit+1);
64 if (words > par->net->ct.label_words)
65 par->net->ct.label_words = words;
66
67 return ret;
68}
69
70static void connlabel_mt_destroy(const struct xt_mtdtor_param *par)
71{
72 par->net->ct.labels_used--;
73 if (par->net->ct.labels_used == 0)
74 par->net->ct.label_words = 0;
75 nf_ct_l3proto_module_put(par->family);
76}
77
78static struct xt_match connlabels_mt_reg __read_mostly = {
79 .name = "connlabel",
80 .family = NFPROTO_UNSPEC,
81 .checkentry = connlabel_mt_check,
82 .match = connlabel_mt,
83 .matchsize = sizeof(struct xt_connlabel_mtinfo),
84 .destroy = connlabel_mt_destroy,
85 .me = THIS_MODULE,
86};
87
88static int __init connlabel_mt_init(void)
89{
90 return xt_register_match(&connlabels_mt_reg);
91}
92
93static void __exit connlabel_mt_exit(void)
94{
95 xt_unregister_match(&connlabels_mt_reg);
96}
97
98module_init(connlabel_mt_init);
99module_exit(connlabel_mt_exit);
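
connlabel_mt_check() grows the per-namespace label_words high-water mark so the conntrack label extension is always large enough for the highest bit any rule sets or matches. The sizing math, runnable stand-alone (BITS_TO_LONGS reimplemented as in the kernel):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* The word count only grows while rules reference higher bits. */
static unsigned long label_words;

static void connlabel_check(unsigned int bit)
{
    unsigned long words = BITS_TO_LONGS(bit + 1);

    if (words > label_words)
        label_words = words;
}

int main(void)
{
    connlabel_check(0);
    connlabel_check(63);
    connlabel_check(64);    /* first bit of a second long (on 64-bit) */
    printf("label_words = %lu\n", label_words);
    return 0;
}
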
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 26a668a84aa2..98218c896d2e 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -157,11 +157,22 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
157/* allocate dsthash_ent, initialize dst, put in htable and lock it */ 157/* allocate dsthash_ent, initialize dst, put in htable and lock it */
158static struct dsthash_ent * 158static struct dsthash_ent *
159dsthash_alloc_init(struct xt_hashlimit_htable *ht, 159dsthash_alloc_init(struct xt_hashlimit_htable *ht,
160 const struct dsthash_dst *dst) 160 const struct dsthash_dst *dst, bool *race)
161{ 161{
162 struct dsthash_ent *ent; 162 struct dsthash_ent *ent;
163 163
164 spin_lock(&ht->lock); 164 spin_lock(&ht->lock);
165
166 /* Two or more packets may race to create the same entry in the
 168 	 * hashtable, double check if this packet lost the race.
168 */
169 ent = dsthash_find(ht, dst);
170 if (ent != NULL) {
171 spin_unlock(&ht->lock);
172 *race = true;
173 return ent;
174 }
175
165 /* initialize hash with random val at the time we allocate 176 /* initialize hash with random val at the time we allocate
166 * the first hashtable entry */ 177 * the first hashtable entry */
167 if (unlikely(!ht->rnd_initialized)) { 178 if (unlikely(!ht->rnd_initialized)) {
@@ -318,7 +329,10 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
318 parent = hashlimit_net->ipt_hashlimit; 329 parent = hashlimit_net->ipt_hashlimit;
319 else 330 else
320 parent = hashlimit_net->ip6t_hashlimit; 331 parent = hashlimit_net->ip6t_hashlimit;
321 remove_proc_entry(hinfo->pde->name, parent); 332
 333 	if (parent != NULL)
334 remove_proc_entry(hinfo->pde->name, parent);
335
322 htable_selective_cleanup(hinfo, select_all); 336 htable_selective_cleanup(hinfo, select_all);
323 vfree(hinfo); 337 vfree(hinfo);
324} 338}
@@ -585,6 +599,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
585 unsigned long now = jiffies; 599 unsigned long now = jiffies;
586 struct dsthash_ent *dh; 600 struct dsthash_ent *dh;
587 struct dsthash_dst dst; 601 struct dsthash_dst dst;
602 bool race = false;
588 u32 cost; 603 u32 cost;
589 604
590 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) 605 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
@@ -593,13 +608,18 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
593 rcu_read_lock_bh(); 608 rcu_read_lock_bh();
594 dh = dsthash_find(hinfo, &dst); 609 dh = dsthash_find(hinfo, &dst);
595 if (dh == NULL) { 610 if (dh == NULL) {
596 dh = dsthash_alloc_init(hinfo, &dst); 611 dh = dsthash_alloc_init(hinfo, &dst, &race);
597 if (dh == NULL) { 612 if (dh == NULL) {
598 rcu_read_unlock_bh(); 613 rcu_read_unlock_bh();
599 goto hotdrop; 614 goto hotdrop;
615 } else if (race) {
616 /* Already got an entry, update expiration timeout */
617 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
618 rateinfo_recalc(dh, now, hinfo->cfg.mode);
619 } else {
620 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
621 rateinfo_init(dh, hinfo);
600 } 622 }
601 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
602 rateinfo_init(dh, hinfo);
603 } else { 623 } else {
604 /* update expiration timeout */ 624 /* update expiration timeout */
605 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); 625 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
@@ -847,7 +867,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
847#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 867#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
848 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); 868 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
849 if (!hashlimit_net->ip6t_hashlimit) { 869 if (!hashlimit_net->ip6t_hashlimit) {
850 proc_net_remove(net, "ipt_hashlimit"); 870 remove_proc_entry("ipt_hashlimit", net->proc_net);
851 return -ENOMEM; 871 return -ENOMEM;
852 } 872 }
853#endif 873#endif
@@ -856,9 +876,30 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
856 876
857static void __net_exit hashlimit_proc_net_exit(struct net *net) 877static void __net_exit hashlimit_proc_net_exit(struct net *net)
858{ 878{
859 proc_net_remove(net, "ipt_hashlimit"); 879 struct xt_hashlimit_htable *hinfo;
880 struct hlist_node *pos;
881 struct proc_dir_entry *pde;
882 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
883
 884 	/* hashlimit_net_exit() is called before hashlimit_mt_destroy().
 885 	 * Make sure that the parent ipt_hashlimit proc entry is empty
 886 	 * before trying to remove it.
887 */
888 mutex_lock(&hashlimit_mutex);
889 pde = hashlimit_net->ipt_hashlimit;
890 if (pde == NULL)
891 pde = hashlimit_net->ip6t_hashlimit;
892
893 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
894 remove_proc_entry(hinfo->pde->name, pde);
895
896 hashlimit_net->ipt_hashlimit = NULL;
897 hashlimit_net->ip6t_hashlimit = NULL;
898 mutex_unlock(&hashlimit_mutex);
899
900 remove_proc_entry("ipt_hashlimit", net->proc_net);
860#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 901#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
861 proc_net_remove(net, "ip6t_hashlimit"); 902 remove_proc_entry("ip6t_hashlimit", net->proc_net);
862#endif 903#endif
863} 904}
864 905
@@ -872,9 +913,6 @@ static int __net_init hashlimit_net_init(struct net *net)
872 913
873static void __net_exit hashlimit_net_exit(struct net *net) 914static void __net_exit hashlimit_net_exit(struct net *net)
874{ 915{
875 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
876
877 BUG_ON(!hlist_empty(&hashlimit_net->htables));
878 hashlimit_proc_net_exit(net); 916 hashlimit_proc_net_exit(net);
879} 917}
880 918
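
dsthash_alloc_init() now repeats the lookup after taking ht->lock, so two packets racing to create the same entry end up sharing one instead of double-inserting into the hash chain. A pthread model of the double-checked insert (a single slot stands in for the whole hash table):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char entry[32];    /* one slot stands in for the htable */

static const char *alloc_init(const char *key, int *race)
{
    pthread_mutex_lock(&lock);

    /* Double check under the lock: did somebody beat us to it? */
    if (entry[0] != '\0') {
        *race = 1;
        pthread_mutex_unlock(&lock);
        return entry;
    }

    snprintf(entry, sizeof(entry), "%s", key);
    pthread_mutex_unlock(&lock);
    return entry;
}

int main(void)
{
    int race = 0;

    alloc_init("10.0.0.1", &race);
    printf("first insert,  race=%d\n", race);
    alloc_init("10.0.0.1", &race);
    printf("second insert, race=%d\n", race);
    return 0;
}
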
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 4635c9b00459..31bf233dae97 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -29,6 +29,7 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/inet.h> 30#include <linux/inet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/vmalloc.h>
32#include <net/net_namespace.h> 33#include <net/net_namespace.h>
33#include <net/netns/generic.h> 34#include <net/netns/generic.h>
34 35
@@ -310,6 +311,14 @@ out:
310 return ret; 311 return ret;
311} 312}
312 313
314static void recent_table_free(void *addr)
315{
316 if (is_vmalloc_addr(addr))
317 vfree(addr);
318 else
319 kfree(addr);
320}
321
313static int recent_mt_check(const struct xt_mtchk_param *par, 322static int recent_mt_check(const struct xt_mtchk_param *par,
314 const struct xt_recent_mtinfo_v1 *info) 323 const struct xt_recent_mtinfo_v1 *info)
315{ 324{
@@ -322,6 +331,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
322#endif 331#endif
323 unsigned int i; 332 unsigned int i;
324 int ret = -EINVAL; 333 int ret = -EINVAL;
334 size_t sz;
325 335
326 if (unlikely(!hash_rnd_inited)) { 336 if (unlikely(!hash_rnd_inited)) {
327 get_random_bytes(&hash_rnd, sizeof(hash_rnd)); 337 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
@@ -360,8 +370,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
360 goto out; 370 goto out;
361 } 371 }
362 372
363 t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size, 373 sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
364 GFP_KERNEL); 374 if (sz <= PAGE_SIZE)
375 t = kzalloc(sz, GFP_KERNEL);
376 else
377 t = vzalloc(sz);
365 if (t == NULL) { 378 if (t == NULL) {
366 ret = -ENOMEM; 379 ret = -ENOMEM;
367 goto out; 380 goto out;
@@ -377,14 +390,14 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
377 uid = make_kuid(&init_user_ns, ip_list_uid); 390 uid = make_kuid(&init_user_ns, ip_list_uid);
378 gid = make_kgid(&init_user_ns, ip_list_gid); 391 gid = make_kgid(&init_user_ns, ip_list_gid);
379 if (!uid_valid(uid) || !gid_valid(gid)) { 392 if (!uid_valid(uid) || !gid_valid(gid)) {
380 kfree(t); 393 recent_table_free(t);
381 ret = -EINVAL; 394 ret = -EINVAL;
382 goto out; 395 goto out;
383 } 396 }
384 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, 397 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
385 &recent_mt_fops, t); 398 &recent_mt_fops, t);
386 if (pde == NULL) { 399 if (pde == NULL) {
387 kfree(t); 400 recent_table_free(t);
388 ret = -ENOMEM; 401 ret = -ENOMEM;
389 goto out; 402 goto out;
390 } 403 }
@@ -431,10 +444,11 @@ static void recent_mt_destroy(const struct xt_mtdtor_param *par)
431 list_del(&t->list); 444 list_del(&t->list);
432 spin_unlock_bh(&recent_lock); 445 spin_unlock_bh(&recent_lock);
433#ifdef CONFIG_PROC_FS 446#ifdef CONFIG_PROC_FS
434 remove_proc_entry(t->name, recent_net->xt_recent); 447 if (recent_net->xt_recent != NULL)
448 remove_proc_entry(t->name, recent_net->xt_recent);
435#endif 449#endif
436 recent_table_flush(t); 450 recent_table_flush(t);
437 kfree(t); 451 recent_table_free(t);
438 } 452 }
439 mutex_unlock(&recent_mutex); 453 mutex_unlock(&recent_mutex);
440} 454}
@@ -615,7 +629,21 @@ static int __net_init recent_proc_net_init(struct net *net)
615 629
616static void __net_exit recent_proc_net_exit(struct net *net) 630static void __net_exit recent_proc_net_exit(struct net *net)
617{ 631{
618 proc_net_remove(net, "xt_recent"); 632 struct recent_net *recent_net = recent_pernet(net);
633 struct recent_table *t;
634
635 /* recent_net_exit() is called before recent_mt_destroy(). Make sure
 636 	 * that the parent xt_recent proc entry is empty before trying to
637 * remove it.
638 */
639 spin_lock_bh(&recent_lock);
640 list_for_each_entry(t, &recent_net->tables, list)
641 remove_proc_entry(t->name, recent_net->xt_recent);
642
643 recent_net->xt_recent = NULL;
644 spin_unlock_bh(&recent_lock);
645
646 remove_proc_entry("xt_recent", net->proc_net);
619} 647}
620#else 648#else
621static inline int recent_proc_net_init(struct net *net) 649static inline int recent_proc_net_init(struct net *net)
@@ -638,9 +666,6 @@ static int __net_init recent_net_init(struct net *net)
638 666
639static void __net_exit recent_net_exit(struct net *net) 667static void __net_exit recent_net_exit(struct net *net)
640{ 668{
641 struct recent_net *recent_net = recent_pernet(net);
642
643 BUG_ON(!list_empty(&recent_net->tables));
644 recent_proc_net_exit(net); 669 recent_proc_net_exit(net);
645} 670}
646 671
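
xt_recent now computes the table size up front and picks kzalloc() at or below PAGE_SIZE or vzalloc() above it, with recent_table_free() choosing the matching release path via is_vmalloc_addr(). Userspace has no address-based test, so this model tags the allocation explicitly (calloc/mmap as stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

struct table {
    int vmalloced;
    /* ... hash buckets follow in the real structure ... */
};

static struct table *table_alloc(size_t sz)
{
    struct table *t;

    if (sz <= (size_t)sysconf(_SC_PAGESIZE)) {
        t = calloc(1, sz);    /* kzalloc() stand-in */
        if (t)
            t->vmalloced = 0;
    } else {
        t = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);    /* vzalloc() stand-in */
        if (t == MAP_FAILED)
            return NULL;
        t->vmalloced = 1;
    }
    return t;
}

/* One free helper picks the matching release path. */
static void table_free(struct table *t, size_t sz)
{
    if (!t)
        return;
    if (t->vmalloced)
        munmap(t, sz);
    else
        free(t);
}

int main(void)
{
    size_t small = 128, big = 1 << 20;
    struct table *a = table_alloc(small), *b = table_alloc(big);

    printf("small: vmalloced=%d\n", a ? a->vmalloced : -1);
    printf("big:   vmalloced=%d\n", b ? b->vmalloced : -1);
    table_free(a, small);
    table_free(b, big);
    return 0;
}
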
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c8a1eb6eca2d..3d55e0c713e2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -669,6 +669,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
670 int err; 670 int err;
671 671
672 if (addr_len < sizeof(struct sockaddr_nl))
673 return -EINVAL;
674
672 if (nladdr->nl_family != AF_NETLINK) 675 if (nladdr->nl_family != AF_NETLINK)
673 return -EINVAL; 676 return -EINVAL;
674 677
@@ -2059,7 +2062,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
2059 struct sock *s = v; 2062 struct sock *s = v;
2060 struct netlink_sock *nlk = nlk_sk(s); 2063 struct netlink_sock *nlk = nlk_sk(s);
2061 2064
2062 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", 2065 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2063 s, 2066 s,
2064 s->sk_protocol, 2067 s->sk_protocol,
2065 nlk->portid, 2068 nlk->portid,
@@ -2142,7 +2145,7 @@ static const struct net_proto_family netlink_family_ops = {
2142static int __net_init netlink_net_init(struct net *net) 2145static int __net_init netlink_net_init(struct net *net)
2143{ 2146{
2144#ifdef CONFIG_PROC_FS 2147#ifdef CONFIG_PROC_FS
2145 if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) 2148 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
2146 return -ENOMEM; 2149 return -ENOMEM;
2147#endif 2150#endif
2148 return 0; 2151 return 0;
@@ -2151,7 +2154,7 @@ static int __net_init netlink_net_init(struct net *net)
2151static void __net_exit netlink_net_exit(struct net *net) 2154static void __net_exit netlink_net_exit(struct net *net)
2152{ 2155{
2153#ifdef CONFIG_PROC_FS 2156#ifdef CONFIG_PROC_FS
2154 proc_net_remove(net, "netlink"); 2157 remove_proc_entry("netlink", net->proc_net);
2155#endif 2158#endif
2156} 2159}
2157 2160
@@ -2182,7 +2185,6 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
2182 2185
2183static int __init netlink_proto_init(void) 2186static int __init netlink_proto_init(void)
2184{ 2187{
2185 struct sk_buff *dummy_skb;
2186 int i; 2188 int i;
2187 unsigned long limit; 2189 unsigned long limit;
2188 unsigned int order; 2190 unsigned int order;
@@ -2191,7 +2193,7 @@ static int __init netlink_proto_init(void)
2191 if (err != 0) 2193 if (err != 0)
2192 goto out; 2194 goto out;
2193 2195
2194 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)); 2196 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2195 2197
2196 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 2198 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2197 if (!nl_table) 2199 if (!nl_table)
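
netlink_bind() now validates addr_len before reading any sockaddr_nl field, closing an out-of-bounds read on short addresses from userspace. A sketch of the same check order (bind_check() and its -1 return are illustrative, not the kernel API):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int bind_check(const void *addr, int addr_len)
{
    const struct sockaddr_nl *nladdr = addr;

    /* Length first: -EINVAL before touching any struct field. */
    if (addr_len < (int)sizeof(struct sockaddr_nl))
        return -1;

    if (nladdr->nl_family != AF_NETLINK)
        return -1;

    return 0;
}

int main(void)
{
    struct sockaddr_nl nladdr;

    memset(&nladdr, 0, sizeof(nladdr));
    nladdr.nl_family = AF_NETLINK;

    printf("short addr: %d\n", bind_check(&nladdr, 2));
    printf("full addr:  %d\n", bind_check(&nladdr, (int)sizeof(nladdr)));
    return 0;
}
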
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7261eb81974f..297b07a029de 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1452,9 +1452,9 @@ static int __init nr_proto_init(void)
1452 1452
1453 nr_loopback_init(); 1453 nr_loopback_init();
1454 1454
1455 proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops); 1455 proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
1456 proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops); 1456 proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
1457 proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops); 1457 proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
1458out: 1458out:
1459 return rc; 1459 return rc;
1460fail: 1460fail:
@@ -1482,9 +1482,9 @@ static void __exit nr_exit(void)
1482{ 1482{
1483 int i; 1483 int i;
1484 1484
1485 proc_net_remove(&init_net, "nr"); 1485 remove_proc_entry("nr", init_net.proc_net);
1486 proc_net_remove(&init_net, "nr_neigh"); 1486 remove_proc_entry("nr_neigh", init_net.proc_net);
1487 proc_net_remove(&init_net, "nr_nodes"); 1487 remove_proc_entry("nr_nodes", init_net.proc_net);
1488 nr_loopback_clear(); 1488 nr_loopback_clear();
1489 1489
1490 nr_rt_free(); 1490 nr_rt_free();
diff --git a/net/nfc/core.c b/net/nfc/core.c
index aa64ea441676..6ceee8e181ca 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -338,7 +338,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
338 dev->active_target = target; 338 dev->active_target = target;
339 dev->rf_mode = NFC_RF_INITIATOR; 339 dev->rf_mode = NFC_RF_INITIATOR;
340 340
341 if (dev->ops->check_presence) 341 if (dev->ops->check_presence && !dev->shutting_down)
342 mod_timer(&dev->check_pres_timer, jiffies + 342 mod_timer(&dev->check_pres_timer, jiffies +
343 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); 343 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
344 } 344 }
@@ -429,7 +429,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
429 rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, 429 rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
430 cb_context); 430 cb_context);
431 431
432 if (!rc && dev->ops->check_presence) 432 if (!rc && dev->ops->check_presence && !dev->shutting_down)
433 mod_timer(&dev->check_pres_timer, jiffies + 433 mod_timer(&dev->check_pres_timer, jiffies +
434 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); 434 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
435 } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) { 435 } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
@@ -684,11 +684,6 @@ static void nfc_release(struct device *d)
684 684
685 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 685 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
686 686
687 if (dev->ops->check_presence) {
688 del_timer_sync(&dev->check_pres_timer);
689 cancel_work_sync(&dev->check_pres_work);
690 }
691
692 nfc_genl_data_exit(&dev->genl_data); 687 nfc_genl_data_exit(&dev->genl_data);
693 kfree(dev->targets); 688 kfree(dev->targets);
694 kfree(dev); 689 kfree(dev);
@@ -706,15 +701,16 @@ static void nfc_check_pres_work(struct work_struct *work)
706 rc = dev->ops->check_presence(dev, dev->active_target); 701 rc = dev->ops->check_presence(dev, dev->active_target);
707 if (rc == -EOPNOTSUPP) 702 if (rc == -EOPNOTSUPP)
708 goto exit; 703 goto exit;
709 if (!rc) { 704 if (rc) {
710 mod_timer(&dev->check_pres_timer, jiffies +
711 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
712 } else {
713 u32 active_target_idx = dev->active_target->idx; 705 u32 active_target_idx = dev->active_target->idx;
714 device_unlock(&dev->dev); 706 device_unlock(&dev->dev);
715 nfc_target_lost(dev, active_target_idx); 707 nfc_target_lost(dev, active_target_idx);
716 return; 708 return;
717 } 709 }
710
711 if (!dev->shutting_down)
712 mod_timer(&dev->check_pres_timer, jiffies +
713 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
718 } 714 }
719 715
720exit: 716exit:
@@ -734,10 +730,10 @@ struct class nfc_class = {
734}; 730};
735EXPORT_SYMBOL(nfc_class); 731EXPORT_SYMBOL(nfc_class);
736 732
737static int match_idx(struct device *d, void *data) 733static int match_idx(struct device *d, const void *data)
738{ 734{
739 struct nfc_dev *dev = to_nfc_dev(d); 735 struct nfc_dev *dev = to_nfc_dev(d);
740 unsigned int *idx = data; 736 const unsigned int *idx = data;
741 737
742 return dev->idx == *idx; 738 return dev->idx == *idx;
743} 739}
@@ -761,6 +757,7 @@ struct nfc_dev *nfc_get_device(unsigned int idx)
761 */ 757 */
762struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, 758struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
763 u32 supported_protocols, 759 u32 supported_protocols,
760 u32 supported_se,
764 int tx_headroom, int tx_tailroom) 761 int tx_headroom, int tx_tailroom)
765{ 762{
766 struct nfc_dev *dev; 763 struct nfc_dev *dev;
@@ -778,6 +775,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
778 775
779 dev->ops = ops; 776 dev->ops = ops;
780 dev->supported_protocols = supported_protocols; 777 dev->supported_protocols = supported_protocols;
778 dev->supported_se = supported_se;
779 dev->active_se = NFC_SE_NONE;
781 dev->tx_headroom = tx_headroom; 780 dev->tx_headroom = tx_headroom;
782 dev->tx_tailroom = tx_tailroom; 781 dev->tx_tailroom = tx_tailroom;
783 782
@@ -853,26 +852,27 @@ void nfc_unregister_device(struct nfc_dev *dev)
853 852
854 id = dev->idx; 853 id = dev->idx;
855 854
856 mutex_lock(&nfc_devlist_mutex); 855 if (dev->ops->check_presence) {
857 nfc_devlist_generation++; 856 device_lock(&dev->dev);
858 857 dev->shutting_down = true;
859 /* lock to avoid unregistering a device while an operation 858 device_unlock(&dev->dev);
860 is in progress */ 859 del_timer_sync(&dev->check_pres_timer);
861 device_lock(&dev->dev); 860 cancel_work_sync(&dev->check_pres_work);
862 device_del(&dev->dev); 861 }
863 device_unlock(&dev->dev);
864 862
865 mutex_unlock(&nfc_devlist_mutex); 863 rc = nfc_genl_device_removed(dev);
864 if (rc)
865 pr_debug("The userspace won't be notified that the device %s "
866 "was removed\n", dev_name(&dev->dev));
866 867
867 nfc_llcp_unregister_device(dev); 868 nfc_llcp_unregister_device(dev);
868 869
869 rc = nfc_genl_device_removed(dev); 870 mutex_lock(&nfc_devlist_mutex);
870 if (rc) 871 nfc_devlist_generation++;
871 pr_debug("The userspace won't be notified that the device %s was removed\n", 872 device_del(&dev->dev);
872 dev_name(&dev->dev)); 873 mutex_unlock(&nfc_devlist_mutex);
873 874
874 ida_simple_remove(&nfc_index_ida, id); 875 ida_simple_remove(&nfc_index_ida, id);
875
876} 876}
877EXPORT_SYMBOL(nfc_unregister_device); 877EXPORT_SYMBOL(nfc_unregister_device);
878 878
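
nfc_unregister_device() flips shutting_down under the device lock before del_timer_sync()/cancel_work_sync(), so the presence-check path can no longer re-arm the timer mid-teardown. A pthread model of that ordering (rearm_timer() stands in for the mod_timer() sites now guarded by !dev->shutting_down):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int shutting_down;

static int rearm_timer(void)
{
    int armed;

    pthread_mutex_lock(&dev_lock);
    armed = !shutting_down;    /* mod_timer() only if still live */
    pthread_mutex_unlock(&dev_lock);
    return armed;
}

int main(void)
{
    printf("before shutdown: rearm=%d\n", rearm_timer());

    pthread_mutex_lock(&dev_lock);
    shutting_down = 1;    /* step 1: block re-arming */
    pthread_mutex_unlock(&dev_lock);

    /* step 2: del_timer_sync()/cancel_work_sync() would run here,
     * guaranteed not to race with a fresh mod_timer(). */
    printf("after shutdown:  rearm=%d\n", rearm_timer());
    return 0;
}
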
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 7d99410e6c1a..64f922be9281 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -280,14 +280,19 @@ static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
280static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) 280static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
281{ 281{
282 u8 param[2]; 282 u8 param[2];
283 size_t param_len = 2;
283 284
284 /* TODO: Find out what the identity reference data is 285 /* TODO: Find out what the identity reference data is
285 * and fill param with it. HCI spec 6.1.3.5 */ 286 * and fill param with it. HCI spec 6.1.3.5 */
286 287
287 pr_debug("\n"); 288 pr_debug("\n");
288 289
290 if (test_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &hdev->quirks))
291 param_len = 0;
292
289 return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, 293 return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
290 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); 294 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, param_len,
295 NULL);
291} 296}
292 297
293int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) 298int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 7bea574d5934..91020b210d87 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -57,6 +57,8 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
57 int r = 0; 57 int r = 0;
58 58
59 mutex_lock(&hdev->msg_tx_mutex); 59 mutex_lock(&hdev->msg_tx_mutex);
60 if (hdev->shutting_down)
61 goto exit;
60 62
61 if (hdev->cmd_pending_msg) { 63 if (hdev->cmd_pending_msg) {
62 if (timer_pending(&hdev->cmd_timer) == 0) { 64 if (timer_pending(&hdev->cmd_timer) == 0) {
@@ -295,6 +297,12 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
295 goto exit; 297 goto exit;
296 } 298 }
297 299
300 if (hdev->ops->event_received) {
301 r = hdev->ops->event_received(hdev, gate, event, skb);
302 if (r <= 0)
303 goto exit_noskb;
304 }
305
298 switch (event) { 306 switch (event) {
299 case NFC_HCI_EVT_TARGET_DISCOVERED: 307 case NFC_HCI_EVT_TARGET_DISCOVERED:
300 if (skb->len < 1) { /* no status data? */ 308 if (skb->len < 1) { /* no status data? */
@@ -320,17 +328,15 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
320 r = nfc_hci_target_discovered(hdev, gate); 328 r = nfc_hci_target_discovered(hdev, gate);
321 break; 329 break;
322 default: 330 default:
323 if (hdev->ops->event_received) { 331 pr_info("Discarded unknown event %x to gate %x\n", event, gate);
324 hdev->ops->event_received(hdev, gate, event, skb); 332 r = -EINVAL;
325 return;
326 }
327
328 break; 333 break;
329 } 334 }
330 335
331exit: 336exit:
332 kfree_skb(skb); 337 kfree_skb(skb);
333 338
339exit_noskb:
334 if (r) { 340 if (r) {
335 /* TODO: There was an error dispatching the event, 341 /* TODO: There was an error dispatching the event,
336 * how to propagate up to nfc core? 342 * how to propagate up to nfc core?
@@ -669,8 +675,10 @@ static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
669 675
670 if (hdev->ops->tm_send) 676 if (hdev->ops->tm_send)
671 return hdev->ops->tm_send(hdev, skb); 677 return hdev->ops->tm_send(hdev, skb);
672 else 678
673 return -ENOTSUPP; 679 kfree_skb(skb);
680
681 return -ENOTSUPP;
674} 682}
675 683
676static int hci_check_presence(struct nfc_dev *nfc_dev, 684static int hci_check_presence(struct nfc_dev *nfc_dev,
@@ -787,7 +795,9 @@ static struct nfc_ops hci_nfc_ops = {
787 795
788struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, 796struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
789 struct nfc_hci_init_data *init_data, 797 struct nfc_hci_init_data *init_data,
798 unsigned long quirks,
790 u32 protocols, 799 u32 protocols,
800 u32 supported_se,
791 const char *llc_name, 801 const char *llc_name,
792 int tx_headroom, 802 int tx_headroom,
793 int tx_tailroom, 803 int tx_tailroom,
@@ -813,7 +823,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
813 return NULL; 823 return NULL;
814 } 824 }
815 825
816 hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, 826 hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, supported_se,
817 tx_headroom + HCI_CMDS_HEADROOM, 827 tx_headroom + HCI_CMDS_HEADROOM,
818 tx_tailroom); 828 tx_tailroom);
819 if (!hdev->ndev) { 829 if (!hdev->ndev) {
@@ -830,6 +840,8 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
830 840
831 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); 841 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
832 842
843 hdev->quirks = quirks;
844
833 return hdev; 845 return hdev;
834} 846}
835EXPORT_SYMBOL(nfc_hci_allocate_device); 847EXPORT_SYMBOL(nfc_hci_allocate_device);
@@ -868,6 +880,28 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
868{ 880{
869 struct hci_msg *msg, *n; 881 struct hci_msg *msg, *n;
870 882
883 mutex_lock(&hdev->msg_tx_mutex);
884
885 if (hdev->cmd_pending_msg) {
886 if (hdev->cmd_pending_msg->cb)
887 hdev->cmd_pending_msg->cb(
888 hdev->cmd_pending_msg->cb_context,
889 NULL, -ESHUTDOWN);
890 kfree(hdev->cmd_pending_msg);
891 hdev->cmd_pending_msg = NULL;
892 }
893
894 hdev->shutting_down = true;
895
896 mutex_unlock(&hdev->msg_tx_mutex);
897
898 del_timer_sync(&hdev->cmd_timer);
899 cancel_work_sync(&hdev->msg_tx_work);
900
901 cancel_work_sync(&hdev->msg_rx_work);
902
903 nfc_unregister_device(hdev->ndev);
904
871 skb_queue_purge(&hdev->rx_hcp_frags); 905 skb_queue_purge(&hdev->rx_hcp_frags);
872 skb_queue_purge(&hdev->msg_rx_queue); 906 skb_queue_purge(&hdev->msg_rx_queue);
873 907
@@ -876,13 +910,6 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
876 skb_queue_purge(&msg->msg_frags); 910 skb_queue_purge(&msg->msg_frags);
877 kfree(msg); 911 kfree(msg);
878 } 912 }
879
880 del_timer_sync(&hdev->cmd_timer);
881
882 nfc_unregister_device(hdev->ndev);
883
884 cancel_work_sync(&hdev->msg_tx_work);
885 cancel_work_sync(&hdev->msg_rx_work);
886} 913}
887EXPORT_SYMBOL(nfc_hci_unregister_device); 914EXPORT_SYMBOL(nfc_hci_unregister_device);
888 915
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index bc308a7ca609..b6b4109f2343 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -105,6 +105,13 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
105 } 105 }
106 106
107 mutex_lock(&hdev->msg_tx_mutex); 107 mutex_lock(&hdev->msg_tx_mutex);
108
109 if (hdev->shutting_down) {
110 err = -ESHUTDOWN;
111 mutex_unlock(&hdev->msg_tx_mutex);
112 goto out_skb_err;
113 }
114
108 list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue); 115 list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
109 mutex_unlock(&hdev->msg_tx_mutex); 116 mutex_unlock(&hdev->msg_tx_mutex);
110 117
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index df24be48d4da..c6bc3bd95052 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -304,6 +304,8 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
304 304
305 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); 305 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
306 306
307 __net_timestamp(skb);
308
307 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX); 309 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
308 310
309 return nfc_data_exchange(dev, local->target_idx, skb, 311 return nfc_data_exchange(dev, local->target_idx, skb,
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index ec43914c92a9..746f5a2f9804 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -54,7 +54,6 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
54 54
55 skb_queue_purge(&sock->tx_queue); 55 skb_queue_purge(&sock->tx_queue);
56 skb_queue_purge(&sock->tx_pending_queue); 56 skb_queue_purge(&sock->tx_pending_queue);
57 skb_queue_purge(&sock->tx_backlog_queue);
58 57
59 if (local == NULL) 58 if (local == NULL)
60 return; 59 return;
@@ -550,14 +549,13 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
550 pr_err("No LLCP device\n"); 549 pr_err("No LLCP device\n");
551 return -ENODEV; 550 return -ENODEV;
552 } 551 }
552 if (gb_len < 3)
553 return -EINVAL;
553 554
554 memset(local->remote_gb, 0, NFC_MAX_GT_LEN); 555 memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
555 memcpy(local->remote_gb, gb, gb_len); 556 memcpy(local->remote_gb, gb, gb_len);
556 local->remote_gb_len = gb_len; 557 local->remote_gb_len = gb_len;
557 558
558 if (local->remote_gb == NULL || local->remote_gb_len == 0)
559 return -ENODEV;
560
561 if (memcmp(local->remote_gb, llcp_magic, 3)) { 559 if (memcmp(local->remote_gb, llcp_magic, 3)) {
562 pr_err("MAC does not support LLCP\n"); 560 pr_err("MAC does not support LLCP\n");
563 return -EINVAL; 561 return -EINVAL;
@@ -668,6 +666,8 @@ static void nfc_llcp_tx_work(struct work_struct *work)
668 if (ptype == LLCP_PDU_I) 666 if (ptype == LLCP_PDU_I)
669 copy_skb = skb_copy(skb, GFP_ATOMIC); 667 copy_skb = skb_copy(skb, GFP_ATOMIC);
670 668
669 __net_timestamp(skb);
670
671 nfc_llcp_send_to_raw_sock(local, skb, 671 nfc_llcp_send_to_raw_sock(local, skb,
672 NFC_LLCP_DIRECTION_TX); 672 NFC_LLCP_DIRECTION_TX);
673 673
@@ -781,9 +781,15 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
781 781
782 /* There is no sequence with UI frames */ 782 /* There is no sequence with UI frames */
783 skb_pull(skb, LLCP_HEADER_SIZE); 783 skb_pull(skb, LLCP_HEADER_SIZE);
784 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { 784 if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
785 pr_err("receive queue is full\n"); 785 /*
786 skb_queue_head(&llcp_sock->tx_backlog_queue, skb); 786 * UI frames will be freed from the socket layer, so we
787 * need to keep them alive until someone receives them.
788 */
789 skb_get(skb);
790 } else {
791 pr_err("Receive queue is full\n");
792 kfree_skb(skb);
787 } 793 }
788 794
789 nfc_llcp_sock_put(llcp_sock); 795 nfc_llcp_sock_put(llcp_sock);
@@ -976,9 +982,15 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
976 pr_err("Received out of sequence I PDU\n"); 982 pr_err("Received out of sequence I PDU\n");
977 983
978 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE); 984 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
979 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { 985 if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
980 pr_err("receive queue is full\n"); 986 /*
981 skb_queue_head(&llcp_sock->tx_backlog_queue, skb); 987 * I frames will be freed from the socket layer, so we
988 * need to keep them alive until someone receives them.
989 */
990 skb_get(skb);
991 } else {
992 pr_err("Receive queue is full\n");
993 kfree_skb(skb);
982 } 994 }
983 } 995 }
984 996
@@ -1245,6 +1257,8 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1245 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET, 1257 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
1246 16, 1, skb->data, skb->len, true); 1258 16, 1, skb->data, skb->len, true);
1247 1259
1260 __net_timestamp(skb);
1261
1248 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX); 1262 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
1249 1263
1250 switch (ptype) { 1264 switch (ptype) {
@@ -1296,6 +1310,13 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1296 local->rx_pending = NULL; 1310 local->rx_pending = NULL;
1297} 1311}
1298 1312
1313static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb)
1314{
1315 local->rx_pending = skb;
1316 del_timer(&local->link_timer);
1317 schedule_work(&local->rx_work);
1318}
1319
1299void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) 1320void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
1300{ 1321{
1301 struct nfc_llcp_local *local = (struct nfc_llcp_local *) data; 1322 struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
@@ -1306,9 +1327,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
1306 return; 1327 return;
1307 } 1328 }
1308 1329
1309 local->rx_pending = skb_get(skb); 1330 __nfc_llcp_recv(local, skb);
1310 del_timer(&local->link_timer);
1311 schedule_work(&local->rx_work);
1312} 1331}
1313 1332
1314int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) 1333int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
@@ -1319,9 +1338,7 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
1319 if (local == NULL) 1338 if (local == NULL)
1320 return -ENODEV; 1339 return -ENODEV;
1321 1340
1322 local->rx_pending = skb_get(skb); 1341 __nfc_llcp_recv(local, skb);
1323 del_timer(&local->link_timer);
1324 schedule_work(&local->rx_work);
1325 1342
1326 return 0; 1343 return 0;
1327} 1344}
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0d62366f8cc3..0eae5c509504 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -121,7 +121,6 @@ struct nfc_llcp_sock {
121 121
122 struct sk_buff_head tx_queue; 122 struct sk_buff_head tx_queue;
123 struct sk_buff_head tx_pending_queue; 123 struct sk_buff_head tx_pending_queue;
124 struct sk_buff_head tx_backlog_queue;
125 124
126 struct list_head accept_queue; 125 struct list_head accept_queue;
127 struct sock *parent; 126 struct sock *parent;
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index fea22eb41b82..5332751943a9 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -672,25 +672,27 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
672 copied = min_t(unsigned int, rlen, len); 672 copied = min_t(unsigned int, rlen, len);
673 673
674 cskb = skb; 674 cskb = skb;
675 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 675 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
676 if (!(flags & MSG_PEEK)) 676 if (!(flags & MSG_PEEK))
677 skb_queue_head(&sk->sk_receive_queue, skb); 677 skb_queue_head(&sk->sk_receive_queue, skb);
678 return -EFAULT; 678 return -EFAULT;
679 } 679 }
680 680
681 sock_recv_timestamp(msg, sk, skb);
682
681 if (sk->sk_type == SOCK_DGRAM && msg->msg_name) { 683 if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
682 struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb); 684 struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
683 struct sockaddr_nfc_llcp sockaddr; 685 struct sockaddr_nfc_llcp *sockaddr =
686 (struct sockaddr_nfc_llcp *) msg->msg_name;
684 687
685 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); 688 msg->msg_namelen = sizeof(struct sockaddr_nfc_llcp);
686 689
687 sockaddr.sa_family = AF_NFC; 690 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
688 sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP;
689 sockaddr.dsap = ui_cb->dsap;
690 sockaddr.ssap = ui_cb->ssap;
691 691
692 memcpy(msg->msg_name, &sockaddr, sizeof(sockaddr)); 692 sockaddr->sa_family = AF_NFC;
693 msg->msg_namelen = sizeof(sockaddr); 693 sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
694 sockaddr->dsap = ui_cb->dsap;
695 sockaddr->ssap = ui_cb->ssap;
694 } 696 }
695 697
696 /* Mark read part of skb as used */ 698 /* Mark read part of skb as used */
@@ -806,7 +808,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
806 llcp_sock->reserved_ssap = LLCP_SAP_MAX; 808 llcp_sock->reserved_ssap = LLCP_SAP_MAX;
807 skb_queue_head_init(&llcp_sock->tx_queue); 809 skb_queue_head_init(&llcp_sock->tx_queue);
808 skb_queue_head_init(&llcp_sock->tx_pending_queue); 810 skb_queue_head_init(&llcp_sock->tx_pending_queue);
809 skb_queue_head_init(&llcp_sock->tx_backlog_queue);
810 INIT_LIST_HEAD(&llcp_sock->accept_queue); 811 INIT_LIST_HEAD(&llcp_sock->accept_queue);
811 812
812 if (sock != NULL) 813 if (sock != NULL)
@@ -821,7 +822,6 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
821 822
822 skb_queue_purge(&sock->tx_queue); 823 skb_queue_purge(&sock->tx_queue);
823 skb_queue_purge(&sock->tx_pending_queue); 824 skb_queue_purge(&sock->tx_pending_queue);
824 skb_queue_purge(&sock->tx_backlog_queue);
825 825
826 list_del_init(&sock->accept_queue); 826 list_del_init(&sock->accept_queue);
827 827
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 5f98dc1bf039..48ada0ec749e 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -658,6 +658,7 @@ static struct nfc_ops nci_nfc_ops = {
658 */ 658 */
659struct nci_dev *nci_allocate_device(struct nci_ops *ops, 659struct nci_dev *nci_allocate_device(struct nci_ops *ops,
660 __u32 supported_protocols, 660 __u32 supported_protocols,
661 __u32 supported_se,
661 int tx_headroom, int tx_tailroom) 662 int tx_headroom, int tx_tailroom)
662{ 663{
663 struct nci_dev *ndev; 664 struct nci_dev *ndev;
@@ -680,6 +681,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
680 681
681 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, 682 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
682 supported_protocols, 683 supported_protocols,
684 supported_se,
683 tx_headroom + NCI_DATA_HDR_SIZE, 685 tx_headroom + NCI_DATA_HDR_SIZE,
684 tx_tailroom); 686 tx_tailroom);
685 if (!ndev->nfc_dev) 687 if (!ndev->nfc_dev)
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 3568ae16786d..504b883439f1 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -366,6 +366,7 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
366 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || 366 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
367 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || 367 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
368 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || 368 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
369 nla_put_u32(msg, NFC_ATTR_SE, dev->supported_se) ||
369 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || 370 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) ||
370 nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) 371 nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
371 goto nla_put_failure; 372 goto nla_put_failure;
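
Note: the two hunks above plumb a supported_se capability mask from NCI drivers
through nci_allocate_device() into nfc_allocate_device(), and netlink then
advertises it as NFC_ATTR_SE. A hypothetical driver-side call after the
prototype change (the mask and the headroom names are assumptions made for
illustration; NFC_SE_UICC/NFC_SE_EMBEDDED are assumed from the era's uapi
header):

        ndev = nci_allocate_device(&drv_nci_ops, drv_protocols,
                                   NFC_SE_UICC | NFC_SE_EMBEDDED,
                                   DRV_TX_HEADROOM, DRV_TX_TAILROOM);
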
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f996db343247..9dc537df46c4 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -301,7 +301,7 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
301 struct sk_buff *segs, *nskb; 301 struct sk_buff *segs, *nskb;
302 int err; 302 int err;
303 303
304 segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); 304 segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
305 if (IS_ERR(segs)) 305 if (IS_ERR(segs))
306 return PTR_ERR(segs); 306 return PTR_ERR(segs);
307 307
@@ -1989,10 +1989,9 @@ static struct pernet_operations ovs_net_ops = {
1989 1989
1990static int __init dp_init(void) 1990static int __init dp_init(void)
1991{ 1991{
1992 struct sk_buff *dummy_skb;
1993 int err; 1992 int err;
1994 1993
1995 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); 1994 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
1996 1995
1997 pr_info("Open vSwitch switching datapath\n"); 1996 pr_info("Open vSwitch switching datapath\n");
1998 1997
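
Note: dp_init() loses its dummy_skb variable; FIELD_SIZEOF() sizes a struct
member without needing an instance, so the compile-time check stands alone. A
self-contained illustration of the same trick (toy struct, not the kernel's):

        #include <stddef.h>
        #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

        struct example { char cb[48]; };

        /* fails to compile if the private area outgrows cb[] */
        _Static_assert(sizeof(long[4]) <= FIELD_SIZEOF(struct example, cb),
                       "private cb area too large");
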
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 5d460c37df07..0531de6c7a4a 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -69,7 +69,6 @@ static int internal_dev_mac_addr(struct net_device *dev, void *p)
69 69
70 if (!is_valid_ether_addr(addr->sa_data)) 70 if (!is_valid_ether_addr(addr->sa_data))
71 return -EADDRNOTAVAIL; 71 return -EADDRNOTAVAIL;
72 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
73 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 72 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
74 return 0; 73 return 0;
75} 74}
@@ -98,7 +97,7 @@ static int internal_dev_stop(struct net_device *netdev)
98static void internal_dev_getinfo(struct net_device *netdev, 97static void internal_dev_getinfo(struct net_device *netdev,
99 struct ethtool_drvinfo *info) 98 struct ethtool_drvinfo *info)
100{ 99{
101 strcpy(info->driver, "openvswitch"); 100 strlcpy(info->driver, "openvswitch", sizeof(info->driver));
102} 101}
103 102
104static const struct ethtool_ops internal_dev_ethtool_ops = { 103static const struct ethtool_ops internal_dev_ethtool_ops = {
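
Note: strcpy() into the fixed 32-byte ethtool_drvinfo fields relied on the
literal fitting; strlcpy() bounds the copy and guarantees NUL termination. For
reference, a userspace toy with the same semantics (the kernel has its own
strlcpy in lib/string.c):

        #include <string.h>

        static size_t toy_strlcpy(char *dst, const char *src, size_t size)
        {
                size_t len = strlen(src);

                if (size) {
                        size_t n = len < size - 1 ? len : size - 1;
                        memcpy(dst, src, n);
                        dst[n] = '\0';
                }
                return len;     /* compare against size to detect truncation */
        }
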
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index a9327e2e48ce..670cbc3518de 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -35,10 +35,11 @@
35/* Must be called with rcu_read_lock. */ 35/* Must be called with rcu_read_lock. */
36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
37{ 37{
38 if (unlikely(!vport)) { 38 if (unlikely(!vport))
39 kfree_skb(skb); 39 goto error;
40 return; 40
41 } 41 if (unlikely(skb_warn_if_lro(skb)))
42 goto error;
42 43
43 /* Make our own copy of the packet. Otherwise we will mangle the 44 /* Make our own copy of the packet. Otherwise we will mangle the
44 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
50 51
51 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
52 ovs_vport_receive(vport, skb); 53 ovs_vport_receive(vport, skb);
54 return;
55
56error:
57 kfree_skb(skb);
53} 58}
54 59
55/* Called with rcu_read_lock and bottom-halves disabled. */ 60/* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
169 goto error; 174 goto error;
170 } 175 }
171 176
172 if (unlikely(skb_warn_if_lro(skb)))
173 goto error;
174
175 skb->dev = netdev_vport->dev; 177 skb->dev = netdev_vport->dev;
176 len = skb->len; 178 len = skb->len;
177 dev_queue_xmit(skb); 179 dev_queue_xmit(skb);
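
Note: the LRO check moves from the transmit path into netdev_port_receive(): an
LRO-aggregated skb cannot be safely forwarded or resegmented, so it is rejected
the moment it enters the datapath, and the new error label funnels both failure
cases through one kfree_skb(). The general shape of the single-exit pattern
(a sketch, not the full function):

        if (unlikely(!vport))
                goto error;
        if (unlikely(skb_warn_if_lro(skb)))     /* can't forward LRO skbs */
                goto error;
        /* ... normal receive path ... */
        return;
error:
        kfree_skb(skb);
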
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e639645e8fec..c7bfeff10767 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
2361 2361
2362 packet_flush_mclist(sk); 2362 packet_flush_mclist(sk);
2363 2363
2364 memset(&req_u, 0, sizeof(req_u)); 2364 if (po->rx_ring.pg_vec) {
2365 2365 memset(&req_u, 0, sizeof(req_u));
2366 if (po->rx_ring.pg_vec)
2367 packet_set_ring(sk, &req_u, 1, 0); 2366 packet_set_ring(sk, &req_u, 1, 0);
2367 }
2368 2368
2369 if (po->tx_ring.pg_vec) 2369 if (po->tx_ring.pg_vec) {
2370 memset(&req_u, 0, sizeof(req_u));
2370 packet_set_ring(sk, &req_u, 1, 1); 2371 packet_set_ring(sk, &req_u, 1, 1);
2372 }
2371 2373
2372 fanout_release(sk); 2374 fanout_release(sk);
2373 2375
@@ -3826,7 +3828,7 @@ static int __net_init packet_net_init(struct net *net)
3826 mutex_init(&net->packet.sklist_lock); 3828 mutex_init(&net->packet.sklist_lock);
3827 INIT_HLIST_HEAD(&net->packet.sklist); 3829 INIT_HLIST_HEAD(&net->packet.sklist);
3828 3830
3829 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 3831 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
3830 return -ENOMEM; 3832 return -ENOMEM;
3831 3833
3832 return 0; 3834 return 0;
@@ -3834,7 +3836,7 @@ static int __net_init packet_net_init(struct net *net)
3834 3836
3835static void __net_exit packet_net_exit(struct net *net) 3837static void __net_exit packet_net_exit(struct net *net)
3836{ 3838{
3837 proc_net_remove(net, "packet"); 3839 remove_proc_entry("packet", net->proc_net);
3838} 3840}
3839 3841
3840static struct pernet_operations packet_net_ops = { 3842static struct pernet_operations packet_net_ops = {
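
Note: two independent fixes here. packet_release() now zeroes req_u immediately
before each packet_set_ring() call, so the tx teardown can no longer observe
state left behind by the rx teardown. And the proc registration moves to the
generic API: proc_create() against net->proc_net plus remove_proc_entry(),
replacing proc_net_fops_create()/proc_net_remove(); the same mechanical
conversion repeats in the phonet, rose, rxrpc and sched hunks below. A hedged
miniature of the new-style pernet registration (foo and foo_seq_fops are
placeholders):

        static int __net_init foo_net_init(struct net *net)
        {
                if (!proc_create("foo", 0, net->proc_net, &foo_seq_fops))
                        return -ENOMEM;
                return 0;
        }

        static void __net_exit foo_net_exit(struct net *net)
        {
                remove_proc_entry("foo", net->proc_net);
        }
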
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 5bf6341e2dd4..45a7df6575de 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -320,7 +320,7 @@ static int __net_init phonet_init_net(struct net *net)
320{ 320{
321 struct phonet_net *pnn = phonet_pernet(net); 321 struct phonet_net *pnn = phonet_pernet(net);
322 322
323 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) 323 if (!proc_create("phonet", 0, net->proc_net, &pn_sock_seq_fops))
324 return -ENOMEM; 324 return -ENOMEM;
325 325
326 INIT_LIST_HEAD(&pnn->pndevs.list); 326 INIT_LIST_HEAD(&pnn->pndevs.list);
@@ -331,7 +331,7 @@ static int __net_init phonet_init_net(struct net *net)
331 331
332static void __net_exit phonet_exit_net(struct net *net) 332static void __net_exit phonet_exit_net(struct net *net)
333{ 333{
334 proc_net_remove(net, "phonet"); 334 remove_proc_entry("phonet", net->proc_net);
335} 335}
336 336
337static struct pernet_operations phonet_net_ops = { 337static struct pernet_operations phonet_net_ops = {
@@ -348,7 +348,7 @@ int __init phonet_device_init(void)
348 if (err) 348 if (err)
349 return err; 349 return err;
350 350
351 proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops); 351 proc_create("pnresource", 0, init_net.proc_net, &pn_res_seq_fops);
352 register_netdevice_notifier(&phonet_device_notifier); 352 register_netdevice_notifier(&phonet_device_notifier);
353 err = phonet_netlink_register(); 353 err = phonet_netlink_register();
354 if (err) 354 if (err)
@@ -361,7 +361,7 @@ void phonet_device_exit(void)
361 rtnl_unregister_all(PF_PHONET); 361 rtnl_unregister_all(PF_PHONET);
362 unregister_netdevice_notifier(&phonet_device_notifier); 362 unregister_netdevice_notifier(&phonet_device_notifier);
363 unregister_pernet_subsys(&phonet_net_ops); 363 unregister_pernet_subsys(&phonet_net_ops);
364 proc_net_remove(&init_net, "pnresource"); 364 remove_proc_entry("pnresource", init_net.proc_net);
365} 365}
366 366
367int phonet_route_add(struct net_device *dev, u8 daddr) 367int phonet_route_add(struct net_device *dev, u8 daddr)
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index ec753b3ae72a..f2c670ba7b9b 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,7 +1,7 @@
1 1
2config RDS 2config RDS
3 tristate "The RDS Protocol (EXPERIMENTAL)" 3 tristate "The RDS Protocol"
4 depends on INET && EXPERIMENTAL 4 depends on INET
5 ---help--- 5 ---help---
6 The RDS (Reliable Datagram Sockets) protocol provides reliable, 6 The RDS (Reliable Datagram Sockets) protocol provides reliable,
7 sequenced delivery of datagrams over Infiniband, iWARP, 7 sequenced delivery of datagrams over Infiniband, iWARP,
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index a1e116277477..31b74f5e61ad 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -434,12 +434,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
434 version = RDS_PROTOCOL_3_0; 434 version = RDS_PROTOCOL_3_0;
435 while ((common >>= 1) != 0) 435 while ((common >>= 1) != 0)
436 version++; 436 version++;
437 } 437 } else
438 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using " 438 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
439 "incompatible protocol version %u.%u\n", 439 &dp->dp_saddr,
440 &dp->dp_saddr, 440 dp->dp_protocol_major,
441 dp->dp_protocol_major, 441 dp->dp_protocol_minor);
442 dp->dp_protocol_minor);
443 return version; 442 return version;
444} 443}
445 444
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8c5bc857f04d..8eb9501e3d60 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -339,8 +339,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
339 sge->length = sizeof(struct rds_header); 339 sge->length = sizeof(struct rds_header);
340 340
341 sge = &recv->r_sge[1]; 341 sge = &recv->r_sge[1];
342 sge->addr = sg_dma_address(&recv->r_frag->f_sg); 342 sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
343 sge->length = sg_dma_len(&recv->r_frag->f_sg); 343 sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
344 344
345 ret = 0; 345 ret = 0;
346out: 346out:
@@ -381,7 +381,10 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); 381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv, 382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg), 383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
384 (long) sg_dma_address(&recv->r_frag->f_sg), ret); 384 (long) ib_sg_dma_address(
385 ic->i_cm_id->device,
386 &recv->r_frag->f_sg),
387 ret);
385 if (ret) { 388 if (ret) {
386 rds_ib_conn_error(conn, "recv post on " 389 rds_ib_conn_error(conn, "recv post on "
387 "%pI4 returned %d, disconnecting and " 390 "%pI4 returned %d, disconnecting and "
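
Note: RDS switches from the raw scatterlist accessors to the
ib_sg_dma_address()/ib_sg_dma_len() wrappers so that IB devices providing their
own dma_ops (the software-DMA drivers) see their overrides honoured. Roughly
what the address wrapper does, paraphrased from this era's
include/rdma/ib_verbs.h:

        static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                            struct scatterlist *sg)
        {
                if (dev->dma_ops)
                        return dev->dma_ops->dma_address(dev, sg);
                return sg_dma_address(sg);
        }
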
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index c9d931e7ffec..b85107b5ef62 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -148,11 +148,9 @@ static unsigned long rfkill_ratelimit(const unsigned long last)
148 148
149static void rfkill_schedule_ratelimited(void) 149static void rfkill_schedule_ratelimited(void)
150{ 150{
151 if (delayed_work_pending(&rfkill_op_work)) 151 if (schedule_delayed_work(&rfkill_op_work,
152 return; 152 rfkill_ratelimit(rfkill_last_scheduled)))
153 schedule_delayed_work(&rfkill_op_work, 153 rfkill_last_scheduled = jiffies;
154 rfkill_ratelimit(rfkill_last_scheduled));
155 rfkill_last_scheduled = jiffies;
156} 154}
157 155
158static void rfkill_schedule_global_op(enum rfkill_sched_op op) 156static void rfkill_schedule_global_op(enum rfkill_sched_op op)
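
Note: the old delayed_work_pending() test could race with the work becoming
pending between check and schedule, and it cost an extra branch for nothing.
schedule_delayed_work() already reports whether it queued anything (false if
the work was pending), so the return value does both jobs and
rfkill_last_scheduled is only bumped on a fresh queueing:

        if (schedule_delayed_work(&rfkill_op_work, delay))
                rfkill_last_scheduled = jiffies; /* queued, not already pending */
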
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c4719ce604c2..b768fe9d5e7a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1575,10 +1575,13 @@ static int __init rose_proto_init(void)
1575 1575
1576 rose_add_loopback_neigh(); 1576 rose_add_loopback_neigh();
1577 1577
1578 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); 1578 proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
1579 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); 1579 proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
1580 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); 1580 &rose_neigh_fops);
1581 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); 1581 proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
1582 &rose_nodes_fops);
1583 proc_create("rose_routes", S_IRUGO, init_net.proc_net,
1584 &rose_routes_fops);
1582out: 1585out:
1583 return rc; 1586 return rc;
1584fail: 1587fail:
@@ -1605,10 +1608,10 @@ static void __exit rose_exit(void)
1605{ 1608{
1606 int i; 1609 int i;
1607 1610
1608 proc_net_remove(&init_net, "rose"); 1611 remove_proc_entry("rose", init_net.proc_net);
1609 proc_net_remove(&init_net, "rose_neigh"); 1612 remove_proc_entry("rose_neigh", init_net.proc_net);
1610 proc_net_remove(&init_net, "rose_nodes"); 1613 remove_proc_entry("rose_nodes", init_net.proc_net);
1611 proc_net_remove(&init_net, "rose_routes"); 1614 remove_proc_entry("rose_routes", init_net.proc_net);
1612 rose_loopback_clear(); 1615 rose_loopback_clear();
1613 1616
1614 rose_rt_free(); 1617 rose_rt_free();
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 0d3103c4f11c..23dcef12b986 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -4,7 +4,7 @@
4 4
5config AF_RXRPC 5config AF_RXRPC
6 tristate "RxRPC session sockets" 6 tristate "RxRPC session sockets"
7 depends on INET && EXPERIMENTAL 7 depends on INET
8 select CRYPTO 8 select CRYPTO
9 select KEYS 9 select KEYS
10 help 10 help
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 05996d0dd828..e61aa6001c65 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h>
13#include <linux/net.h> 14#include <linux/net.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -792,10 +793,9 @@ static const struct net_proto_family rxrpc_family_ops = {
792 */ 793 */
793static int __init af_rxrpc_init(void) 794static int __init af_rxrpc_init(void)
794{ 795{
795 struct sk_buff *dummy_skb;
796 int ret = -1; 796 int ret = -1;
797 797
798 BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb)); 798 BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
799 799
800 rxrpc_epoch = htonl(get_seconds()); 800 rxrpc_epoch = htonl(get_seconds());
801 801
@@ -839,8 +839,9 @@ static int __init af_rxrpc_init(void)
839 } 839 }
840 840
841#ifdef CONFIG_PROC_FS 841#ifdef CONFIG_PROC_FS
842 proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops); 842 proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
843 proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops); 843 proc_create("rxrpc_conns", 0, init_net.proc_net,
844 &rxrpc_connection_seq_fops);
844#endif 845#endif
845 return 0; 846 return 0;
846 847
@@ -878,8 +879,8 @@ static void __exit af_rxrpc_exit(void)
878 879
879 _debug("flush scheduled work"); 880 _debug("flush scheduled work");
880 flush_workqueue(rxrpc_workqueue); 881 flush_workqueue(rxrpc_workqueue);
881 proc_net_remove(&init_net, "rxrpc_conns"); 882 remove_proc_entry("rxrpc_conns", init_net.proc_net);
882 proc_net_remove(&init_net, "rxrpc_calls"); 883 remove_proc_entry("rxrpc_calls", init_net.proc_net);
883 destroy_workqueue(rxrpc_workqueue); 884 destroy_workqueue(rxrpc_workqueue);
884 kmem_cache_destroy(rxrpc_call_jar); 885 kmem_cache_destroy(rxrpc_call_jar);
885 _leave(""); 886 _leave("");
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 65d240cbf74b..8579c4bb20c9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -485,8 +485,9 @@ errout:
485 return err; 485 return err;
486} 486}
487 487
488struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, 488struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
489 char *name, int ovr, int bind) 489 struct nlattr *est, char *name, int ovr,
490 int bind)
490{ 491{
491 struct tc_action *a; 492 struct tc_action *a;
492 struct tc_action_ops *a_o; 493 struct tc_action_ops *a_o;
@@ -542,9 +543,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
542 543
543 /* backward compatibility for policer */ 544 /* backward compatibility for policer */
544 if (name == NULL) 545 if (name == NULL)
545 err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind); 546 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
546 else 547 else
547 err = a_o->init(nla, est, a, ovr, bind); 548 err = a_o->init(net, nla, est, a, ovr, bind);
548 if (err < 0) 549 if (err < 0)
549 goto err_free; 550 goto err_free;
550 551
@@ -566,8 +567,9 @@ err_out:
566 return ERR_PTR(err); 567 return ERR_PTR(err);
567} 568}
568 569
569struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, 570struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
570 char *name, int ovr, int bind) 571 struct nlattr *est, char *name, int ovr,
572 int bind)
571{ 573{
572 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 574 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
573 struct tc_action *head = NULL, *act, *act_prev = NULL; 575 struct tc_action *head = NULL, *act, *act_prev = NULL;
@@ -579,7 +581,7 @@ struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
579 return ERR_PTR(err); 581 return ERR_PTR(err);
580 582
581 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { 583 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
582 act = tcf_action_init_1(tb[i], est, name, ovr, bind); 584 act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
583 if (IS_ERR(act)) 585 if (IS_ERR(act))
584 goto err; 586 goto err;
585 act->order = i; 587 act->order = i;
@@ -960,7 +962,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
960 struct tc_action *a; 962 struct tc_action *a;
961 u32 seq = n->nlmsg_seq; 963 u32 seq = n->nlmsg_seq;
962 964
963 act = tcf_action_init(nla, NULL, NULL, ovr, 0); 965 act = tcf_action_init(net, nla, NULL, NULL, ovr, 0);
964 if (act == NULL) 966 if (act == NULL)
965 goto done; 967 goto done;
966 if (IS_ERR(act)) { 968 if (IS_ERR(act)) {
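
Note: tcf_action_init() and tcf_action_init_1() grow a struct net * that is
forwarded into every action's ->init() callback, so actions stop assuming
init_net; the act_mirred hunk further down shows the payoff, resolving
ifindexes with __dev_get_by_index(net, ...). The widened callback shape each
action module now implements (prototype sketch; the typedef name is invented
here for illustration):

        typedef int (*tc_action_init_t)(struct net *net, struct nlattr *nla,
                                        struct nlattr *est, struct tc_action *a,
                                        int ovr, int bind);
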
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 2c8ad7c86e43..08fa1e8a4ca4 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -51,7 +51,7 @@ static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
51 [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), }, 51 [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
52}; 52};
53 53
54static int tcf_csum_init(struct nlattr *nla, struct nlattr *est, 54static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
55 struct tc_action *a, int ovr, int bind) 55 struct tc_action *a, int ovr, int bind)
56{ 56{
57 struct nlattr *tb[TCA_CSUM_MAX + 1]; 57 struct nlattr *tb[TCA_CSUM_MAX + 1];
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 05d60859d8e3..fd2b3cff5fa2 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -58,8 +58,9 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
58 [TCA_GACT_PROB] = { .len = sizeof(struct tc_gact_p) }, 58 [TCA_GACT_PROB] = { .len = sizeof(struct tc_gact_p) },
59}; 59};
60 60
61static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, 61static int tcf_gact_init(struct net *net, struct nlattr *nla,
62 struct tc_action *a, int ovr, int bind) 62 struct nlattr *est, struct tc_action *a,
63 int ovr, int bind)
63{ 64{
64 struct nlattr *tb[TCA_GACT_MAX + 1]; 65 struct nlattr *tb[TCA_GACT_MAX + 1];
65 struct tc_gact *parm; 66 struct tc_gact *parm;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 58fb3c7aab9e..e0f6de64afec 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -102,7 +102,7 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
102 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, 102 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
103}; 103};
104 104
105static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, 105static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
106 struct tc_action *a, int ovr, int bind) 106 struct tc_action *a, int ovr, int bind)
107{ 107{
108 struct nlattr *tb[TCA_IPT_MAX + 1]; 108 struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -207,10 +207,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
207 struct tcf_ipt *ipt = a->priv; 207 struct tcf_ipt *ipt = a->priv;
208 struct xt_action_param par; 208 struct xt_action_param par;
209 209
210 if (skb_cloned(skb)) { 210 if (skb_unclone(skb, GFP_ATOMIC))
211 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 211 return TC_ACT_UNSPEC;
212 return TC_ACT_UNSPEC;
213 }
214 212
215 spin_lock(&ipt->tcf_lock); 213 spin_lock(&ipt->tcf_lock);
216 214
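
Note: the open-coded clone-then-expand test collapses into skb_unclone(), added
to skbuff.h in this window; act_pedit below gets the same conversion. The
helper reads roughly as follows:

        static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
        {
                if (skb_cloned(skb))
                        return pskb_expand_head(skb, 0, 0, pri);
                return 0;
        }
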
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 9c0fd0c78814..5d676edc22a6 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -62,8 +62,9 @@ static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
62 [TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) }, 62 [TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) },
63}; 63};
64 64
65static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, 65static int tcf_mirred_init(struct net *net, struct nlattr *nla,
66 struct tc_action *a, int ovr, int bind) 66 struct nlattr *est, struct tc_action *a, int ovr,
67 int bind)
67{ 68{
68 struct nlattr *tb[TCA_MIRRED_MAX + 1]; 69 struct nlattr *tb[TCA_MIRRED_MAX + 1];
69 struct tc_mirred *parm; 70 struct tc_mirred *parm;
@@ -88,7 +89,7 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
88 return -EINVAL; 89 return -EINVAL;
89 } 90 }
90 if (parm->ifindex) { 91 if (parm->ifindex) {
91 dev = __dev_get_by_index(&init_net, parm->ifindex); 92 dev = __dev_get_by_index(net, parm->ifindex);
92 if (dev == NULL) 93 if (dev == NULL)
93 return -ENODEV; 94 return -ENODEV;
94 switch (dev->type) { 95 switch (dev->type) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index b5d029eb44f2..876f0ef29694 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -44,7 +44,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
44 [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) }, 44 [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
45}; 45};
46 46
47static int tcf_nat_init(struct nlattr *nla, struct nlattr *est, 47static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
48 struct tc_action *a, int ovr, int bind) 48 struct tc_action *a, int ovr, int bind)
49{ 49{
50 struct nlattr *tb[TCA_NAT_MAX + 1]; 50 struct nlattr *tb[TCA_NAT_MAX + 1];
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45c53ab067a6..7ed78c9e505c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -38,8 +38,9 @@ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
38 [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) }, 38 [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
39}; 39};
40 40
41static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, 41static int tcf_pedit_init(struct net *net, struct nlattr *nla,
42 struct tc_action *a, int ovr, int bind) 42 struct nlattr *est, struct tc_action *a,
43 int ovr, int bind)
43{ 44{
44 struct nlattr *tb[TCA_PEDIT_MAX + 1]; 45 struct nlattr *tb[TCA_PEDIT_MAX + 1];
45 struct tc_pedit *parm; 46 struct tc_pedit *parm;
@@ -130,8 +131,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
130 int i, munged = 0; 131 int i, munged = 0;
131 unsigned int off; 132 unsigned int off;
132 133
133 if (skb_cloned(skb) && 134 if (skb_unclone(skb, GFP_ATOMIC))
134 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
135 return p->tcf_action; 135 return p->tcf_action;
136 136
137 off = skb_network_offset(skb); 137 off = skb_network_offset(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a9de23297d47..823463adbd21 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,23 @@
22#include <net/act_api.h> 22#include <net/act_api.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24 24
25#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L) 25struct tcf_police {
26#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L) 26 struct tcf_common common;
27 int tcfp_result;
28 u32 tcfp_ewma_rate;
29 s64 tcfp_burst;
30 u32 tcfp_mtu;
31 s64 tcfp_toks;
32 s64 tcfp_ptoks;
33 s64 tcfp_mtu_ptoks;
34 s64 tcfp_t_c;
35 struct psched_ratecfg rate;
36 bool rate_present;
37 struct psched_ratecfg peak;
38 bool peak_present;
39};
40#define to_police(pc) \
41 container_of(pc, struct tcf_police, common)
27 42
28#define POL_TAB_MASK 15 43#define POL_TAB_MASK 15
29static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; 44static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -108,10 +123,6 @@ static void tcf_police_destroy(struct tcf_police *p)
108 write_unlock_bh(&police_lock); 123 write_unlock_bh(&police_lock);
109 gen_kill_estimator(&p->tcf_bstats, 124 gen_kill_estimator(&p->tcf_bstats,
110 &p->tcf_rate_est); 125 &p->tcf_rate_est);
111 if (p->tcfp_R_tab)
112 qdisc_put_rtab(p->tcfp_R_tab);
113 if (p->tcfp_P_tab)
114 qdisc_put_rtab(p->tcfp_P_tab);
115 /* 126 /*
116 * gen_estimator est_timer() might access p->tcf_lock 127 * gen_estimator est_timer() might access p->tcf_lock
117 * or bstats, wait a RCU grace period before freeing p 128 * or bstats, wait a RCU grace period before freeing p
@@ -130,8 +141,9 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
130 [TCA_POLICE_RESULT] = { .type = NLA_U32 }, 141 [TCA_POLICE_RESULT] = { .type = NLA_U32 },
131}; 142};
132 143
133static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, 144static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
134 struct tc_action *a, int ovr, int bind) 145 struct nlattr *est, struct tc_action *a,
146 int ovr, int bind)
135{ 147{
136 unsigned int h; 148 unsigned int h;
137 int ret = 0, err; 149 int ret = 0, err;
@@ -211,26 +223,36 @@ override:
211 } 223 }
212 224
213 /* No failure allowed after this point */ 225 /* No failure allowed after this point */
214 if (R_tab != NULL) { 226 police->tcfp_mtu = parm->mtu;
215 qdisc_put_rtab(police->tcfp_R_tab); 227 if (police->tcfp_mtu == 0) {
216 police->tcfp_R_tab = R_tab; 228 police->tcfp_mtu = ~0;
229 if (R_tab)
230 police->tcfp_mtu = 255 << R_tab->rate.cell_log;
231 }
232 if (R_tab) {
233 police->rate_present = true;
234 psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
235 qdisc_put_rtab(R_tab);
236 } else {
237 police->rate_present = false;
217 } 238 }
218 if (P_tab != NULL) { 239 if (P_tab) {
219 qdisc_put_rtab(police->tcfp_P_tab); 240 police->peak_present = true;
220 police->tcfp_P_tab = P_tab; 241 psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
242 qdisc_put_rtab(P_tab);
243 } else {
244 police->peak_present = false;
221 } 245 }
222 246
223 if (tb[TCA_POLICE_RESULT]) 247 if (tb[TCA_POLICE_RESULT])
224 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); 248 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
225 police->tcfp_toks = police->tcfp_burst = parm->burst; 249 police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
226 police->tcfp_mtu = parm->mtu; 250 police->tcfp_toks = police->tcfp_burst;
227 if (police->tcfp_mtu == 0) { 251 if (police->peak_present) {
228 police->tcfp_mtu = ~0; 252 police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
229 if (police->tcfp_R_tab) 253 police->tcfp_mtu);
230 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log; 254 police->tcfp_ptoks = police->tcfp_mtu_ptoks;
231 } 255 }
232 if (police->tcfp_P_tab)
233 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
234 police->tcf_action = parm->action; 256 police->tcf_action = parm->action;
235 257
236 if (tb[TCA_POLICE_AVRATE]) 258 if (tb[TCA_POLICE_AVRATE])
@@ -240,7 +262,7 @@ override:
240 if (ret != ACT_P_CREATED) 262 if (ret != ACT_P_CREATED)
241 return ret; 263 return ret;
242 264
243 police->tcfp_t_c = psched_get_time(); 265 police->tcfp_t_c = ktime_to_ns(ktime_get());
244 police->tcf_index = parm->index ? parm->index : 266 police->tcf_index = parm->index ? parm->index :
245 tcf_hash_new_index(&police_idx_gen, &police_hash_info); 267 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
246 h = tcf_hash(police->tcf_index, POL_TAB_MASK); 268 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -286,9 +308,9 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
286 struct tcf_result *res) 308 struct tcf_result *res)
287{ 309{
288 struct tcf_police *police = a->priv; 310 struct tcf_police *police = a->priv;
289 psched_time_t now; 311 s64 now;
290 long toks; 312 s64 toks;
291 long ptoks = 0; 313 s64 ptoks = 0;
292 314
293 spin_lock(&police->tcf_lock); 315 spin_lock(&police->tcf_lock);
294 316
@@ -304,24 +326,25 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
304 } 326 }
305 327
306 if (qdisc_pkt_len(skb) <= police->tcfp_mtu) { 328 if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
307 if (police->tcfp_R_tab == NULL) { 329 if (!police->rate_present) {
308 spin_unlock(&police->tcf_lock); 330 spin_unlock(&police->tcf_lock);
309 return police->tcfp_result; 331 return police->tcfp_result;
310 } 332 }
311 333
312 now = psched_get_time(); 334 now = ktime_to_ns(ktime_get());
313 toks = psched_tdiff_bounded(now, police->tcfp_t_c, 335 toks = min_t(s64, now - police->tcfp_t_c,
314 police->tcfp_burst); 336 police->tcfp_burst);
315 if (police->tcfp_P_tab) { 337 if (police->peak_present) {
316 ptoks = toks + police->tcfp_ptoks; 338 ptoks = toks + police->tcfp_ptoks;
317 if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) 339 if (ptoks > police->tcfp_mtu_ptoks)
318 ptoks = (long)L2T_P(police, police->tcfp_mtu); 340 ptoks = police->tcfp_mtu_ptoks;
319 ptoks -= L2T_P(police, qdisc_pkt_len(skb)); 341 ptoks -= (s64) psched_l2t_ns(&police->peak,
342 qdisc_pkt_len(skb));
320 } 343 }
321 toks += police->tcfp_toks; 344 toks += police->tcfp_toks;
322 if (toks > (long)police->tcfp_burst) 345 if (toks > police->tcfp_burst)
323 toks = police->tcfp_burst; 346 toks = police->tcfp_burst;
324 toks -= L2T(police, qdisc_pkt_len(skb)); 347 toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
325 if ((toks|ptoks) >= 0) { 348 if ((toks|ptoks) >= 0) {
326 police->tcfp_t_c = now; 349 police->tcfp_t_c = now;
327 police->tcfp_toks = toks; 350 police->tcfp_toks = toks;
@@ -347,15 +370,15 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
347 .index = police->tcf_index, 370 .index = police->tcf_index,
348 .action = police->tcf_action, 371 .action = police->tcf_action,
349 .mtu = police->tcfp_mtu, 372 .mtu = police->tcfp_mtu,
350 .burst = police->tcfp_burst, 373 .burst = PSCHED_NS2TICKS(police->tcfp_burst),
351 .refcnt = police->tcf_refcnt - ref, 374 .refcnt = police->tcf_refcnt - ref,
352 .bindcnt = police->tcf_bindcnt - bind, 375 .bindcnt = police->tcf_bindcnt - bind,
353 }; 376 };
354 377
355 if (police->tcfp_R_tab) 378 if (police->rate_present)
356 opt.rate = police->tcfp_R_tab->rate; 379 opt.rate.rate = psched_ratecfg_getrate(&police->rate);
357 if (police->tcfp_P_tab) 380 if (police->peak_present)
358 opt.peakrate = police->tcfp_P_tab->rate; 381 opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
359 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) 382 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
360 goto nla_put_failure; 383 goto nla_put_failure;
361 if (police->tcfp_result && 384 if (police->tcfp_result &&
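
Note: this is the policer's move off psched ticks and qdisc rate tables onto a
64-bit nanosecond token bucket: tcfp_burst/toks/ptoks become s64 nanoseconds of
credit, packet cost comes from psched_l2t_ns(), and the rtabs are dropped after
being converted into psched_ratecfg at setup. A runnable userspace toy of the
conform/exceed decision (rate, MTU and packet spacing are made-up numbers):

        #include <stdint.h>
        #include <stdio.h>

        #define NSEC_PER_SEC 1000000000ULL

        /* ns needed to transmit len bytes at rate_bps */
        static int64_t l2t_ns(uint64_t rate_bps, uint32_t len)
        {
                return (int64_t)(len * 8ULL * NSEC_PER_SEC / rate_bps);
        }

        int main(void)
        {
                uint64_t rate = 1000000;                /* 1 Mbit/s */
                int64_t burst = l2t_ns(rate, 1500);     /* one packet of credit */
                int64_t toks = burst, t_c = 0;

                for (int64_t now = 0; now < 2000000; now += 500000) {
                        int64_t t = now - t_c + toks;   /* credit earned since t_c */

                        if (t > burst)
                                t = burst;              /* cap at the burst size */
                        t -= l2t_ns(rate, 1500);        /* spend this packet's cost */
                        printf("t=%lldns: %s\n", (long long)now,
                               t >= 0 ? "conform" : "exceed");
                        if (t >= 0) {
                                t_c = now;
                                toks = t;
                        }
                }
                return 0;
        }
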
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3714f60f0b3c..7725eb4ab756 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -95,8 +95,9 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
95 [TCA_DEF_DATA] = { .type = NLA_STRING, .len = SIMP_MAX_DATA }, 95 [TCA_DEF_DATA] = { .type = NLA_STRING, .len = SIMP_MAX_DATA },
96}; 96};
97 97
98static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, 98static int tcf_simp_init(struct net *net, struct nlattr *nla,
99 struct tc_action *a, int ovr, int bind) 99 struct nlattr *est, struct tc_action *a,
100 int ovr, int bind)
100{ 101{
101 struct nlattr *tb[TCA_DEF_MAX + 1]; 102 struct nlattr *tb[TCA_DEF_MAX + 1];
102 struct tc_defact *parm; 103 struct tc_defact *parm;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 476e0fac6712..cb4221171f93 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -67,8 +67,9 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
67 [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, 67 [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) },
68}; 68};
69 69
70static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, 70static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
71 struct tc_action *a, int ovr, int bind) 71 struct nlattr *est, struct tc_action *a,
72 int ovr, int bind)
72{ 73{
73 struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; 74 struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
74 struct tc_skbedit *parm; 75 struct tc_skbedit *parm;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ff55ed6c49b2..964f5e4f4b8a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -321,7 +321,7 @@ replay:
321 } 321 }
322 } 322 }
323 323
324 err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh); 324 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
325 if (err == 0) { 325 if (err == 0) {
326 if (tp_created) { 326 if (tp_created) {
327 spin_lock_bh(root_lock); 327 spin_lock_bh(root_lock);
@@ -508,7 +508,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
508} 508}
509EXPORT_SYMBOL(tcf_exts_destroy); 509EXPORT_SYMBOL(tcf_exts_destroy);
510 510
511int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb, 511int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
512 struct nlattr *rate_tlv, struct tcf_exts *exts, 512 struct nlattr *rate_tlv, struct tcf_exts *exts,
513 const struct tcf_ext_map *map) 513 const struct tcf_ext_map *map)
514{ 514{
@@ -519,7 +519,7 @@ int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
519 struct tc_action *act; 519 struct tc_action *act;
520 520
521 if (map->police && tb[map->police]) { 521 if (map->police && tb[map->police]) {
522 act = tcf_action_init_1(tb[map->police], rate_tlv, 522 act = tcf_action_init_1(net, tb[map->police], rate_tlv,
523 "police", TCA_ACT_NOREPLACE, 523 "police", TCA_ACT_NOREPLACE,
524 TCA_ACT_BIND); 524 TCA_ACT_BIND);
525 if (IS_ERR(act)) 525 if (IS_ERR(act))
@@ -528,8 +528,9 @@ int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
528 act->type = TCA_OLD_COMPAT; 528 act->type = TCA_OLD_COMPAT;
529 exts->action = act; 529 exts->action = act;
530 } else if (map->action && tb[map->action]) { 530 } else if (map->action && tb[map->action]) {
531 act = tcf_action_init(tb[map->action], rate_tlv, NULL, 531 act = tcf_action_init(net, tb[map->action], rate_tlv,
532 TCA_ACT_NOREPLACE, TCA_ACT_BIND); 532 NULL, TCA_ACT_NOREPLACE,
533 TCA_ACT_BIND);
533 if (IS_ERR(act)) 534 if (IS_ERR(act))
534 return PTR_ERR(act); 535 return PTR_ERR(act);
535 536
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 344a11b342e5..d76a35d0dc85 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -132,15 +132,16 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
132 [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, 132 [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
133}; 133};
134 134
135static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, 135static int basic_set_parms(struct net *net, struct tcf_proto *tp,
136 unsigned long base, struct nlattr **tb, 136 struct basic_filter *f, unsigned long base,
137 struct nlattr **tb,
137 struct nlattr *est) 138 struct nlattr *est)
138{ 139{
139 int err = -EINVAL; 140 int err = -EINVAL;
140 struct tcf_exts e; 141 struct tcf_exts e;
141 struct tcf_ematch_tree t; 142 struct tcf_ematch_tree t;
142 143
143 err = tcf_exts_validate(tp, tb, est, &e, &basic_ext_map); 144 err = tcf_exts_validate(net, tp, tb, est, &e, &basic_ext_map);
144 if (err < 0) 145 if (err < 0)
145 return err; 146 return err;
146 147
@@ -162,7 +163,7 @@ errout:
162 return err; 163 return err;
163} 164}
164 165
165static int basic_change(struct sk_buff *in_skb, 166static int basic_change(struct net *net, struct sk_buff *in_skb,
166 struct tcf_proto *tp, unsigned long base, u32 handle, 167 struct tcf_proto *tp, unsigned long base, u32 handle,
167 struct nlattr **tca, unsigned long *arg) 168 struct nlattr **tca, unsigned long *arg)
168{ 169{
@@ -182,7 +183,7 @@ static int basic_change(struct sk_buff *in_skb,
182 if (f != NULL) { 183 if (f != NULL) {
183 if (handle && f->handle != handle) 184 if (handle && f->handle != handle)
184 return -EINVAL; 185 return -EINVAL;
185 return basic_set_parms(tp, f, base, tb, tca[TCA_RATE]); 186 return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
186 } 187 }
187 188
188 err = -ENOBUFS; 189 err = -ENOBUFS;
@@ -208,7 +209,7 @@ static int basic_change(struct sk_buff *in_skb,
208 f->handle = head->hgenerator; 209 f->handle = head->hgenerator;
209 } 210 }
210 211
211 err = basic_set_parms(tp, f, base, tb, tca[TCA_RATE]); 212 err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
212 if (err < 0) 213 if (err < 0)
213 goto errout; 214 goto errout;
214 215
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 6db7855b9029..3a294eb98d61 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -178,7 +178,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
178 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 178 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
179}; 179};
180 180
181static int cls_cgroup_change(struct sk_buff *in_skb, 181static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
182 struct tcf_proto *tp, unsigned long base, 182 struct tcf_proto *tp, unsigned long base,
183 u32 handle, struct nlattr **tca, 183 u32 handle, struct nlattr **tca,
184 unsigned long *arg) 184 unsigned long *arg)
@@ -215,7 +215,8 @@ static int cls_cgroup_change(struct sk_buff *in_skb,
215 if (err < 0) 215 if (err < 0)
216 return err; 216 return err;
217 217
218 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map); 218 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
219 &cgroup_ext_map);
219 if (err < 0) 220 if (err < 0)
220 return err; 221 return err;
221 222
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index ce82d0cb1b47..aa36a8c8b33b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -351,7 +351,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
351 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 351 [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
352}; 352};
353 353
354static int flow_change(struct sk_buff *in_skb, 354static int flow_change(struct net *net, struct sk_buff *in_skb,
355 struct tcf_proto *tp, unsigned long base, 355 struct tcf_proto *tp, unsigned long base,
356 u32 handle, struct nlattr **tca, 356 u32 handle, struct nlattr **tca,
357 unsigned long *arg) 357 unsigned long *arg)
@@ -397,7 +397,7 @@ static int flow_change(struct sk_buff *in_skb,
397 return -EOPNOTSUPP; 397 return -EOPNOTSUPP;
398 } 398 }
399 399
400 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map); 400 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
401 if (err < 0) 401 if (err < 0)
402 return err; 402 return err;
403 403
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 4075a0aef2aa..1135d8227f9b 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -192,7 +192,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
192}; 192};
193 193
194static int 194static int
195fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f, 195fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
196 struct nlattr **tb, struct nlattr **tca, unsigned long base) 196 struct nlattr **tb, struct nlattr **tca, unsigned long base)
197{ 197{
198 struct fw_head *head = (struct fw_head *)tp->root; 198 struct fw_head *head = (struct fw_head *)tp->root;
@@ -200,7 +200,7 @@ fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
200 u32 mask; 200 u32 mask;
201 int err; 201 int err;
202 202
203 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &fw_ext_map); 203 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &fw_ext_map);
204 if (err < 0) 204 if (err < 0)
205 return err; 205 return err;
206 206
@@ -233,7 +233,7 @@ errout:
233 return err; 233 return err;
234} 234}
235 235
236static int fw_change(struct sk_buff *in_skb, 236static int fw_change(struct net *net, struct sk_buff *in_skb,
237 struct tcf_proto *tp, unsigned long base, 237 struct tcf_proto *tp, unsigned long base,
238 u32 handle, 238 u32 handle,
239 struct nlattr **tca, 239 struct nlattr **tca,
@@ -255,7 +255,7 @@ static int fw_change(struct sk_buff *in_skb,
255 if (f != NULL) { 255 if (f != NULL) {
256 if (f->id != handle && handle) 256 if (f->id != handle && handle)
257 return -EINVAL; 257 return -EINVAL;
258 return fw_change_attrs(tp, f, tb, tca, base); 258 return fw_change_attrs(net, tp, f, tb, tca, base);
259 } 259 }
260 260
261 if (!handle) 261 if (!handle)
@@ -282,7 +282,7 @@ static int fw_change(struct sk_buff *in_skb,
282 282
283 f->id = handle; 283 f->id = handle;
284 284
285 err = fw_change_attrs(tp, f, tb, tca, base); 285 err = fw_change_attrs(net, tp, f, tb, tca, base);
286 if (err < 0) 286 if (err < 0)
287 goto errout; 287 goto errout;
288 288
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c10d57bf98f2..37da567d833e 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -335,9 +335,10 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
335 [TCA_ROUTE4_IIF] = { .type = NLA_U32 }, 335 [TCA_ROUTE4_IIF] = { .type = NLA_U32 },
336}; 336};
337 337
338static int route4_set_parms(struct tcf_proto *tp, unsigned long base, 338static int route4_set_parms(struct net *net, struct tcf_proto *tp,
339 struct route4_filter *f, u32 handle, struct route4_head *head, 339 unsigned long base, struct route4_filter *f,
340 struct nlattr **tb, struct nlattr *est, int new) 340 u32 handle, struct route4_head *head,
341 struct nlattr **tb, struct nlattr *est, int new)
341{ 342{
342 int err; 343 int err;
343 u32 id = 0, to = 0, nhandle = 0x8000; 344 u32 id = 0, to = 0, nhandle = 0x8000;
@@ -346,7 +347,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
346 struct route4_bucket *b; 347 struct route4_bucket *b;
347 struct tcf_exts e; 348 struct tcf_exts e;
348 349
349 err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map); 350 err = tcf_exts_validate(net, tp, tb, est, &e, &route_ext_map);
350 if (err < 0) 351 if (err < 0)
351 return err; 352 return err;
352 353
@@ -427,7 +428,7 @@ errout:
427 return err; 428 return err;
428} 429}
429 430
430static int route4_change(struct sk_buff *in_skb, 431static int route4_change(struct net *net, struct sk_buff *in_skb,
431 struct tcf_proto *tp, unsigned long base, 432 struct tcf_proto *tp, unsigned long base,
432 u32 handle, 433 u32 handle,
433 struct nlattr **tca, 434 struct nlattr **tca,
@@ -457,7 +458,7 @@ static int route4_change(struct sk_buff *in_skb,
457 if (f->bkt) 458 if (f->bkt)
458 old_handle = f->handle; 459 old_handle = f->handle;
459 460
460 err = route4_set_parms(tp, base, f, handle, head, tb, 461 err = route4_set_parms(net, tp, base, f, handle, head, tb,
461 tca[TCA_RATE], 0); 462 tca[TCA_RATE], 0);
462 if (err < 0) 463 if (err < 0)
463 return err; 464 return err;
@@ -480,7 +481,7 @@ static int route4_change(struct sk_buff *in_skb,
480 if (f == NULL) 481 if (f == NULL)
481 goto errout; 482 goto errout;
482 483
483 err = route4_set_parms(tp, base, f, handle, head, tb, 484 err = route4_set_parms(net, tp, base, f, handle, head, tb,
484 tca[TCA_RATE], 1); 485 tca[TCA_RATE], 1);
485 if (err < 0) 486 if (err < 0)
486 goto errout; 487 goto errout;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 494bbb90924a..252d8b05872e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -416,7 +416,7 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
416 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, 416 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
417}; 417};
418 418
419static int rsvp_change(struct sk_buff *in_skb, 419static int rsvp_change(struct net *net, struct sk_buff *in_skb,
420 struct tcf_proto *tp, unsigned long base, 420 struct tcf_proto *tp, unsigned long base,
421 u32 handle, 421 u32 handle,
422 struct nlattr **tca, 422 struct nlattr **tca,
@@ -440,7 +440,7 @@ static int rsvp_change(struct sk_buff *in_skb,
440 if (err < 0) 440 if (err < 0)
441 return err; 441 return err;
442 442
443 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map); 443 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
444 if (err < 0) 444 if (err < 0)
445 return err; 445 return err;
446 446
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a1293b4ab7a1..b86535a40169 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -197,9 +197,10 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
197}; 197};
198 198
199static int 199static int
200tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, 200tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
201 struct tcindex_data *p, struct tcindex_filter_result *r, 201 u32 handle, struct tcindex_data *p,
202 struct nlattr **tb, struct nlattr *est) 202 struct tcindex_filter_result *r, struct nlattr **tb,
203 struct nlattr *est)
203{ 204{
204 int err, balloc = 0; 205 int err, balloc = 0;
205 struct tcindex_filter_result new_filter_result, *old_r = r; 206 struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -208,7 +209,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
208 struct tcindex_filter *f = NULL; /* make gcc behave */ 209 struct tcindex_filter *f = NULL; /* make gcc behave */
209 struct tcf_exts e; 210 struct tcf_exts e;
210 211
211 err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map); 212 err = tcf_exts_validate(net, tp, tb, est, &e, &tcindex_ext_map);
212 if (err < 0) 213 if (err < 0)
213 return err; 214 return err;
214 215
@@ -332,7 +333,7 @@ errout:
332} 333}
333 334
334static int 335static int
335tcindex_change(struct sk_buff *in_skb, 336tcindex_change(struct net *net, struct sk_buff *in_skb,
336 struct tcf_proto *tp, unsigned long base, u32 handle, 337 struct tcf_proto *tp, unsigned long base, u32 handle,
337 struct nlattr **tca, unsigned long *arg) 338 struct nlattr **tca, unsigned long *arg)
338{ 339{
@@ -353,7 +354,8 @@ tcindex_change(struct sk_buff *in_skb,
353 if (err < 0) 354 if (err < 0)
354 return err; 355 return err;
355 356
356 return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]); 357 return tcindex_set_parms(net, tp, base, handle, p, r, tb,
358 tca[TCA_RATE]);
357} 359}
358 360
359 361
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c7c27bc91b5a..eb07a1e536e6 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -488,15 +488,15 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
488 [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) }, 488 [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
489}; 489};
490 490
491static int u32_set_parms(struct tcf_proto *tp, unsigned long base, 491static int u32_set_parms(struct net *net, struct tcf_proto *tp,
492 struct tc_u_hnode *ht, 492 unsigned long base, struct tc_u_hnode *ht,
493 struct tc_u_knode *n, struct nlattr **tb, 493 struct tc_u_knode *n, struct nlattr **tb,
494 struct nlattr *est) 494 struct nlattr *est)
495{ 495{
496 int err; 496 int err;
497 struct tcf_exts e; 497 struct tcf_exts e;
498 498
499 err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map); 499 err = tcf_exts_validate(net, tp, tb, est, &e, &u32_ext_map);
500 if (err < 0) 500 if (err < 0)
501 return err; 501 return err;
502 502
@@ -544,7 +544,7 @@ errout:
544 return err; 544 return err;
545} 545}
546 546
547static int u32_change(struct sk_buff *in_skb, 547static int u32_change(struct net *net, struct sk_buff *in_skb,
548 struct tcf_proto *tp, unsigned long base, u32 handle, 548 struct tcf_proto *tp, unsigned long base, u32 handle,
549 struct nlattr **tca, 549 struct nlattr **tca,
550 unsigned long *arg) 550 unsigned long *arg)
@@ -570,7 +570,8 @@ static int u32_change(struct sk_buff *in_skb,
570 if (TC_U32_KEY(n->handle) == 0) 570 if (TC_U32_KEY(n->handle) == 0)
571 return -EINVAL; 571 return -EINVAL;
572 572
573 return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]); 573 return u32_set_parms(net, tp, base, n->ht_up, n, tb,
574 tca[TCA_RATE]);
574 } 575 }
575 576
576 if (tb[TCA_U32_DIVISOR]) { 577 if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +657,7 @@ static int u32_change(struct sk_buff *in_skb,
656 } 657 }
657#endif 658#endif
658 659
659 err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]); 660 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
660 if (err == 0) { 661 if (err == 0) {
661 struct tc_u_knode **ins; 662 struct tc_u_knode **ins;
662 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) 663 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d84f7e734cd7..a181b484812a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -493,7 +493,7 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
493} 493}
494EXPORT_SYMBOL(qdisc_watchdog_init); 494EXPORT_SYMBOL(qdisc_watchdog_init);
495 495
496void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) 496void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
497{ 497{
498 if (test_bit(__QDISC_STATE_DEACTIVATED, 498 if (test_bit(__QDISC_STATE_DEACTIVATED,
499 &qdisc_root_sleeping(wd->qdisc)->state)) 499 &qdisc_root_sleeping(wd->qdisc)->state))
@@ -502,10 +502,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
502 qdisc_throttled(wd->qdisc); 502 qdisc_throttled(wd->qdisc);
503 503
504 hrtimer_start(&wd->timer, 504 hrtimer_start(&wd->timer,
505 ns_to_ktime(PSCHED_TICKS2NS(expires)), 505 ns_to_ktime(expires),
506 HRTIMER_MODE_ABS); 506 HRTIMER_MODE_ABS);
507} 507}
508EXPORT_SYMBOL(qdisc_watchdog_schedule); 508EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
509 509
510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
511{ 511{
@@ -1768,7 +1768,7 @@ static int __net_init psched_net_init(struct net *net)
1768{ 1768{
1769 struct proc_dir_entry *e; 1769 struct proc_dir_entry *e;
1770 1770
1771 e = proc_net_fops_create(net, "psched", 0, &psched_fops); 1771 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1772 if (e == NULL) 1772 if (e == NULL)
1773 return -ENOMEM; 1773 return -ENOMEM;
1774 1774
@@ -1777,7 +1777,7 @@ static int __net_init psched_net_init(struct net *net)
1777 1777
1778static void __net_exit psched_net_exit(struct net *net) 1778static void __net_exit psched_net_exit(struct net *net)
1779{ 1779{
1780 proc_net_remove(net, "psched"); 1780 remove_proc_entry("psched", net->proc_net);
1781} 1781}
1782#else 1782#else
1783static int __net_init psched_net_init(struct net *net) 1783static int __net_init psched_net_init(struct net *net)
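qdisc_watchdog_schedule() becomes qdisc_watchdog_schedule_ns() and now takes an absolute expiry in nanoseconds rather than psched ticks, so the PSCHED_TICKS2NS() conversion moves out of the hrtimer call. Existing tick-based callers are presumably kept working by a thin inline wrapper on the header side of this series; a sketch of that assumed wrapper (not shown in this diff):

    /* Assumed compatibility wrapper: convert psched ticks to ns at
     * the boundary so old callers need no changes. */
    static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                               psched_time_t expires)
    {
            qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
    }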
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d81a4478514..ffad48109a22 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <net/sch_generic.h>
28#include <net/pkt_sched.h> 29#include <net/pkt_sched.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
@@ -896,3 +897,39 @@ void dev_shutdown(struct net_device *dev)
896 897
897 WARN_ON(timer_pending(&dev->watchdog_timer)); 898 WARN_ON(timer_pending(&dev->watchdog_timer));
898} 899}
900
901void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
902{
903 u64 factor;
904 u64 mult;
905 int shift;
906
907 r->rate_bps = rate << 3;
908 r->shift = 0;
909 r->mult = 1;
910 /*
911 * Calibrate mult, shift so that token counting is accurate
912 * for smallest packet size (64 bytes). Token (time in ns) is
913 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
914 * work as long as the smallest packet transfer time can be
915 * accurately represented in nanosec.
916 */
917 if (r->rate_bps > 0) {
918 /*
919 * Higher shift gives better accuracy. Find the largest
920 * shift such that mult fits in 32 bits.
921 */
922 for (shift = 0; shift < 16; shift++) {
923 r->shift = shift;
924 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
925 mult = div64_u64(factor, r->rate_bps);
926 if (mult > UINT_MAX)
927 break;
928 }
929
930 r->shift = shift - 1;
931 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
932 r->mult = div64_u64(factor, r->rate_bps);
933 }
934}
935EXPORT_SYMBOL(psched_ratecfg_precompute);
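psched_ratecfg_precompute() replaces the per-packet division in "bytes * 8 * NSEC_PER_SEC / rate_bps" with one multiply and one shift. The standalone userspace model below reproduces the same arithmetic to show what it settles on; it assumes the consumer is the (len * mult) >> shift form used by the l2t_ns()/psched_l2t_ns() helpers in the HTB hunks that follow, and starts from rate_bps directly instead of the byte rate.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t rate_bps = 1000000;   /* 1 Mbit/s */
            int shift;

            /* Same search as above: the largest shift (< 16) whose
             * multiplier still fits in 32 bits. */
            for (shift = 0; shift < 16; shift++) {
                    uint64_t m = 8ULL * NSEC_PER_SEC * (1ULL << shift) / rate_bps;
                    if (m > UINT32_MAX)
                            break;
            }
            shift -= 1;
            uint64_t mult = 8ULL * NSEC_PER_SEC * (1ULL << shift) / rate_bps;

            /* Hot path: bytes -> wire time in ns, with no division. */
            uint64_t len = 1500;
            printf("shift=%d mult=%llu t=%llu ns\n", shift,
                   (unsigned long long)mult,
                   (unsigned long long)((len * mult) >> shift));
            /* -> shift=15 mult=262144000 t=12000000 ns: 1500 bytes at
             *    1 Mbit/s take exactly 12 ms, with no rounding loss here. */
            return 0;
    }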
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d2922c0ef57a..03c2692ca01e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -38,6 +38,7 @@
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <net/netlink.h> 40#include <net/netlink.h>
41#include <net/sch_generic.h>
41#include <net/pkt_sched.h> 42#include <net/pkt_sched.h>
42 43
43/* HTB algorithm. 44/* HTB algorithm.
@@ -71,12 +72,6 @@ enum htb_cmode {
71 HTB_CAN_SEND /* class can send */ 72 HTB_CAN_SEND /* class can send */
72}; 73};
73 74
74struct htb_rate_cfg {
75 u64 rate_bps;
76 u32 mult;
77 u32 shift;
78};
79
80/* interior & leaf nodes; props specific to leaves are marked L: */ 75/* interior & leaf nodes; props specific to leaves are marked L: */
81struct htb_class { 76struct htb_class {
82 struct Qdisc_class_common common; 77 struct Qdisc_class_common common;
@@ -124,8 +119,8 @@ struct htb_class {
124 int filter_cnt; 119 int filter_cnt;
125 120
126 /* token bucket parameters */ 121 /* token bucket parameters */
127 struct htb_rate_cfg rate; 122 struct psched_ratecfg rate;
128 struct htb_rate_cfg ceil; 123 struct psched_ratecfg ceil;
129 s64 buffer, cbuffer; /* token bucket depth/rate */ 124 s64 buffer, cbuffer; /* token bucket depth/rate */
130 psched_tdiff_t mbuffer; /* max wait time */ 125 psched_tdiff_t mbuffer; /* max wait time */
131 s64 tokens, ctokens; /* current number of tokens */ 126 s64 tokens, ctokens; /* current number of tokens */
@@ -168,45 +163,6 @@ struct htb_sched {
168 struct work_struct work; 163 struct work_struct work;
169}; 164};
170 165
171static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
172{
173 return ((u64)len * r->mult) >> r->shift;
174}
175
176static void htb_precompute_ratedata(struct htb_rate_cfg *r)
177{
178 u64 factor;
179 u64 mult;
180 int shift;
181
182 r->shift = 0;
183 r->mult = 1;
184 /*
185 * Calibrate mult, shift so that token counting is accurate
186 * for smallest packet size (64 bytes). Token (time in ns) is
187 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
188 * work as long as the smallest packet transfer time can be
189 * accurately represented in nanosec.
190 */
191 if (r->rate_bps > 0) {
192 /*
193 * Higher shift gives better accuracy. Find the largest
194 * shift such that mult fits in 32 bits.
195 */
196 for (shift = 0; shift < 16; shift++) {
197 r->shift = shift;
198 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
199 mult = div64_u64(factor, r->rate_bps);
200 if (mult > UINT_MAX)
201 break;
202 }
203
204 r->shift = shift - 1;
205 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
206 r->mult = div64_u64(factor, r->rate_bps);
207 }
208}
209
210/* find class in global hash table using given handle */ 166/* find class in global hash table using given handle */
211static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) 167static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
212{ 168{
@@ -632,7 +588,7 @@ static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
632 588
633 if (toks > cl->buffer) 589 if (toks > cl->buffer)
634 toks = cl->buffer; 590 toks = cl->buffer;
635 toks -= (s64) l2t_ns(&cl->rate, bytes); 591 toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
636 if (toks <= -cl->mbuffer) 592 if (toks <= -cl->mbuffer)
637 toks = 1 - cl->mbuffer; 593 toks = 1 - cl->mbuffer;
638 594
@@ -645,7 +601,7 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
645 601
646 if (toks > cl->cbuffer) 602 if (toks > cl->cbuffer)
647 toks = cl->cbuffer; 603 toks = cl->cbuffer;
648 toks -= (s64) l2t_ns(&cl->ceil, bytes); 604 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
649 if (toks <= -cl->mbuffer) 605 if (toks <= -cl->mbuffer)
650 toks = 1 - cl->mbuffer; 606 toks = 1 - cl->mbuffer;
651 607
@@ -919,7 +875,7 @@ ok:
919 q->now = ktime_to_ns(ktime_get()); 875 q->now = ktime_to_ns(ktime_get());
920 start_at = jiffies; 876 start_at = jiffies;
921 877
922 next_event = q->now + 5 * NSEC_PER_SEC; 878 next_event = q->now + 5LLU * NSEC_PER_SEC;
923 879
924 for (level = 0; level < TC_HTB_MAXDEPTH; level++) { 880 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
925 /* common case optimization - skip event handler quickly */ 881 /* common case optimization - skip event handler quickly */
@@ -1134,10 +1090,10 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1134 1090
1135 memset(&opt, 0, sizeof(opt)); 1091 memset(&opt, 0, sizeof(opt));
1136 1092
1137 opt.rate.rate = cl->rate.rate_bps >> 3; 1093 opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
1138 opt.buffer = cl->buffer; 1094 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1139 opt.ceil.rate = cl->ceil.rate_bps >> 3; 1095 opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
1140 opt.cbuffer = cl->cbuffer; 1096 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1141 opt.quantum = cl->quantum; 1097 opt.quantum = cl->quantum;
1142 opt.prio = cl->prio; 1098 opt.prio = cl->prio;
1143 opt.level = cl->level; 1099 opt.level = cl->level;
@@ -1459,8 +1415,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1459 cl->parent = parent; 1415 cl->parent = parent;
1460 1416
1461 /* set class to be in HTB_CAN_SEND state */ 1417 /* set class to be in HTB_CAN_SEND state */
1462 cl->tokens = hopt->buffer; 1418 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1463 cl->ctokens = hopt->cbuffer; 1419 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1464 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ 1420 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
1465 cl->t_c = psched_get_time(); 1421 cl->t_c = psched_get_time();
1466 cl->cmode = HTB_CAN_SEND; 1422 cl->cmode = HTB_CAN_SEND;
@@ -1503,17 +1459,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1503 cl->prio = TC_HTB_NUMPRIO - 1; 1459 cl->prio = TC_HTB_NUMPRIO - 1;
1504 } 1460 }
1505 1461
1506 cl->buffer = hopt->buffer; 1462 psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
1507 cl->cbuffer = hopt->cbuffer; 1463 psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
1508
1509 cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
1510 cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
1511
1512 htb_precompute_ratedata(&cl->rate);
1513 htb_precompute_ratedata(&cl->ceil);
1514 1464
1515 cl->buffer = hopt->buffer << PSCHED_SHIFT; 1465 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1516 cl->cbuffer = hopt->buffer << PSCHED_SHIFT; 1466 cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
1517 1467
1518 sch_tree_unlock(sch); 1468 sch_tree_unlock(sch);
1519 1469
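After this conversion HTB keeps tokens, buffer and cbuffer in nanoseconds internally and only translates to psched ticks at the netlink boundary, in htb_change_class() and htb_dump_class(). Assuming the usual PSCHED_SHIFT of 6 from pkt_sched.h (an assumption; the constant is not part of this diff), those conversions are pure shifts:

    /* Assumed from pkt_sched.h: a psched tick is ns >> 6. */
    #define PSCHED_SHIFT        6
    #define PSCHED_TICKS2NS(x)  ((s64)(x) << PSCHED_SHIFT)
    #define PSCHED_NS2TICKS(x)  ((x) >> PSCHED_SHIFT)

    /* Example: a 31250-tick buffer from tc becomes
     * 31250 << 6 = 2000000 ns (2 ms) of tokens, and is shifted
     * back down again for htb_dump_class(). */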
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 298c0ddfb57e..3d2acc7a9c80 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
438 if (q->rate) { 438 if (q->rate) {
439 struct sk_buff_head *list = &sch->q; 439 struct sk_buff_head *list = &sch->q;
440 440
441 delay += packet_len_2_sched_time(skb->len, q);
442
443 if (!skb_queue_empty(list)) { 441 if (!skb_queue_empty(list)) {
444 /* 442 /*
445 * Last packet in queue is reference point (now). 443 * Last packet in queue is reference point (now),
446 * First packet in queue is already in flight, 444 * calculate this time bonus and subtract
447 * calculate this time bonus and substract
448 * from delay. 445 * from delay.
449 */ 446 */
450 delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
448 delay = max_t(psched_tdiff_t, 0, delay);
451 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
452 } 450 }
451
452 delay += packet_len_2_sched_time(skb->len, q);
453 } 453 }
454 454
455 cb->time_to_send = now + delay; 455 cb->time_to_send = now + delay;
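The reordering above fixes rate-emulation accounting: a new packet cannot start transmitting before the packet ahead of it leaves the wire, so its own transmission time is added only after rebasing on the tail's departure, and the bonus is clamped so nothing is ever sent early. A worked trace with illustrative numbers:

    /* Trace of the new path (times in us):
     *   now = 0, tail of queue departs at 5000,
     *   new packet needs 2000 of wire time, no extra delay.
     *
     *   delay  = 0
     *   delay -= 5000 - 0;        -> -5000
     *   delay  = max(0, delay);   -> 0     (never send early)
     *   now    = 5000;                     (rebase on tail departure)
     *   delay += 2000;            -> 2000
     *   time_to_send = now + delay = 7000  (back to back, in order)
     */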
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 4b056c15e90c..c8388f3c3426 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -19,6 +19,7 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/sch_generic.h>
22#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
23 24
24 25
@@ -100,23 +101,21 @@
100struct tbf_sched_data { 101struct tbf_sched_data {
101/* Parameters */ 102/* Parameters */
102 u32 limit; /* Maximal length of backlog: bytes */ 103 u32 limit; /* Maximal length of backlog: bytes */
103 u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ 104 s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
104 u32 mtu; 105 s64 mtu;
105 u32 max_size; 106 u32 max_size;
106 struct qdisc_rate_table *R_tab; 107 struct psched_ratecfg rate;
107 struct qdisc_rate_table *P_tab; 108 struct psched_ratecfg peak;
109 bool peak_present;
108 110
109/* Variables */ 111/* Variables */
110 long tokens; /* Current number of B tokens */ 112 s64 tokens; /* Current number of B tokens */
111 long ptokens; /* Current number of P tokens */ 113 s64 ptokens; /* Current number of P tokens */
112 psched_time_t t_c; /* Time check-point */ 114 s64 t_c; /* Time check-point */
113 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */ 115 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */
114 struct qdisc_watchdog watchdog; /* Watchdog timer */ 116 struct qdisc_watchdog watchdog; /* Watchdog timer */
115}; 117};
116 118
117#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
118#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
119
120static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) 119static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
121{ 120{
122 struct tbf_sched_data *q = qdisc_priv(sch); 121 struct tbf_sched_data *q = qdisc_priv(sch);
@@ -156,24 +155,24 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
156 skb = q->qdisc->ops->peek(q->qdisc); 155 skb = q->qdisc->ops->peek(q->qdisc);
157 156
158 if (skb) { 157 if (skb) {
159 psched_time_t now; 158 s64 now;
160 long toks; 159 s64 toks;
161 long ptoks = 0; 160 s64 ptoks = 0;
162 unsigned int len = qdisc_pkt_len(skb); 161 unsigned int len = qdisc_pkt_len(skb);
163 162
164 now = psched_get_time(); 163 now = ktime_to_ns(ktime_get());
165 toks = psched_tdiff_bounded(now, q->t_c, q->buffer); 164 toks = min_t(s64, now - q->t_c, q->buffer);
166 165
167 if (q->P_tab) { 166 if (q->peak_present) {
168 ptoks = toks + q->ptokens; 167 ptoks = toks + q->ptokens;
169 if (ptoks > (long)q->mtu) 168 if (ptoks > q->mtu)
170 ptoks = q->mtu; 169 ptoks = q->mtu;
171 ptoks -= L2T_P(q, len); 170 ptoks -= (s64) psched_l2t_ns(&q->peak, len);
172 } 171 }
173 toks += q->tokens; 172 toks += q->tokens;
174 if (toks > (long)q->buffer) 173 if (toks > q->buffer)
175 toks = q->buffer; 174 toks = q->buffer;
176 toks -= L2T(q, len); 175 toks -= (s64) psched_l2t_ns(&q->rate, len);
177 176
178 if ((toks|ptoks) >= 0) { 177 if ((toks|ptoks) >= 0) {
179 skb = qdisc_dequeue_peeked(q->qdisc); 178 skb = qdisc_dequeue_peeked(q->qdisc);
@@ -189,8 +188,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
189 return skb; 188 return skb;
190 } 189 }
191 190
192 qdisc_watchdog_schedule(&q->watchdog, 191 qdisc_watchdog_schedule_ns(&q->watchdog,
193 now + max_t(long, -toks, -ptoks)); 192 now + max_t(long, -toks, -ptoks));
194 193
195 /* Maybe we have a shorter packet in the queue, 194 /* Maybe we have a shorter packet in the queue,
196 which can be sent now. It sounds cool, 195 which can be sent now. It sounds cool,
@@ -214,7 +213,7 @@ static void tbf_reset(struct Qdisc *sch)
214 213
215 qdisc_reset(q->qdisc); 214 qdisc_reset(q->qdisc);
216 sch->q.qlen = 0; 215 sch->q.qlen = 0;
217 q->t_c = psched_get_time(); 216 q->t_c = ktime_to_ns(ktime_get());
218 q->tokens = q->buffer; 217 q->tokens = q->buffer;
219 q->ptokens = q->mtu; 218 q->ptokens = q->mtu;
220 qdisc_watchdog_cancel(&q->watchdog); 219 qdisc_watchdog_cancel(&q->watchdog);
@@ -293,14 +292,19 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
293 q->qdisc = child; 292 q->qdisc = child;
294 } 293 }
295 q->limit = qopt->limit; 294 q->limit = qopt->limit;
296 q->mtu = qopt->mtu; 295 q->mtu = PSCHED_TICKS2NS(qopt->mtu);
297 q->max_size = max_size; 296 q->max_size = max_size;
298 q->buffer = qopt->buffer; 297 q->buffer = PSCHED_TICKS2NS(qopt->buffer);
299 q->tokens = q->buffer; 298 q->tokens = q->buffer;
300 q->ptokens = q->mtu; 299 q->ptokens = q->mtu;
301 300
302 swap(q->R_tab, rtab); 301 psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
303 swap(q->P_tab, ptab); 302 if (ptab) {
303 psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
304 q->peak_present = true;
305 } else {
306 q->peak_present = false;
307 }
304 308
305 sch_tree_unlock(sch); 309 sch_tree_unlock(sch);
306 err = 0; 310 err = 0;
@@ -319,7 +323,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
319 if (opt == NULL) 323 if (opt == NULL)
320 return -EINVAL; 324 return -EINVAL;
321 325
322 q->t_c = psched_get_time(); 326 q->t_c = ktime_to_ns(ktime_get());
323 qdisc_watchdog_init(&q->watchdog, sch); 327 qdisc_watchdog_init(&q->watchdog, sch);
324 q->qdisc = &noop_qdisc; 328 q->qdisc = &noop_qdisc;
325 329
@@ -331,12 +335,6 @@ static void tbf_destroy(struct Qdisc *sch)
331 struct tbf_sched_data *q = qdisc_priv(sch); 335 struct tbf_sched_data *q = qdisc_priv(sch);
332 336
333 qdisc_watchdog_cancel(&q->watchdog); 337 qdisc_watchdog_cancel(&q->watchdog);
334
335 if (q->P_tab)
336 qdisc_put_rtab(q->P_tab);
337 if (q->R_tab)
338 qdisc_put_rtab(q->R_tab);
339
340 qdisc_destroy(q->qdisc); 338 qdisc_destroy(q->qdisc);
341} 339}
342 340
@@ -352,13 +350,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
352 goto nla_put_failure; 350 goto nla_put_failure;
353 351
354 opt.limit = q->limit; 352 opt.limit = q->limit;
355 opt.rate = q->R_tab->rate; 353 opt.rate.rate = psched_ratecfg_getrate(&q->rate);
356 if (q->P_tab) 354 if (q->peak_present)
357 opt.peakrate = q->P_tab->rate; 355 opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
358 else 356 else
359 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 357 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
360 opt.mtu = q->mtu; 358 opt.mtu = PSCHED_NS2TICKS(q->mtu);
361 opt.buffer = q->buffer; 359 opt.buffer = PSCHED_NS2TICKS(q->buffer);
362 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) 360 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
363 goto nla_put_failure; 361 goto nla_put_failure;
364 362
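With t_c, tokens, buffer and mtu all held as s64 nanoseconds, the TBF dequeue test reduces to plain signed arithmetic. The self-contained userspace model below mirrors the rate branch of tbf_dequeue() (the peak-rate branch is omitted); it divides where the kernel uses the precomputed mult/shift from psched_l2t_ns(), and the names and numbers are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    struct tbf { int64_t tokens, buffer, t_c, rate_Bps; };

    static int64_t l2t_ns(const struct tbf *q, int64_t len)
    {
            return len * NSEC_PER_SEC / q->rate_Bps; /* wire time of len bytes */
    }

    static int tbf_can_send(struct tbf *q, int64_t now, int64_t len)
    {
            int64_t toks = now - q->t_c;       /* ns elapsed = credit earned */
            if (toks > q->buffer)
                    toks = q->buffer;          /* bucket depth bounds bursts */
            toks += q->tokens;
            if (toks > q->buffer)
                    toks = q->buffer;
            toks -= l2t_ns(q, len);
            if (toks >= 0) {
                    q->t_c = now;              /* spend the credit and send */
                    q->tokens = toks;
                    return 1;
            }
            return 0;                          /* watchdog sleeps -toks ns */
    }

    int main(void)
    {
            /* 1 Mbit/s (125000 B/s), 20 ms bucket, starts full. */
            struct tbf q = { .tokens = 20000000, .buffer = 20000000,
                             .t_c = 0, .rate_Bps = 125000 };
            printf("%d\n", tbf_can_send(&q, 5000000, 1500)); /* prints 1 */
            return 0;
    }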
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index a9edd2e205f4..cf4852814e0c 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig IP_SCTP 5menuconfig IP_SCTP
6 tristate "The SCTP Protocol (EXPERIMENTAL)" 6 tristate "The SCTP Protocol"
7 depends on INET && EXPERIMENTAL 7 depends on INET
8 depends on IPV6 || IPV6=n 8 depends on IPV6 || IPV6=n
9 select CRYPTO 9 select CRYPTO
10 select CRYPTO_HMAC 10 select CRYPTO_HMAC
@@ -66,12 +66,36 @@ config SCTP_DBG_OBJCNT
66 'cat /proc/net/sctp/sctp_dbg_objcnt' 66 'cat /proc/net/sctp/sctp_dbg_objcnt'
67 67
68 If unsure, say N 68 If unsure, say N
69choice
70 prompt "Default SCTP cookie HMAC encoding"
71 default SCTP_DEFAULT_COOKIE_HMAC_MD5
72 help
73 This option sets the default sctp cookie hmac algorithm
74 when in doubt, select 'md5'
75
76config SCTP_DEFAULT_COOKIE_HMAC_MD5
77 bool "Enable optional MD5 hmac cookie generation"
78 help
79 Enable optional MD5 hmac based SCTP cookie generation
80 select SCTP_COOKIE_HMAC_MD5
81
82config SCTP_DEFAULT_COOKIE_HMAC_SHA1
83 bool "Enable optional SHA1 hmac cookie generation"
84 help
85 Enable optional SHA1 hmac based SCTP cookie generation
86 select SCTP_COOKIE_HMAC_SHA1
87
88config SCTP_DEFAULT_COOKIE_HMAC_NONE
89 bool "Use no hmac alg in SCTP cookie generation"
90 help
91 Use no hmac algorithm in SCTP cookie generation
92
93endchoice
69 94
70config SCTP_COOKIE_HMAC_MD5 95config SCTP_COOKIE_HMAC_MD5
71 bool "Enable optional MD5 hmac cookie generation" 96 bool "Enable optional MD5 hmac cookie generation"
72 help 97 help
73 Enable optional MD5 hmac based SCTP cookie generation 98 Enable optional MD5 hmac based SCTP cookie generation
74 default y
75 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5 99 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
76 select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5 100 select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
77 101
@@ -79,7 +103,6 @@ config SCTP_COOKIE_HMAC_SHA1
79 bool "Enable optional SHA1 hmac cookie generation" 103 bool "Enable optional SHA1 hmac cookie generation"
80 help 104 help
81 Enable optional SHA1 hmac based SCTP cookie generation 105 Enable optional SHA1 hmac based SCTP cookie generation
82 default y
83 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1 106 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
84 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1 107 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
85 108
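The new choice block only picks which algorithm a namespace advertises by default (see the protocol.c hunk further down, which switches the #ifdefs to these symbols); the existing SCTP_COOKIE_HMAC_* options still decide what gets built. An illustrative .config fragment defaulting to SHA-1:

    CONFIG_IP_SCTP=m
    CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
    CONFIG_SCTP_COOKIE_HMAC_SHA1=y
    # CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
    # CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set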
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index b45ed1f96921..2f95f5a5145d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -434,8 +434,7 @@ void sctp_association_free(struct sctp_association *asoc)
434 * on our state. 434 * on our state.
435 */ 435 */
436 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { 436 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
437 if (timer_pending(&asoc->timers[i]) && 437 if (del_timer(&asoc->timers[i]))
438 del_timer(&asoc->timers[i]))
439 sctp_association_put(asoc); 438 sctp_association_put(asoc);
440 } 439 }
441 440
@@ -1497,7 +1496,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1497 1496
1498 /* Stop the SACK timer. */ 1497 /* Stop the SACK timer. */
1499 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; 1498 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1500 if (timer_pending(timer) && del_timer(timer)) 1499 if (del_timer(timer))
1501 sctp_association_put(asoc); 1500 sctp_association_put(asoc);
1502 } 1501 }
1503} 1502}
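This is the first of several identical cleanups in this series (also in input.c, outqueue.c, sm_sideeffect.c and transport.c below): the timer_pending() guard in front of del_timer() was both redundant and racy on SMP, since the timer can fire between the two calls. del_timer()'s return value already says whether a pending timer was deactivated, so the reference drop reduces to:

    /* Drop the reference the timer held only if we actually
     * stopped it; if it already fired, the handler owns that
     * reference and releases it itself. */
    if (del_timer(timer))
            sctp_association_put(asoc);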
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 159b9bc5d633..ba1dfc3f8def 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
71 return; 71 return;
72 72
73 if (atomic_dec_and_test(&key->refcnt)) { 73 if (atomic_dec_and_test(&key->refcnt)) {
74 kfree(key); 74 kzfree(key);
75 SCTP_DBG_OBJCNT_DEC(keys); 75 SCTP_DBG_OBJCNT_DEC(keys);
76 } 76 }
77} 77}
@@ -200,27 +200,28 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
200 struct sctp_auth_bytes *new; 200 struct sctp_auth_bytes *new;
201 __u32 len; 201 __u32 len;
202 __u32 offset = 0; 202 __u32 offset = 0;
203 __u16 random_len, hmacs_len, chunks_len = 0;
203 204
204 len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length); 205 random_len = ntohs(random->param_hdr.length);
205 if (chunks) 206 hmacs_len = ntohs(hmacs->param_hdr.length);
206 len += ntohs(chunks->param_hdr.length); 207 if (chunks)
208 chunks_len = ntohs(chunks->param_hdr.length);
207 209
208 new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp); 210 len = random_len + hmacs_len + chunks_len;
211
212 new = sctp_auth_create_key(len, gfp);
209 if (!new) 213 if (!new)
210 return NULL; 214 return NULL;
211 215
212 new->len = len; 216 memcpy(new->data, random, random_len);
213 217 offset += random_len;
214 memcpy(new->data, random, ntohs(random->param_hdr.length));
215 offset += ntohs(random->param_hdr.length);
216 218
217 if (chunks) { 219 if (chunks) {
218 memcpy(new->data + offset, chunks, 220 memcpy(new->data + offset, chunks, chunks_len);
219 ntohs(chunks->param_hdr.length)); 221 offset += chunks_len;
220 offset += ntohs(chunks->param_hdr.length);
221 } 222 }
222 223
223 memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length)); 224 memcpy(new->data + offset, hmacs, hmacs_len);
224 225
225 return new; 226 return new;
226} 227}
@@ -350,8 +351,8 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
350 secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector, 351 secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
351 gfp); 352 gfp);
352out: 353out:
353 kfree(local_key_vector); 354 sctp_auth_key_put(local_key_vector);
354 kfree(peer_key_vector); 355 sctp_auth_key_put(peer_key_vector);
355 356
356 return secret; 357 return secret;
357} 358}
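Two things change in sctp_auth_make_key_vector(): the parameter lengths are read once into local __u16s, and the buffer now comes from sctp_auth_create_key(), which sets ->len and a refcount, so the temporary vectors can be released via sctp_auth_key_put() (kzfree-backed, per the first hunk) instead of a bare kfree(). The resulting layout is simply the three parameter blocks back to back:

    /*
     * new->data: | RANDOM param | CHUNKS param (optional) | HMACS param |
     *              random_len     chunks_len                hmacs_len
     *
     * new->len = random_len + chunks_len + hmacs_len
     */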
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 17a001bac2cc..73aad3d16a45 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -151,9 +151,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
151 ep->rcvbuf_policy = net->sctp.rcvbuf_policy; 151 ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
152 152
153 /* Initialize the secret key used with cookie. */ 153 /* Initialize the secret key used with cookie. */
154 get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); 154 get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
155 ep->last_key = ep->current_key = 0;
156 ep->key_changed_at = jiffies;
157 155
158 /* SCTP-AUTH extensions*/ 156 /* SCTP-AUTH extensions*/
159 INIT_LIST_HEAD(&ep->endpoint_shared_keys); 157 INIT_LIST_HEAD(&ep->endpoint_shared_keys);
@@ -271,6 +269,8 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
271 sctp_inq_free(&ep->base.inqueue); 269 sctp_inq_free(&ep->base.inqueue);
272 sctp_bind_addr_free(&ep->base.bind_addr); 270 sctp_bind_addr_free(&ep->base.bind_addr);
273 271
272 memset(ep->secret_key, 0, sizeof(ep->secret_key));
273
274 /* Remove and free the port */ 274 /* Remove and free the port */
275 if (sctp_sk(ep->base.sk)->bind_hash) 275 if (sctp_sk(ep->base.sk)->bind_hash)
276 sctp_put_port(ep->base.sk); 276 sctp_put_port(ep->base.sk);
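Key hygiene: the endpoint secret is now filled in one get_random_bytes() call (the last_key/current_key/key_changed_at rotation fields go away, see sm_make_chunk.c below) and is zeroed before the endpoint memory is freed. The kzfree() conversions elsewhere in this series apply the same wipe-on-free idea to heap keys:

    /* Wipe-before-free for key material:
     *   embedded array:  memset(ep->secret_key, 0, sizeof(ep->secret_key));
     *   heap allocation: kzfree(key);   zeroes the buffer, then kfree()s it
     */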
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 8bd3c279427e..965bbbbe48d4 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -468,8 +468,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
468 } else { 468 } else {
469 struct net *net = sock_net(sk); 469 struct net *net = sock_net(sk);
470 470
471 if (timer_pending(&t->proto_unreach_timer) && 471 if (del_timer(&t->proto_unreach_timer))
472 del_timer(&t->proto_unreach_timer))
473 sctp_association_put(asoc); 472 sctp_association_put(asoc);
474 473
475 sctp_do_sm(net, SCTP_EVENT_T_OTHER, 474 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f3f0f4dc31dd..391a245d5203 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
326 */ 326 */
327 rcu_read_lock(); 327 rcu_read_lock();
328 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 328 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
329 if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) 329 if (!laddr->valid)
330 continue; 330 continue;
331 if ((laddr->a.sa.sa_family == AF_INET6) && 331 if ((laddr->state == SCTP_ADDR_SRC) &&
332 (laddr->a.sa.sa_family == AF_INET6) &&
332 (scope <= sctp_scope(&laddr->a))) { 333 (scope <= sctp_scope(&laddr->a))) {
333 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 334 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
334 if (!baddr || (matchlen < bmatchlen)) { 335 if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 379c81dee9d1..01dca753db16 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
224 224
225/* Free the outqueue structure and any related pending chunks. 225/* Free the outqueue structure and any related pending chunks.
226 */ 226 */
227void sctp_outq_teardown(struct sctp_outq *q) 227static void __sctp_outq_teardown(struct sctp_outq *q)
228{ 228{
229 struct sctp_transport *transport; 229 struct sctp_transport *transport;
230 struct list_head *lchunk, *temp; 230 struct list_head *lchunk, *temp;
@@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
277 sctp_chunk_free(chunk); 277 sctp_chunk_free(chunk);
278 } 278 }
279 279
280 q->error = 0;
281
282 /* Throw away any leftover control chunks. */ 280 /* Throw away any leftover control chunks. */
283 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
284 list_del_init(&chunk->list); 282 list_del_init(&chunk->list);
@@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
286 } 284 }
287} 285}
288 286
287void sctp_outq_teardown(struct sctp_outq *q)
288{
289 __sctp_outq_teardown(q);
290 sctp_outq_init(q->asoc, q);
291}
292
289/* Free the outqueue structure and any related pending chunks. */ 293/* Free the outqueue structure and any related pending chunks. */
290void sctp_outq_free(struct sctp_outq *q) 294void sctp_outq_free(struct sctp_outq *q)
291{ 295{
292 /* Throw away leftover chunks. */ 296 /* Throw away leftover chunks. */
293 sctp_outq_teardown(q); 297 __sctp_outq_teardown(q);
294 298
295 /* If we were kmalloc()'d, free the memory. */ 299 /* If we were kmalloc()'d, free the memory. */
296 if (q->malloced) 300 if (q->malloced)
@@ -1696,10 +1700,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1696 * address. 1700 * address.
1697 */ 1701 */
1698 if (!transport->flight_size) { 1702 if (!transport->flight_size) {
1699 if (timer_pending(&transport->T3_rtx_timer) && 1703 if (del_timer(&transport->T3_rtx_timer))
1700 del_timer(&transport->T3_rtx_timer)) {
1701 sctp_transport_put(transport); 1704 sctp_transport_put(transport);
1702 }
1703 } else if (restart_timer) { 1705 } else if (restart_timer) {
1704 if (!mod_timer(&transport->T3_rtx_timer, 1706 if (!mod_timer(&transport->T3_rtx_timer,
1705 jiffies + transport->rto)) 1707 jiffies + transport->rto))
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index bc6cd75cc1dc..ad0dba870341 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -122,11 +122,12 @@ static const struct file_operations sctpprobe_fops = {
122 .llseek = noop_llseek, 122 .llseek = noop_llseek,
123}; 123};
124 124
125sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, 125static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
126 const struct sctp_association *asoc, 126 const struct sctp_endpoint *ep,
127 const sctp_subtype_t type, 127 const struct sctp_association *asoc,
128 void *arg, 128 const sctp_subtype_t type,
129 sctp_cmd_seq_t *commands) 129 void *arg,
130 sctp_cmd_seq_t *commands)
130{ 131{
131 struct sctp_transport *sp; 132 struct sctp_transport *sp;
132 static __u32 lcwnd = 0; 133 static __u32 lcwnd = 0;
@@ -182,13 +183,20 @@ static __init int sctpprobe_init(void)
182{ 183{
183 int ret = -ENOMEM; 184 int ret = -ENOMEM;
184 185
186 /* Warning: if the function signature of sctp_sf_eat_sack_6_2,
187 * has been changed, you also have to change the signature of
188 * jsctp_sf_eat_sack, otherwise you end up right here!
189 */
190 BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
191 jsctp_sf_eat_sack) == 0);
192
185 init_waitqueue_head(&sctpw.wait); 193 init_waitqueue_head(&sctpw.wait);
186 spin_lock_init(&sctpw.lock); 194 spin_lock_init(&sctpw.lock);
187 if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) 195 if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
188 return ret; 196 return ret;
189 197
190 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, 198 if (!proc_create(procname, S_IRUSR, init_net.proc_net,
191 &sctpprobe_fops)) 199 &sctpprobe_fops))
192 goto free_kfifo; 200 goto free_kfifo;
193 201
194 ret = register_jprobe(&sctp_recv_probe); 202 ret = register_jprobe(&sctp_recv_probe);
@@ -200,7 +208,7 @@ static __init int sctpprobe_init(void)
200 return 0; 208 return 0;
201 209
202remove_proc: 210remove_proc:
203 proc_net_remove(&init_net, procname); 211 remove_proc_entry(procname, init_net.proc_net);
204free_kfifo: 212free_kfifo:
205 kfifo_free(&sctpw.fifo); 213 kfifo_free(&sctpw.fifo);
206 return ret; 214 return ret;
@@ -209,7 +217,7 @@ free_kfifo:
209static __exit void sctpprobe_exit(void) 217static __exit void sctpprobe_exit(void)
210{ 218{
211 kfifo_free(&sctpw.fifo); 219 kfifo_free(&sctpw.fifo);
212 proc_net_remove(&init_net, procname); 220 remove_proc_entry(procname, init_net.proc_net);
213 unregister_jprobe(&sctp_recv_probe); 221 unregister_jprobe(&sctp_recv_probe);
214} 222}
215 223
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2c7785bacf74..1c2e46cb9191 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -595,7 +595,7 @@ static void sctp_v4_ecn_capable(struct sock *sk)
595 INET_ECN_xmit(sk); 595 INET_ECN_xmit(sk);
596} 596}
597 597
598void sctp_addr_wq_timeout_handler(unsigned long arg) 598static void sctp_addr_wq_timeout_handler(unsigned long arg)
599{ 599{
600 struct net *net = (struct net *)arg; 600 struct net *net = (struct net *)arg;
601 struct sctp_sockaddr_entry *addrw, *temp; 601 struct sctp_sockaddr_entry *addrw, *temp;
@@ -1191,9 +1191,9 @@ static int __net_init sctp_net_init(struct net *net)
1191 net->sctp.cookie_preserve_enable = 1; 1191 net->sctp.cookie_preserve_enable = 1;
1192 1192
1193 /* Default sctp sockets to use md5 as their hmac alg */ 1193 /* Default sctp sockets to use md5 as their hmac alg */
1194#if defined (CONFIG_CRYPTO_MD5) 1194#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
1195 net->sctp.sctp_hmac_alg = "md5"; 1195 net->sctp.sctp_hmac_alg = "md5";
1196#elif defined (CONFIG_CRYPTO_SHA1) 1196#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
1197 net->sctp.sctp_hmac_alg = "sha1"; 1197 net->sctp.sctp_hmac_alg = "sha1";
1198#else 1198#else
1199 net->sctp.sctp_hmac_alg = NULL; 1199 net->sctp.sctp_hmac_alg = NULL;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e1c5fc2be6b8..cf579e71cff0 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1201,7 +1201,7 @@ nodata:
1201 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) 1201 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
1202 * This is a helper function to allocate an error chunk for 1202 * This is a helper function to allocate an error chunk for
1203 * for those invalid parameter codes in which we may not want 1203 * for those invalid parameter codes in which we may not want
1204 * to report all the errors, if the incomming chunk is large 1204 * to report all the errors, if the incoming chunk is large
1205 */ 1205 */
1206static inline struct sctp_chunk *sctp_make_op_error_fixed( 1206static inline struct sctp_chunk *sctp_make_op_error_fixed(
1207 const struct sctp_association *asoc, 1207 const struct sctp_association *asoc,
@@ -1589,8 +1589,6 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1589 struct sctp_signed_cookie *cookie; 1589 struct sctp_signed_cookie *cookie;
1590 struct scatterlist sg; 1590 struct scatterlist sg;
1591 int headersize, bodysize; 1591 int headersize, bodysize;
1592 unsigned int keylen;
1593 char *key;
1594 1592
1595 /* Header size is static data prior to the actual cookie, including 1593 /* Header size is static data prior to the actual cookie, including
1596 * any padding. 1594 * any padding.
@@ -1650,12 +1648,11 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1650 1648
1651 /* Sign the message. */ 1649 /* Sign the message. */
1652 sg_init_one(&sg, &cookie->c, bodysize); 1650 sg_init_one(&sg, &cookie->c, bodysize);
1653 keylen = SCTP_SECRET_SIZE;
1654 key = (char *)ep->secret_key[ep->current_key];
1655 desc.tfm = sctp_sk(ep->base.sk)->hmac; 1651 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1656 desc.flags = 0; 1652 desc.flags = 0;
1657 1653
1658 if (crypto_hash_setkey(desc.tfm, key, keylen) || 1654 if (crypto_hash_setkey(desc.tfm, ep->secret_key,
1655 sizeof(ep->secret_key)) ||
1659 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) 1656 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
1660 goto free_cookie; 1657 goto free_cookie;
1661 } 1658 }
@@ -1682,8 +1679,7 @@ struct sctp_association *sctp_unpack_cookie(
1682 int headersize, bodysize, fixed_size; 1679 int headersize, bodysize, fixed_size;
1683 __u8 *digest = ep->digest; 1680 __u8 *digest = ep->digest;
1684 struct scatterlist sg; 1681 struct scatterlist sg;
1685 unsigned int keylen, len; 1682 unsigned int len;
1686 char *key;
1687 sctp_scope_t scope; 1683 sctp_scope_t scope;
1688 struct sk_buff *skb = chunk->skb; 1684 struct sk_buff *skb = chunk->skb;
1689 struct timeval tv; 1685 struct timeval tv;
@@ -1718,34 +1714,21 @@ struct sctp_association *sctp_unpack_cookie(
1718 goto no_hmac; 1714 goto no_hmac;
1719 1715
1720 /* Check the signature. */ 1716 /* Check the signature. */
1721 keylen = SCTP_SECRET_SIZE;
1722 sg_init_one(&sg, bear_cookie, bodysize); 1717 sg_init_one(&sg, bear_cookie, bodysize);
1723 key = (char *)ep->secret_key[ep->current_key];
1724 desc.tfm = sctp_sk(ep->base.sk)->hmac; 1718 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1725 desc.flags = 0; 1719 desc.flags = 0;
1726 1720
1727 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1721 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1728 if (crypto_hash_setkey(desc.tfm, key, keylen) || 1722 if (crypto_hash_setkey(desc.tfm, ep->secret_key,
1723 sizeof(ep->secret_key)) ||
1729 crypto_hash_digest(&desc, &sg, bodysize, digest)) { 1724 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1730 *error = -SCTP_IERROR_NOMEM; 1725 *error = -SCTP_IERROR_NOMEM;
1731 goto fail; 1726 goto fail;
1732 } 1727 }
1733 1728
1734 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1729 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1735 /* Try the previous key. */ 1730 *error = -SCTP_IERROR_BAD_SIG;
1736 key = (char *)ep->secret_key[ep->last_key]; 1731 goto fail;
1737 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1738 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1739 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1740 *error = -SCTP_IERROR_NOMEM;
1741 goto fail;
1742 }
1743
1744 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1745 /* Yikes! Still bad signature! */
1746 *error = -SCTP_IERROR_BAD_SIG;
1747 goto fail;
1748 }
1749 } 1732 }
1750 1733
1751no_hmac: 1734no_hmac:
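With the second-key fallback removed, cookie verification is one HMAC computation plus one comparison against the embedded signature. A self-contained userspace sketch of the equivalent check using OpenSSL's one-shot HMAC() (illustrative only; the kernel path above uses crypto_hash_setkey()/crypto_hash_digest(), and the digest may be MD5 or SHA-1 depending on the Kconfig choice earlier in this series):

    #include <string.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    /* Return 1 if sig is a valid HMAC-SHA1 of body under key. */
    static int cookie_sig_ok(const unsigned char *key, int keylen,
                             const unsigned char *body, size_t bodylen,
                             const unsigned char *sig, unsigned int siglen)
    {
            unsigned char digest[EVP_MAX_MD_SIZE];
            unsigned int len = 0;

            if (!HMAC(EVP_sha1(), key, keylen, body, bodylen, digest, &len))
                    return 0;
            return len == siglen && memcmp(digest, sig, siglen) == 0;
    }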
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c9577754a708..8aab894aeabe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -674,10 +674,8 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
674 674
675 list_for_each_entry(t, &asoc->peer.transport_addr_list, 675 list_for_each_entry(t, &asoc->peer.transport_addr_list,
676 transports) { 676 transports) {
677 if (timer_pending(&t->T3_rtx_timer) && 677 if (del_timer(&t->T3_rtx_timer))
678 del_timer(&t->T3_rtx_timer)) {
679 sctp_transport_put(t); 678 sctp_transport_put(t);
680 }
681 } 679 }
682} 680}
683 681
@@ -1517,7 +1515,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1517 1515
1518 case SCTP_CMD_TIMER_STOP: 1516 case SCTP_CMD_TIMER_STOP:
1519 timer = &asoc->timers[cmd->obj.to]; 1517 timer = &asoc->timers[cmd->obj.to];
1520 if (timer_pending(timer) && del_timer(timer)) 1518 if (del_timer(timer))
1521 sctp_association_put(asoc); 1519 sctp_association_put(asoc);
1522 break; 1520 break;
1523 1521
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 618ec7e216ca..5131fcfedb03 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1779 1779
1780 /* Update the content of current association. */ 1780 /* Update the content of current association. */
1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1782 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1782 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1784 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1785 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1784 return SCTP_DISPOSITION_CONSUME; 1786 return SCTP_DISPOSITION_CONSUME;
1785 1787
1786nomem_ev: 1788nomem_ev:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e65758cb038..cedd9bf67b8c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3390 3390
3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3392out: 3392out:
3393 kfree(authkey); 3393 kzfree(authkey);
3394 return ret; 3394 return ret;
3395} 3395}
3396 3396
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 043889ac86c0..bf3c6e8fc401 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)
366 366
367void sctp_sysctl_net_unregister(struct net *net) 367void sctp_sysctl_net_unregister(struct net *net)
368{ 368{
369 struct ctl_table *table;
370
371 table = net->sctp.sysctl_header->ctl_table_arg;
369 unregister_net_sysctl_table(net->sctp.sysctl_header); 372 unregister_net_sysctl_table(net->sctp.sysctl_header);
373 kfree(table);
370} 374}
371 375
372static struct ctl_table_header * sctp_sysctl_header; 376static struct ctl_table_header * sctp_sysctl_header;
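The table handed to the sysctl core here is a per-namespace duplicate, so unregistering alone leaked it; ctl_table_arg hands the pointer back for kfree(). The register side this pairs with is presumably the usual kmemdup() idiom, reconstructed below as an assumption (it is not visible in this hunk):

    /* Assumed shape of sctp_sysctl_net_register(): duplicate the
     * template so each netns can patch .data, then keep the header. */
    table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
    if (!table)
            return -ENOMEM;
    net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);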
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 4e45bb68aef0..fafd2a461ba0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -151,13 +151,11 @@ void sctp_transport_free(struct sctp_transport *transport)
151 * structure hang around in memory since we know 151 * structure hang around in memory since we know
152 * the transport is going away. 152 * the transport is going away.
153 */ 153 */
154 if (timer_pending(&transport->T3_rtx_timer) && 154 if (del_timer(&transport->T3_rtx_timer))
155 del_timer(&transport->T3_rtx_timer))
156 sctp_transport_put(transport); 155 sctp_transport_put(transport);
157 156
158 /* Delete the ICMP proto unreachable timer if it's active. */ 157 /* Delete the ICMP proto unreachable timer if it's active. */
159 if (timer_pending(&transport->proto_unreach_timer) && 158 if (del_timer(&transport->proto_unreach_timer))
160 del_timer(&transport->proto_unreach_timer))
161 sctp_association_put(transport->asoc); 159 sctp_association_put(transport->asoc);
162 160
163 sctp_transport_put(transport); 161 sctp_transport_put(transport);
@@ -168,10 +166,6 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head)
168 struct sctp_transport *transport; 166 struct sctp_transport *transport;
169 167
170 transport = container_of(head, struct sctp_transport, rcu); 168 transport = container_of(head, struct sctp_transport, rcu);
171 if (transport->asoc)
172 sctp_association_put(transport->asoc);
173
174 sctp_packet_free(&transport->packet);
175 169
176 dst_release(transport->dst); 170 dst_release(transport->dst);
177 kfree(transport); 171 kfree(transport);
@@ -186,6 +180,11 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
186 SCTP_ASSERT(transport->dead, "Transport is not dead", return); 180 SCTP_ASSERT(transport->dead, "Transport is not dead", return);
187 181
188 call_rcu(&transport->rcu, sctp_transport_destroy_rcu); 182 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
183
184 sctp_packet_free(&transport->packet);
185
186 if (transport->asoc)
187 sctp_association_put(transport->asoc);
189} 188}
190 189
191/* Start T3_rtx timer if it is not already running and update the heartbeat 190/* Start T3_rtx timer if it is not already running and update the heartbeat
@@ -654,10 +653,9 @@ void sctp_transport_reset(struct sctp_transport *t)
654void sctp_transport_immediate_rtx(struct sctp_transport *t) 653void sctp_transport_immediate_rtx(struct sctp_transport *t)
655{ 654{
656 /* Stop pending T3_rtx_timer */ 655 /* Stop pending T3_rtx_timer */
657 if (timer_pending(&t->T3_rtx_timer)) { 656 if (del_timer(&t->T3_rtx_timer))
658 (void)del_timer(&t->T3_rtx_timer);
659 sctp_transport_put(t); 657 sctp_transport_put(t);
660 } 658
661 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); 659 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
662 if (!timer_pending(&t->T3_rtx_timer)) { 660 if (!timer_pending(&t->T3_rtx_timer)) {
663 if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) 661 if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
diff --git a/net/socket.c b/net/socket.c
index 2ca51c719ef9..ee0d029e5130 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -69,7 +69,6 @@
69#include <linux/proc_fs.h> 69#include <linux/proc_fs.h>
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71#include <linux/mutex.h> 71#include <linux/mutex.h>
72#include <linux/wanrouter.h>
73#include <linux/if_bridge.h> 72#include <linux/if_bridge.h>
74#include <linux/if_frad.h> 73#include <linux/if_frad.h>
75#include <linux/if_vlan.h> 74#include <linux/if_vlan.h>
@@ -2838,7 +2837,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2838 } 2837 }
2839 2838
2840 ifr = compat_alloc_user_space(buf_size); 2839 ifr = compat_alloc_user_space(buf_size);
2841 rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); 2840 rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
2842 2841
2843 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2842 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2844 return -EFAULT; 2843 return -EFAULT;
@@ -2862,12 +2861,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2862 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2861 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2863 2862
2864 if (copy_in_user(rxnfc, compat_rxnfc, 2863 if (copy_in_user(rxnfc, compat_rxnfc,
2865 (void *)(&rxnfc->fs.m_ext + 1) - 2864 (void __user *)(&rxnfc->fs.m_ext + 1) -
2866 (void *)rxnfc) || 2865 (void __user *)rxnfc) ||
2867 copy_in_user(&rxnfc->fs.ring_cookie, 2866 copy_in_user(&rxnfc->fs.ring_cookie,
2868 &compat_rxnfc->fs.ring_cookie, 2867 &compat_rxnfc->fs.ring_cookie,
2869 (void *)(&rxnfc->fs.location + 1) - 2868 (void __user *)(&rxnfc->fs.location + 1) -
2870 (void *)&rxnfc->fs.ring_cookie) || 2869 (void __user *)&rxnfc->fs.ring_cookie) ||
2871 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, 2870 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
2872 sizeof(rxnfc->rule_cnt))) 2871 sizeof(rxnfc->rule_cnt)))
2873 return -EFAULT; 2872 return -EFAULT;
@@ -2879,12 +2878,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2879 2878
2880 if (convert_out) { 2879 if (convert_out) {
2881 if (copy_in_user(compat_rxnfc, rxnfc, 2880 if (copy_in_user(compat_rxnfc, rxnfc,
2882 (const void *)(&rxnfc->fs.m_ext + 1) - 2881 (const void __user *)(&rxnfc->fs.m_ext + 1) -
2883 (const void *)rxnfc) || 2882 (const void __user *)rxnfc) ||
2884 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2883 copy_in_user(&compat_rxnfc->fs.ring_cookie,
2885 &rxnfc->fs.ring_cookie, 2884 &rxnfc->fs.ring_cookie,
2886 (const void *)(&rxnfc->fs.location + 1) - 2885 (const void __user *)(&rxnfc->fs.location + 1) -
2887 (const void *)&rxnfc->fs.ring_cookie) || 2886 (const void __user *)&rxnfc->fs.ring_cookie) ||
2888 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, 2887 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
2889 sizeof(rxnfc->rule_cnt))) 2888 sizeof(rxnfc->rule_cnt)))
2890 return -EFAULT; 2889 return -EFAULT;
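The (void *) to (void __user *) cast changes in net/socket.c are a no-op at runtime; they exist so sparse's address-space checking survives the pointer arithmetic. Under sparse the annotation expands roughly as follows (from compiler.h, stated here as an assumption, not part of this diff):

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    /* Casting a __user pointer to plain (void *) silently drops the
     * address-space tag; casting to (void __user *) keeps it, so the
     * offsets computed above still point at user memory as far as
     * sparse is concerned. */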
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 03d03e37a7d5..516fe2caac2c 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -10,7 +10,7 @@ config SUNRPC_BACKCHANNEL
10 10
11config SUNRPC_XPRT_RDMA 11config SUNRPC_XPRT_RDMA
12 tristate 12 tristate
13 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL 13 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
14 default SUNRPC && INFINIBAND 14 default SUNRPC && INFINIBAND
15 help 15 help
16 This option allows the NFS client and server to support 16 This option allows the NFS client and server to support
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 909dc0c31aab..911ef008b701 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -192,17 +192,23 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
192 const void *q; 192 const void *q;
193 unsigned int seclen; 193 unsigned int seclen;
194 unsigned int timeout; 194 unsigned int timeout;
195 unsigned long now = jiffies;
195 u32 window_size; 196 u32 window_size;
196 int ret; 197 int ret;
197 198
198 /* First unsigned int gives the lifetime (in seconds) of the cred */ 199 /* First unsigned int gives the remaining lifetime in seconds of the
200 * credential - e.g. the remaining TGT lifetime for Kerberos or
201 * the -t value passed to GSSD.
202 */
199 p = simple_get_bytes(p, end, &timeout, sizeof(timeout)); 203 p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
200 if (IS_ERR(p)) 204 if (IS_ERR(p))
201 goto err; 205 goto err;
202 if (timeout == 0) 206 if (timeout == 0)
203 timeout = GSSD_MIN_TIMEOUT; 207 timeout = GSSD_MIN_TIMEOUT;
204 ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4; 208 ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
205 /* Sequence number window. Determines the maximum number of simultaneous requests */ 209 /* Sequence number window. Determines the maximum number of
210 * simultaneous requests
211 */
206 p = simple_get_bytes(p, end, &window_size, sizeof(window_size)); 212 p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
207 if (IS_ERR(p)) 213 if (IS_ERR(p))
208 goto err; 214 goto err;
@@ -237,9 +243,11 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
237 p = ERR_PTR(ret); 243 p = ERR_PTR(ret);
238 goto err; 244 goto err;
239 } 245 }
246 dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
247 __func__, ctx->gc_expiry, now, timeout);
240 return q; 248 return q;
241err: 249err:
242 dprintk("RPC: %s returning %ld\n", __func__, -PTR_ERR(p)); 250 dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
243 return p; 251 return p;
244} 252}
245 253
@@ -1145,7 +1153,7 @@ gss_marshal(struct rpc_task *task, __be32 *p)
1145 1153
1146 /* We compute the checksum for the verifier over the xdr-encoded bytes 1154 /* We compute the checksum for the verifier over the xdr-encoded bytes
1147 * starting with the xid and ending at the end of the credential: */ 1155 * starting with the xid and ending at the end of the credential: */
1148 iov.iov_base = xprt_skip_transport_header(task->tk_xprt, 1156 iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
1149 req->rq_snd_buf.head[0].iov_base); 1157 req->rq_snd_buf.head[0].iov_base);
1150 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; 1158 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1151 xdr_buf_from_iov(&iov, &verf_buf); 1159 xdr_buf_from_iov(&iov, &verf_buf);
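Besides routing the transport lookup through req->rq_xprt, this file stops discounting the credential lifetime gssd reports. Worked numbers for a reported timeout of 3600 seconds:

    /* old: gc_expiry = jiffies + 3600 * HZ * 3 / 4  -> now + 2700 s
     * new: gc_expiry = now    + 3600 * HZ           -> now + 3600 s
     * i.e. the context now expires when the ticket actually does.
     */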
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index b174fcd9ff4c..f0f4eee63a35 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -140,7 +140,7 @@ gss_mech_get(struct gss_api_mech *gm)
140 140
141EXPORT_SYMBOL_GPL(gss_mech_get); 141EXPORT_SYMBOL_GPL(gss_mech_get);
142 142
143struct gss_api_mech * 143static struct gss_api_mech *
144_gss_mech_get_by_name(const char *name) 144_gss_mech_get_by_name(const char *name)
145{ 145{
146 struct gss_api_mech *pos, *gm = NULL; 146 struct gss_api_mech *pos, *gm = NULL;
@@ -205,7 +205,7 @@ mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
205 return 0; 205 return 0;
206} 206}
207 207
208struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor) 208static struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
209{ 209{
210 struct gss_api_mech *gm = NULL, *pos; 210 struct gss_api_mech *gm = NULL, *pos;
211 211
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index a9c0bbccad6b..890a29912d5a 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -59,7 +59,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
59 struct xdr_buf *xbufp; 59 struct xdr_buf *xbufp;
60 60
61 dprintk("RPC: free allocations for req= %p\n", req); 61 dprintk("RPC: free allocations for req= %p\n", req);
62 BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 62 WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
63 xbufp = &req->rq_private_buf; 63 xbufp = &req->rq_private_buf;
64 free_page((unsigned long)xbufp->head[0].iov_base); 64 free_page((unsigned long)xbufp->head[0].iov_base);
65 xbufp = &req->rq_snd_buf; 65 xbufp = &req->rq_snd_buf;
@@ -191,7 +191,9 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
191 191
192 dprintk("RPC: destroy backchannel transport\n"); 192 dprintk("RPC: destroy backchannel transport\n");
193 193
194 BUG_ON(max_reqs == 0); 194 if (max_reqs == 0)
195 goto out;
196
195 spin_lock_bh(&xprt->bc_pa_lock); 197 spin_lock_bh(&xprt->bc_pa_lock);
196 xprt_dec_alloc_count(xprt, max_reqs); 198 xprt_dec_alloc_count(xprt, max_reqs);
197 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { 199 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
@@ -202,6 +204,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
202 } 204 }
203 spin_unlock_bh(&xprt->bc_pa_lock); 205 spin_unlock_bh(&xprt->bc_pa_lock);
204 206
207out:
205 dprintk("RPC: backchannel list empty= %s\n", 208 dprintk("RPC: backchannel list empty= %s\n",
206 list_empty(&xprt->bc_pa_list) ? "true" : "false"); 209 list_empty(&xprt->bc_pa_list) ? "true" : "false");
207} 210}
@@ -255,7 +258,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)
255 dprintk("RPC: free backchannel req=%p\n", req); 258 dprintk("RPC: free backchannel req=%p\n", req);
256 259
257 smp_mb__before_clear_bit(); 260 smp_mb__before_clear_bit();
258 BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 261 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
259 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 262 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
260 smp_mb__after_clear_bit(); 263 smp_mb__after_clear_bit();
261 264
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 0b2eb388cbda..15c7a8a1c24f 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -53,7 +53,7 @@ int bc_send(struct rpc_rqst *req)
53 if (IS_ERR(task)) 53 if (IS_ERR(task))
54 ret = PTR_ERR(task); 54 ret = PTR_ERR(task);
55 else { 55 else {
56 BUG_ON(atomic_read(&task->tk_count) != 1); 56 WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
57 ret = task->tk_status; 57 ret = task->tk_status;
58 rpc_put_task(task); 58 rpc_put_task(task);
59 } 59 }
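The BUG_ON() to WARN_ON_ONCE() conversions in this series (here, backchannel_rqst.c and cache.c) trade a guaranteed crash for a single logged stack trace. WARN_ON_ONCE() also evaluates to its condition, so a caller can both log and recover; sketched:

    /* BUG_ON(cond):        oops the kernel on a "can't happen" state.
     * WARN_ON_ONCE(cond):  print one backtrace the first time, keep
     *                      running, and return cond so callers may
     *                      branch on it: */
    if (WARN_ON_ONCE(atomic_read(&task->tk_count) != 1))
            dprintk("RPC: unexpected extra task reference\n");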
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index fc2f7aa4dca7..9afa4393c217 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -775,11 +775,11 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
775 if (rp->q.list.next == &cd->queue) { 775 if (rp->q.list.next == &cd->queue) {
776 spin_unlock(&queue_lock); 776 spin_unlock(&queue_lock);
777 mutex_unlock(&inode->i_mutex); 777 mutex_unlock(&inode->i_mutex);
778 BUG_ON(rp->offset); 778 WARN_ON_ONCE(rp->offset);
779 return 0; 779 return 0;
780 } 780 }
781 rq = container_of(rp->q.list.next, struct cache_request, q.list); 781 rq = container_of(rp->q.list.next, struct cache_request, q.list);
782 BUG_ON(rq->q.reader); 782 WARN_ON_ONCE(rq->q.reader);
783 if (rp->offset == 0) 783 if (rp->offset == 0)
784 rq->readers++; 784 rq->readers++;
785 spin_unlock(&queue_lock); 785 spin_unlock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cdc7564b4512..a9f7906c1a6a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -132,8 +132,10 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
132 int error; 132 int error;
133 133
134 dir = rpc_d_lookup_sb(sb, dir_name); 134 dir = rpc_d_lookup_sb(sb, dir_name);
135 if (dir == NULL) 135 if (dir == NULL) {
136 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
136 return dir; 137 return dir;
138 }
137 for (;;) { 139 for (;;) {
138 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 140 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
139 name[sizeof(name) - 1] = '\0'; 141 name[sizeof(name) - 1] = '\0';
@@ -192,7 +194,8 @@ static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
192 case RPC_PIPEFS_MOUNT: 194 case RPC_PIPEFS_MOUNT:
193 dentry = rpc_setup_pipedir_sb(sb, clnt, 195 dentry = rpc_setup_pipedir_sb(sb, clnt,
194 clnt->cl_program->pipe_dir_name); 196 clnt->cl_program->pipe_dir_name);
195 BUG_ON(dentry == NULL); 197 if (!dentry)
198 return -ENOENT;
196 if (IS_ERR(dentry)) 199 if (IS_ERR(dentry))
197 return PTR_ERR(dentry); 200 return PTR_ERR(dentry);
198 clnt->cl_dentry = dentry; 201 clnt->cl_dentry = dentry;
@@ -234,7 +237,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
234 spin_lock(&sn->rpc_client_lock); 237 spin_lock(&sn->rpc_client_lock);
235 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 238 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
236 if (clnt->cl_program->pipe_dir_name == NULL) 239 if (clnt->cl_program->pipe_dir_name == NULL)
237 break; 240 continue;
238 if (rpc_clnt_skip_event(clnt, event)) 241 if (rpc_clnt_skip_event(clnt, event))
239 continue; 242 continue;
240 if (atomic_inc_not_zero(&clnt->cl_count) == 0) 243 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
@@ -552,7 +555,7 @@ EXPORT_SYMBOL_GPL(rpc_clone_client);
552 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth 555 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
553 * 556 *
554 * @clnt: RPC client whose parameters are copied 557 * @clnt: RPC client whose parameters are copied
555 * @auth: security flavor for new client 558 * @flavor: security flavor for new client
556 * 559 *
557 * Returns a fresh RPC client or an ERR_PTR. 560 * Returns a fresh RPC client or an ERR_PTR.
558 */ 561 */
@@ -607,6 +610,8 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks);
607 */ 610 */
608void rpc_shutdown_client(struct rpc_clnt *clnt) 611void rpc_shutdown_client(struct rpc_clnt *clnt)
609{ 612{
613 might_sleep();
614
610 dprintk_rcu("RPC: shutting down %s client for %s\n", 615 dprintk_rcu("RPC: shutting down %s client for %s\n",
611 clnt->cl_protname, 616 clnt->cl_protname,
612 rcu_dereference(clnt->cl_xprt)->servername); 617 rcu_dereference(clnt->cl_xprt)->servername);
@@ -693,21 +698,19 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
693 const struct rpc_program *program, 698 const struct rpc_program *program,
694 u32 vers) 699 u32 vers)
695{ 700{
701 struct rpc_create_args args = {
702 .program = program,
703 .prognumber = program->number,
704 .version = vers,
705 .authflavor = old->cl_auth->au_flavor,
706 .client_name = old->cl_principal,
707 };
696 struct rpc_clnt *clnt; 708 struct rpc_clnt *clnt;
697 const struct rpc_version *version;
698 int err; 709 int err;
699 710
700 BUG_ON(vers >= program->nrvers || !program->version[vers]); 711 clnt = __rpc_clone_client(&args, old);
701 version = program->version[vers];
702 clnt = rpc_clone_client(old);
703 if (IS_ERR(clnt)) 712 if (IS_ERR(clnt))
704 goto out; 713 goto out;
705 clnt->cl_procinfo = version->procs;
706 clnt->cl_maxproc = version->nrprocs;
707 clnt->cl_protname = program->name;
708 clnt->cl_prog = program->number;
709 clnt->cl_vers = version->number;
710 clnt->cl_stats = program->stats;
711 err = rpc_ping(clnt); 714 err = rpc_ping(clnt);
712 if (err != 0) { 715 if (err != 0) {
713 rpc_shutdown_client(clnt); 716 rpc_shutdown_client(clnt);
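rpc_bind_new_program() now builds a struct rpc_create_args with designated initializers and hands it to __rpc_clone_client(), instead of cloning first and patching half a dozen fields by hand: the per-program state is declared in one place and the clone helper owns the copying. A compilable sketch of the idiom, with illustrative stand-in types rather than the kernel's:

#include <stdio.h>

/* Stand-in types; the kernel's rpc_create_args has many more fields. */
struct create_args {
	const char *program;
	unsigned int prognumber;
	unsigned int version;
	unsigned int authflavor;
};

struct client {
	struct create_args args;
};

/* The clone helper owns all field copying, like __rpc_clone_client(). */
static int clone_client(struct client *clnt, const struct create_args *args)
{
	clnt->args = *args;
	return 0;
}

int main(void)
{
	struct client old = { .args = { .authflavor = 390003 } };
	struct create_args args = {
		.program    = "new_program",	/* illustrative values */
		.prognumber = 100003,
		.version    = 3,
		.authflavor = old.args.authflavor, /* inherited from parent */
	};
	struct client clnt;

	if (clone_client(&clnt, &args) == 0)
		printf("%s v%u\n", clnt.args.program, clnt.args.version);
	return 0;
}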
@@ -832,7 +835,12 @@ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flag
832 }; 835 };
833 int status; 836 int status;
834 837
835 BUG_ON(flags & RPC_TASK_ASYNC); 838 WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
839 if (flags & RPC_TASK_ASYNC) {
840 rpc_release_calldata(task_setup_data.callback_ops,
841 task_setup_data.callback_data);
842 return -EINVAL;
843 }
836 844
837 task = rpc_run_task(&task_setup_data); 845 task = rpc_run_task(&task_setup_data);
838 if (IS_ERR(task)) 846 if (IS_ERR(task))
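The rpc_call_sync() hunk shows the other half of the BUG_ON conversion: when misuse is caller-visible, the error path must still honour the function's contract, here by releasing the caller's calldata before returning -EINVAL so nothing leaks. A small sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

#define TASK_ASYNC 0x0001

/* Sketch: a sync-only entry point that rejects the async flag but
 * still consumes the caller-owned callback data, mirroring the
 * rpc_release_calldata() call in the hunk above. */
static int call_sync(int flags, void *calldata)
{
	if (flags & TASK_ASYNC) {
		free(calldata);		/* error path keeps the contract */
		return -22;		/* -EINVAL */
	}
	/* ... run the request and wait for it ... */
	free(calldata);
	return 0;
}

int main(void)
{
	printf("%d\n", call_sync(TASK_ASYNC, malloc(16)));	/* -22 */
	return 0;
}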
@@ -908,7 +916,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
908 916
909 task->tk_action = call_bc_transmit; 917 task->tk_action = call_bc_transmit;
910 atomic_inc(&task->tk_count); 918 atomic_inc(&task->tk_count);
911 BUG_ON(atomic_read(&task->tk_count) != 2); 919 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
912 rpc_execute(task); 920 rpc_execute(task);
913 921
914out: 922out:
@@ -1368,6 +1376,7 @@ call_refreshresult(struct rpc_task *task)
1368 return; 1376 return;
1369 case -ETIMEDOUT: 1377 case -ETIMEDOUT:
1370 rpc_delay(task, 3*HZ); 1378 rpc_delay(task, 3*HZ);
1379 case -EKEYEXPIRED:
1371 case -EAGAIN: 1380 case -EAGAIN:
1372 status = -EACCES; 1381 status = -EACCES;
1373 if (!task->tk_cred_retry) 1382 if (!task->tk_cred_retry)
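The call_refreshresult() hunk adds -EKEYEXPIRED to an intentional fallthrough chain: -ETIMEDOUT backs off via rpc_delay() and then falls through, joining -EKEYEXPIRED and -EAGAIN on the shared retry-or--EACCES path. A compilable sketch of that grouping; the retry budget and names are stand-ins:

#include <stdio.h>
#include <errno.h>

static int refresh_result(int status, int *cred_retry)
{
	switch (status) {
	case -ETIMEDOUT:
		/* back off first: rpc_delay(task, 3*HZ) in the real code */
		/* fall through */
	case -EKEYEXPIRED:
	case -EAGAIN:
		if ((*cred_retry)-- > 0)
			return -EAGAIN;		/* retry the credential refresh */
		return -EACCES;			/* out of retries */
	default:
		return status;
	}
}

int main(void)
{
	int retries = 1;
	printf("%d\n", refresh_result(-EKEYEXPIRED, &retries)); /* -EAGAIN */
	printf("%d\n", refresh_result(-EKEYEXPIRED, &retries)); /* -EACCES */
	return 0;
}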
@@ -1391,7 +1400,7 @@ call_allocate(struct rpc_task *task)
1391{ 1400{
1392 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; 1401 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1393 struct rpc_rqst *req = task->tk_rqstp; 1402 struct rpc_rqst *req = task->tk_rqstp;
1394 struct rpc_xprt *xprt = task->tk_xprt; 1403 struct rpc_xprt *xprt = req->rq_xprt;
1395 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1404 struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1396 1405
1397 dprint_status(task); 1406 dprint_status(task);
@@ -1499,7 +1508,7 @@ rpc_xdr_encode(struct rpc_task *task)
1499static void 1508static void
1500call_bind(struct rpc_task *task) 1509call_bind(struct rpc_task *task)
1501{ 1510{
1502 struct rpc_xprt *xprt = task->tk_xprt; 1511 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1503 1512
1504 dprint_status(task); 1513 dprint_status(task);
1505 1514
@@ -1593,7 +1602,7 @@ retry_timeout:
1593static void 1602static void
1594call_connect(struct rpc_task *task) 1603call_connect(struct rpc_task *task)
1595{ 1604{
1596 struct rpc_xprt *xprt = task->tk_xprt; 1605 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1597 1606
1598 dprintk("RPC: %5u call_connect xprt %p %s connected\n", 1607 dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1599 task->tk_pid, xprt, 1608 task->tk_pid, xprt,
@@ -1654,7 +1663,6 @@ call_transmit(struct rpc_task *task)
1654 task->tk_action = call_transmit_status; 1663 task->tk_action = call_transmit_status;
1655 /* Encode here so that rpcsec_gss can use correct sequence number. */ 1664 /* Encode here so that rpcsec_gss can use correct sequence number. */
1656 if (rpc_task_need_encode(task)) { 1665 if (rpc_task_need_encode(task)) {
1657 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1658 rpc_xdr_encode(task); 1666 rpc_xdr_encode(task);
1659 /* Did the encode result in an error condition? */ 1667 /* Did the encode result in an error condition? */
1660 if (task->tk_status != 0) { 1668 if (task->tk_status != 0) {
@@ -1677,7 +1685,7 @@ call_transmit(struct rpc_task *task)
1677 if (rpc_reply_expected(task)) 1685 if (rpc_reply_expected(task))
1678 return; 1686 return;
1679 task->tk_action = rpc_exit_task; 1687 task->tk_action = rpc_exit_task;
1680 rpc_wake_up_queued_task(&task->tk_xprt->pending, task); 1688 rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1681} 1689}
1682 1690
1683/* 1691/*
@@ -1738,7 +1746,6 @@ call_bc_transmit(struct rpc_task *task)
1738{ 1746{
1739 struct rpc_rqst *req = task->tk_rqstp; 1747 struct rpc_rqst *req = task->tk_rqstp;
1740 1748
1741 BUG_ON(task->tk_status != 0);
1742 task->tk_status = xprt_prepare_transmit(task); 1749 task->tk_status = xprt_prepare_transmit(task);
1743 if (task->tk_status == -EAGAIN) { 1750 if (task->tk_status == -EAGAIN) {
1744 /* 1751 /*
@@ -1777,7 +1784,7 @@ call_bc_transmit(struct rpc_task *task)
1777 */ 1784 */
1778 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 1785 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1779 "error: %d\n", task->tk_status); 1786 "error: %d\n", task->tk_status);
1780 xprt_conditional_disconnect(task->tk_xprt, 1787 xprt_conditional_disconnect(req->rq_xprt,
1781 req->rq_connect_cookie); 1788 req->rq_connect_cookie);
1782 break; 1789 break;
1783 default: 1790 default:
@@ -1785,7 +1792,7 @@ call_bc_transmit(struct rpc_task *task)
1785 * We were unable to reply and will have to drop the 1792 * We were unable to reply and will have to drop the
1786 * request. The server should reconnect and retransmit. 1793 * request. The server should reconnect and retransmit.
1787 */ 1794 */
1788 BUG_ON(task->tk_status == -EAGAIN); 1795 WARN_ON_ONCE(task->tk_status == -EAGAIN);
1789 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 1796 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1790 "error: %d\n", task->tk_status); 1797 "error: %d\n", task->tk_status);
1791 break; 1798 break;
@@ -1829,7 +1836,7 @@ call_status(struct rpc_task *task)
1829 case -ETIMEDOUT: 1836 case -ETIMEDOUT:
1830 task->tk_action = call_timeout; 1837 task->tk_action = call_timeout;
1831 if (task->tk_client->cl_discrtry) 1838 if (task->tk_client->cl_discrtry)
1832 xprt_conditional_disconnect(task->tk_xprt, 1839 xprt_conditional_disconnect(req->rq_xprt,
1833 req->rq_connect_cookie); 1840 req->rq_connect_cookie);
1834 break; 1841 break;
1835 case -ECONNRESET: 1842 case -ECONNRESET:
@@ -1984,7 +1991,7 @@ out_retry:
1984 if (task->tk_rqstp == req) { 1991 if (task->tk_rqstp == req) {
1985 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; 1992 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1986 if (task->tk_client->cl_discrtry) 1993 if (task->tk_client->cl_discrtry)
1987 xprt_conditional_disconnect(task->tk_xprt, 1994 xprt_conditional_disconnect(req->rq_xprt,
1988 req->rq_connect_cookie); 1995 req->rq_connect_cookie);
1989 } 1996 }
1990} 1997}
@@ -1998,7 +2005,7 @@ rpc_encode_header(struct rpc_task *task)
1998 2005
1999 /* FIXME: check buffer size? */ 2006 /* FIXME: check buffer size? */
2000 2007
2001 p = xprt_skip_transport_header(task->tk_xprt, p); 2008 p = xprt_skip_transport_header(req->rq_xprt, p);
2002 *p++ = req->rq_xid; /* XID */ 2009 *p++ = req->rq_xid; /* XID */
2003 *p++ = htonl(RPC_CALL); /* CALL */ 2010 *p++ = htonl(RPC_CALL); /* CALL */
2004 *p++ = htonl(RPC_VERSION); /* RPC version */ 2011 *p++ = htonl(RPC_VERSION); /* RPC version */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 80f5dd23417d..fd10981ea792 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1093,7 +1093,7 @@ void rpc_put_sb_net(const struct net *net)
1093{ 1093{
1094 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1094 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1095 1095
1096 BUG_ON(sn->pipefs_sb == NULL); 1096 WARN_ON(sn->pipefs_sb == NULL);
1097 mutex_unlock(&sn->pipefs_sb_lock); 1097 mutex_unlock(&sn->pipefs_sb_lock);
1098} 1098}
1099EXPORT_SYMBOL_GPL(rpc_put_sb_net); 1099EXPORT_SYMBOL_GPL(rpc_put_sb_net);
@@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
1152 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1152 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1153 1153
1154 mutex_lock(&sn->pipefs_sb_lock); 1154 mutex_lock(&sn->pipefs_sb_lock);
1155 if (sn->pipefs_sb != sb) {
1156 mutex_unlock(&sn->pipefs_sb_lock);
1157 goto out;
1158 }
1155 sn->pipefs_sb = NULL; 1159 sn->pipefs_sb = NULL;
1156 mutex_unlock(&sn->pipefs_sb_lock); 1160 mutex_unlock(&sn->pipefs_sb_lock);
1157 put_net(net);
1158 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", 1161 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
1159 net, NET_NAME(net)); 1162 net, NET_NAME(net));
1160 blocking_notifier_call_chain(&rpc_pipefs_notifier_list, 1163 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1161 RPC_PIPEFS_UMOUNT, 1164 RPC_PIPEFS_UMOUNT,
1162 sb); 1165 sb);
1166 put_net(net);
1167out:
1163 kill_litter_super(sb); 1168 kill_litter_super(sb);
1164} 1169}
1165 1170
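Two fixes ride together in the rpc_kill_sb() hunk: the function now verifies under pipefs_sb_lock that this superblock is still the one it owns before tearing anything down, and put_net() moves after the notifier chain so the namespace cannot be freed while UMOUNT notifiers may still dereference it. A userspace mock of the ordering; the pthread mutex and printfs stand in for the kernel primitives:

#include <stdio.h>
#include <pthread.h>

struct sn { pthread_mutex_t lock; void *pipefs_sb; };

static void notify_umount(void *sb) { printf("UMOUNT notifiers for %p\n", sb); }
static void put_net(void)           { printf("put_net\n"); }

static void kill_sb(struct sn *sn, void *sb)
{
	pthread_mutex_lock(&sn->lock);
	if (sn->pipefs_sb != sb) {		/* not ours: skip teardown */
		pthread_mutex_unlock(&sn->lock);
		goto out;
	}
	sn->pipefs_sb = NULL;
	pthread_mutex_unlock(&sn->lock);
	notify_umount(sb);	/* notifiers may still use the net namespace */
	put_net();		/* so the reference is dropped only afterwards */
out:
	printf("kill_litter_super(%p)\n", sb);
}

int main(void)
{
	struct sn sn = { PTHREAD_MUTEX_INITIALIZER, (void *)0x1 };
	kill_sb(&sn, (void *)0x1);	/* full teardown */
	kill_sb(&sn, (void *)0x2);	/* foreign sb: only kill_litter_super */
	return 0;
}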
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index a70acae496e4..795a0f4e920b 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/nsproxy.h>
27#include <net/ipv6.h> 26#include <net/ipv6.h>
28 27
29#include <linux/sunrpc/clnt.h> 28#include <linux/sunrpc/clnt.h>
@@ -884,7 +883,10 @@ static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
884 u32 len; 883 u32 len;
885 884
886 len = strlen(string); 885 len = strlen(string);
887 BUG_ON(len > maxstrlen); 886 WARN_ON_ONCE(len > maxstrlen);
887 if (len > maxstrlen)
888 /* truncate and hope for the best */
889 len = maxstrlen;
888 p = xdr_reserve_space(xdr, 4 + len); 890 p = xdr_reserve_space(xdr, 4 + len);
889 xdr_encode_opaque(p, string, len); 891 xdr_encode_opaque(p, string, len);
890} 892}
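The encode_rpcb_string() hunk demotes an oversized-string crash to a warn-and-truncate: the string is clamped to the protocol maximum and encoding continues. A minimal sketch of the clamp:

#include <stdio.h>
#include <string.h>

/* Sketch of the clamp above: warn on an oversized string, then
 * truncate to the on-the-wire maximum instead of crashing mid-encode. */
static size_t clamp_strlen(const char *s, size_t maxlen)
{
	size_t len = strlen(s);

	if (len > maxlen) {
		fprintf(stderr, "WARNING: rpcbind string %zu > max %zu\n",
			len, maxlen);
		len = maxlen;	/* truncate and hope for the best */
	}
	return len;
}

int main(void)
{
	printf("%zu\n", clamp_strlen("universal.address.string", 8)); /* 8 */
	return 0;
}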
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6357fcb00c7e..fb20f25ddec9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,6 +98,39 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99} 99}
100 100
101 static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
102 {
103 struct list_head *q = &queue->tasks[queue->priority];
104 struct rpc_task *task;
105
106 if (!list_empty(q)) {
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
108 if (task->tk_owner == queue->owner)
109 list_move_tail(&task->u.tk_wait.list, q);
110 }
111 }
112
113 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
114 {
115 if (queue->priority != priority) {
116 /* Fairness: rotate the list when changing priority */
117 rpc_rotate_queue_owner(queue);
118 queue->priority = priority;
119 }
120 }
121
122 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
123 {
124 queue->owner = pid;
125 queue->nr = RPC_BATCH_COUNT;
126 }
127
128 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
129 {
130 rpc_set_waitqueue_priority(queue, queue->maxpriority);
131 rpc_set_waitqueue_owner(queue, 0);
132 }
133
101/* 134/*
102 * Add new request to a priority queue. 135 * Add new request to a priority queue.
103 */ 136 */
@@ -109,9 +142,11 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
109 struct rpc_task *t; 142 struct rpc_task *t;
110 143
111 INIT_LIST_HEAD(&task->u.tk_wait.links); 144 INIT_LIST_HEAD(&task->u.tk_wait.links);
112 q = &queue->tasks[queue_priority];
113 if (unlikely(queue_priority > queue->maxpriority)) 145 if (unlikely(queue_priority > queue->maxpriority))
114 q = &queue->tasks[queue->maxpriority]; 146 queue_priority = queue->maxpriority;
147 if (queue_priority > queue->priority)
148 rpc_set_waitqueue_priority(queue, queue_priority);
149 q = &queue->tasks[queue_priority];
115 list_for_each_entry(t, q, u.tk_wait.list) { 150 list_for_each_entry(t, q, u.tk_wait.list) {
116 if (t->tk_owner == task->tk_owner) { 151 if (t->tk_owner == task->tk_owner) {
117 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); 152 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
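The new rpc_rotate_queue_owner() above implements the fairness rule referenced by rpc_set_waitqueue_priority(): when the queue's priority level changes, a task at the head that belongs to the current batch owner is moved to the tail, so one owner cannot carry its head-of-line position across priority changes. A userspace sketch with an array standing in for the kernel's linked list:

#include <stdio.h>

struct task { int owner; };

/* Move a head task owned by 'owner' to the tail, like the
 * list_move_tail() in rpc_rotate_queue_owner() above. */
static void rotate_owner(struct task *q, int n, int owner)
{
	if (n > 1 && q[0].owner == owner) {
		struct task head = q[0];
		for (int i = 1; i < n; i++)
			q[i - 1] = q[i];
		q[n - 1] = head;
	}
}

int main(void)
{
	struct task q[3] = { {1}, {2}, {3} };

	rotate_owner(q, 3, 1);		/* owner 1 loses its head slot */
	for (int i = 0; i < 3; i++)
		printf("%d ", q[i].owner);	/* prints: 2 3 1 */
	printf("\n");
	return 0;
}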
@@ -133,7 +168,9 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
133 struct rpc_task *task, 168 struct rpc_task *task,
134 unsigned char queue_priority) 169 unsigned char queue_priority)
135{ 170{
136 BUG_ON (RPC_IS_QUEUED(task)); 171 WARN_ON_ONCE(RPC_IS_QUEUED(task));
172 if (RPC_IS_QUEUED(task))
173 return;
137 174
138 if (RPC_IS_PRIORITY(queue)) 175 if (RPC_IS_PRIORITY(queue))
139 __rpc_add_wait_queue_priority(queue, task, queue_priority); 176 __rpc_add_wait_queue_priority(queue, task, queue_priority);
@@ -178,24 +215,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
178 task->tk_pid, queue, rpc_qname(queue)); 215 task->tk_pid, queue, rpc_qname(queue));
179} 216}
180 217
181static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
182{
183 queue->priority = priority;
184 queue->count = 1 << (priority * 2);
185}
186
187static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
188{
189 queue->owner = pid;
190 queue->nr = RPC_BATCH_COUNT;
191}
192
193static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
194{
195 rpc_set_waitqueue_priority(queue, queue->maxpriority);
196 rpc_set_waitqueue_owner(queue, 0);
197}
198
199static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) 218static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
200{ 219{
201 int i; 220 int i;
@@ -334,7 +353,7 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
334 353
335 __rpc_add_wait_queue(q, task, queue_priority); 354 __rpc_add_wait_queue(q, task, queue_priority);
336 355
337 BUG_ON(task->tk_callback != NULL); 356 WARN_ON_ONCE(task->tk_callback != NULL);
338 task->tk_callback = action; 357 task->tk_callback = action;
339 __rpc_add_timer(q, task); 358 __rpc_add_timer(q, task);
340} 359}
@@ -343,7 +362,12 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
343 rpc_action action) 362 rpc_action action)
344{ 363{
345 /* We shouldn't ever put an inactive task to sleep */ 364 /* We shouldn't ever put an inactive task to sleep */
346 BUG_ON(!RPC_IS_ACTIVATED(task)); 365 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
366 if (!RPC_IS_ACTIVATED(task)) {
367 task->tk_status = -EIO;
368 rpc_put_task_async(task);
369 return;
370 }
347 371
348 /* 372 /*
349 * Protect the queue operations. 373 * Protect the queue operations.
@@ -358,7 +382,12 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
358 rpc_action action, int priority) 382 rpc_action action, int priority)
359{ 383{
360 /* We shouldn't ever put an inactive task to sleep */ 384 /* We shouldn't ever put an inactive task to sleep */
361 BUG_ON(!RPC_IS_ACTIVATED(task)); 385 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
386 if (!RPC_IS_ACTIVATED(task)) {
387 task->tk_status = -EIO;
388 rpc_put_task_async(task);
389 return;
390 }
362 391
363 /* 392 /*
364 * Protect the queue operations. 393 * Protect the queue operations.
@@ -367,6 +396,7 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
367 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); 396 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
368 spin_unlock_bh(&q->lock); 397 spin_unlock_bh(&q->lock);
369} 398}
399EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
370 400
371/** 401/**
372 * __rpc_do_wake_up_task - wake up a single rpc_task 402 * __rpc_do_wake_up_task - wake up a single rpc_task
@@ -451,8 +481,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
451 /* 481 /*
452 * Check if we need to switch queues. 482 * Check if we need to switch queues.
453 */ 483 */
454 if (--queue->count) 484 goto new_owner;
455 goto new_owner;
456 } 485 }
457 486
458 /* 487 /*
@@ -697,7 +726,9 @@ static void __rpc_execute(struct rpc_task *task)
697 dprintk("RPC: %5u __rpc_execute flags=0x%x\n", 726 dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
698 task->tk_pid, task->tk_flags); 727 task->tk_pid, task->tk_flags);
699 728
700 BUG_ON(RPC_IS_QUEUED(task)); 729 WARN_ON_ONCE(RPC_IS_QUEUED(task));
730 if (RPC_IS_QUEUED(task))
731 return;
701 732
702 for (;;) { 733 for (;;) {
703 void (*do_action)(struct rpc_task *); 734 void (*do_action)(struct rpc_task *);
@@ -919,16 +950,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
919 return task; 950 return task;
920} 951}
921 952
953/*
954 * rpc_free_task - release rpc task and perform cleanups
955 *
956 * Note that we free up the rpc_task _after_ rpc_release_calldata()
957 * in order to work around a workqueue dependency issue.
958 *
959 * Tejun Heo states:
960 * "Workqueue currently considers two work items to be the same if they're
961 * on the same address and won't execute them concurrently - ie. it
962 * makes a work item which is queued again while being executed wait
963 * for the previous execution to complete.
964 *
965 * If a work function frees the work item, and then waits for an event
966 * which should be performed by another work item and *that* work item
967 * recycles the freed work item, it can create a false dependency loop.
968 * There really is no reliable way to detect this short of verifying
969 * every memory free."
970 *
971 */
922static void rpc_free_task(struct rpc_task *task) 972static void rpc_free_task(struct rpc_task *task)
923{ 973{
924 const struct rpc_call_ops *tk_ops = task->tk_ops; 974 unsigned short tk_flags = task->tk_flags;
925 void *calldata = task->tk_calldata; 975
976 rpc_release_calldata(task->tk_ops, task->tk_calldata);
926 977
927 if (task->tk_flags & RPC_TASK_DYNAMIC) { 978 if (tk_flags & RPC_TASK_DYNAMIC) {
928 dprintk("RPC: %5u freeing task\n", task->tk_pid); 979 dprintk("RPC: %5u freeing task\n", task->tk_pid);
929 mempool_free(task, rpc_task_mempool); 980 mempool_free(task, rpc_task_mempool);
930 } 981 }
931 rpc_release_calldata(tk_ops, calldata);
932} 982}
933 983
934static void rpc_async_release(struct work_struct *work) 984static void rpc_async_release(struct work_struct *work)
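Per the comment block quoted above, rpc_free_task() now snapshots tk_flags, runs rpc_release_calldata() first, and frees the task last: if the callback re-queues work that lands at the task's old address, freeing first could set up the false same-address dependency Tejun describes. A userspace sketch of the safe ordering, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

#define TASK_DYNAMIC 0x0001

struct task {
	unsigned short flags;
	void (*release)(void *data);
	void *data;
};

static void free_task(struct task *t)
{
	unsigned short flags = t->flags;	/* snapshot before the callback */

	if (t->release)
		t->release(t->data);	/* may recycle memory at this address */

	if (flags & TASK_DYNAMIC)	/* decided from the snapshot, */
		free(t);		/* and freed only afterwards */
}

static void on_release(void *data)
{
	printf("calldata released: %s\n", (char *)data);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));

	t->flags = TASK_DYNAMIC;
	t->release = on_release;
	t->data = "done";
	free_task(t);
	return 0;
}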
@@ -938,8 +988,7 @@ static void rpc_async_release(struct work_struct *work)
938 988
939static void rpc_release_resources_task(struct rpc_task *task) 989static void rpc_release_resources_task(struct rpc_task *task)
940{ 990{
941 if (task->tk_rqstp) 991 xprt_release(task);
942 xprt_release(task);
943 if (task->tk_msg.rpc_cred) { 992 if (task->tk_msg.rpc_cred) {
944 put_rpccred(task->tk_msg.rpc_cred); 993 put_rpccred(task->tk_msg.rpc_cred);
945 task->tk_msg.rpc_cred = NULL; 994 task->tk_msg.rpc_cred = NULL;
@@ -981,7 +1030,7 @@ static void rpc_release_task(struct rpc_task *task)
981{ 1030{
982 dprintk("RPC: %5u release task\n", task->tk_pid); 1031 dprintk("RPC: %5u release task\n", task->tk_pid);
983 1032
984 BUG_ON (RPC_IS_QUEUED(task)); 1033 WARN_ON_ONCE(RPC_IS_QUEUED(task));
985 1034
986 rpc_release_resources_task(task); 1035 rpc_release_resources_task(task);
987 1036
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 3ee7461926d8..dbf12ac5ecb7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/nsproxy.h>
24 23
25#include <linux/sunrpc/types.h> 24#include <linux/sunrpc/types.h>
26#include <linux/sunrpc/xdr.h> 25#include <linux/sunrpc/xdr.h>
@@ -324,7 +323,9 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
324 * The caller checks for sv_nrpools > 1, which 323 * The caller checks for sv_nrpools > 1, which
325 * implies that we've been initialized. 324 * implies that we've been initialized.
326 */ 325 */
327 BUG_ON(m->count == 0); 326 WARN_ON_ONCE(m->count == 0);
327 if (m->count == 0)
328 return;
328 329
329 switch (m->mode) { 330 switch (m->mode) {
330 case SVC_POOL_PERCPU: 331 case SVC_POOL_PERCPU:
@@ -585,7 +586,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
585 * We assume one is at most one page 586 * We assume one is at most one page
586 */ 587 */
587 arghi = 0; 588 arghi = 0;
588 BUG_ON(pages > RPCSVC_MAXPAGES); 589 WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
590 if (pages > RPCSVC_MAXPAGES)
591 pages = RPCSVC_MAXPAGES;
589 while (pages) { 592 while (pages) {
590 struct page *p = alloc_pages_node(node, GFP_KERNEL, 0); 593 struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
591 if (!p) 594 if (!p)
@@ -946,7 +949,9 @@ int svc_register(const struct svc_serv *serv, struct net *net,
946 unsigned int i; 949 unsigned int i;
947 int error = 0; 950 int error = 0;
948 951
949 BUG_ON(proto == 0 && port == 0); 952 WARN_ON_ONCE(proto == 0 && port == 0);
953 if (proto == 0 && port == 0)
954 return -EINVAL;
950 955
951 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 956 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
952 for (i = 0; i < progp->pg_nvers; i++) { 957 for (i = 0; i < progp->pg_nvers; i++) {
@@ -1035,7 +1040,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
1035} 1040}
1036 1041
1037/* 1042/*
1038 * Printk the given error with the address of the client that caused it. 1043 * dprintk the given error with the address of the client that caused it.
1039 */ 1044 */
1040static __printf(2, 3) 1045static __printf(2, 3)
1041void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 1046void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
@@ -1049,8 +1054,7 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1049 vaf.fmt = fmt; 1054 vaf.fmt = fmt;
1050 vaf.va = &args; 1055 vaf.va = &args;
1051 1056
1052 net_warn_ratelimited("svc: %s: %pV", 1057 dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1053 svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1054 1058
1055 va_end(args); 1059 va_end(args);
1056} 1060}
@@ -1299,7 +1303,7 @@ svc_process(struct svc_rqst *rqstp)
1299 * Setup response xdr_buf. 1303 * Setup response xdr_buf.
1300 * Initially it has just one page 1304 * Initially it has just one page
1301 */ 1305 */
1302 rqstp->rq_resused = 1; 1306 rqstp->rq_next_page = &rqstp->rq_respages[1];
1303 resv->iov_base = page_address(rqstp->rq_respages[0]); 1307 resv->iov_base = page_address(rqstp->rq_respages[0]);
1304 resv->iov_len = 0; 1308 resv->iov_len = 0;
1305 rqstp->rq_res.pages = rqstp->rq_respages + 1; 1309 rqstp->rq_res.pages = rqstp->rq_respages + 1;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 194d865fae72..b8e47fac7315 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -218,7 +218,9 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
218 */ 218 */
219static void svc_xprt_received(struct svc_xprt *xprt) 219static void svc_xprt_received(struct svc_xprt *xprt)
220{ 220{
221 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 221 WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
222 if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
223 return;
222 /* As soon as we clear busy, the xprt could be closed and 224 /* As soon as we clear busy, the xprt could be closed and
223 * 'put', so we need a reference to call svc_xprt_enqueue with: 225 * 'put', so we need a reference to call svc_xprt_enqueue with:
224 */ 226 */
@@ -577,7 +579,10 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
577 579
578 /* now allocate needed pages. If we get a failure, sleep briefly */ 580 /* now allocate needed pages. If we get a failure, sleep briefly */
579 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 581 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
580 BUG_ON(pages >= RPCSVC_MAXPAGES); 582 WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
583 if (pages >= RPCSVC_MAXPAGES)
584 /* use as many pages as possible */
585 pages = RPCSVC_MAXPAGES - 1;
581 for (i = 0; i < pages ; i++) 586 for (i = 0; i < pages ; i++)
582 while (rqstp->rq_pages[i] == NULL) { 587 while (rqstp->rq_pages[i] == NULL) {
583 struct page *p = alloc_page(GFP_KERNEL); 588 struct page *p = alloc_page(GFP_KERNEL);
@@ -926,7 +931,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
926 spin_lock_bh(&serv->sv_lock); 931 spin_lock_bh(&serv->sv_lock);
927 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) 932 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
928 list_del_init(&xprt->xpt_list); 933 list_del_init(&xprt->xpt_list);
929 BUG_ON(!list_empty(&xprt->xpt_ready)); 934 WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
930 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 935 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
931 serv->sv_tmpcnt--; 936 serv->sv_tmpcnt--;
932 spin_unlock_bh(&serv->sv_lock); 937 spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 03827cef1fa7..0f679df7d072 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -84,7 +84,11 @@ static struct lock_class_key svc_slock_key[2];
84static void svc_reclassify_socket(struct socket *sock) 84static void svc_reclassify_socket(struct socket *sock)
85{ 85{
86 struct sock *sk = sock->sk; 86 struct sock *sk = sock->sk;
87 BUG_ON(sock_owned_by_user(sk)); 87
88 WARN_ON_ONCE(sock_owned_by_user(sk));
89 if (sock_owned_by_user(sk))
90 return;
91
88 switch (sk->sk_family) { 92 switch (sk->sk_family) {
89 case AF_INET: 93 case AF_INET:
90 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", 94 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
@@ -461,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
461} 465}
462 466
463/* 467/*
464 * See net/ipv6/datagram.c : datagram_recv_ctl 468 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
465 */ 469 */
466static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
467 struct cmsghdr *cmh) 471 struct cmsghdr *cmh)
@@ -601,6 +605,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
601 rqstp->rq_respages = rqstp->rq_pages + 1 + 605 rqstp->rq_respages = rqstp->rq_pages + 1 +
602 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); 606 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
603 } 607 }
608 rqstp->rq_next_page = rqstp->rq_respages+1;
604 609
605 if (serv->sv_stats) 610 if (serv->sv_stats)
606 serv->sv_stats->netudpcnt++; 611 serv->sv_stats->netudpcnt++;
@@ -874,9 +879,9 @@ static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst
874{ 879{
875 unsigned int i, len, npages; 880 unsigned int i, len, npages;
876 881
877 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 882 if (svsk->sk_datalen == 0)
878 return 0; 883 return 0;
879 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 884 len = svsk->sk_datalen;
880 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 885 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
881 for (i = 0; i < npages; i++) { 886 for (i = 0; i < npages; i++) {
882 if (rqstp->rq_pages[i] != NULL) 887 if (rqstp->rq_pages[i] != NULL)
@@ -893,9 +898,9 @@ static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
893{ 898{
894 unsigned int i, len, npages; 899 unsigned int i, len, npages;
895 900
896 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 901 if (svsk->sk_datalen == 0)
897 return; 902 return;
898 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 903 len = svsk->sk_datalen;
899 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 904 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
900 for (i = 0; i < npages; i++) { 905 for (i = 0; i < npages; i++) {
901 svsk->sk_pages[i] = rqstp->rq_pages[i]; 906 svsk->sk_pages[i] = rqstp->rq_pages[i];
@@ -907,9 +912,9 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
907{ 912{
908 unsigned int i, len, npages; 913 unsigned int i, len, npages;
909 914
910 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 915 if (svsk->sk_datalen == 0)
911 goto out; 916 goto out;
912 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 917 len = svsk->sk_datalen;
913 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 918 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
914 for (i = 0; i < npages; i++) { 919 for (i = 0; i < npages; i++) {
915 BUG_ON(svsk->sk_pages[i] == NULL); 920 BUG_ON(svsk->sk_pages[i] == NULL);
@@ -918,13 +923,12 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
918 } 923 }
919out: 924out:
920 svsk->sk_tcplen = 0; 925 svsk->sk_tcplen = 0;
926 svsk->sk_datalen = 0;
921} 927}
922 928
923/* 929/*
924 * Receive data. 930 * Receive fragment record header.
925 * If we haven't gotten the record length yet, get the next four bytes. 931 * If we haven't gotten the record length yet, get the next four bytes.
926 * Otherwise try to gobble up as much as possible up to the complete
927 * record length.
928 */ 932 */
929static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) 933static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
930{ 934{
@@ -950,32 +954,16 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
950 return -EAGAIN; 954 return -EAGAIN;
951 } 955 }
952 956
953 svsk->sk_reclen = ntohl(svsk->sk_reclen); 957 dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
954 if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) { 958 if (svc_sock_reclen(svsk) + svsk->sk_datalen >
955 /* FIXME: technically, a record can be fragmented, 959 serv->sv_max_mesg) {
956 * and non-terminal fragments will not have the top 960 net_notice_ratelimited("RPC: fragment too large: %d\n",
957 * bit set in the fragment length header. 961 svc_sock_reclen(svsk));
958 * But apparently no known nfs clients send fragmented
959 * records. */
960 net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
961 goto err_delete;
962 }
963
964 svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
965 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
966 if (svsk->sk_reclen > serv->sv_max_mesg) {
967 net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
968 (unsigned long)svsk->sk_reclen);
969 goto err_delete; 962 goto err_delete;
970 } 963 }
971 } 964 }
972 965
973 if (svsk->sk_reclen < 8) 966 return svc_sock_reclen(svsk);
974 goto err_delete; /* client is nuts. */
975
976 len = svsk->sk_reclen;
977
978 return len;
979error: 967error:
980 dprintk("RPC: TCP recv_record got %d\n", len); 968 dprintk("RPC: TCP recv_record got %d\n", len);
981 return len; 969 return len;
@@ -1019,7 +1007,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1019 if (dst->iov_len < src->iov_len) 1007 if (dst->iov_len < src->iov_len)
1020 return -EAGAIN; /* whatever; just giving up. */ 1008 return -EAGAIN; /* whatever; just giving up. */
1021 memcpy(dst->iov_base, src->iov_base, src->iov_len); 1009 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1022 xprt_complete_rqst(req->rq_task, svsk->sk_reclen); 1010 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
1023 rqstp->rq_arg.len = 0; 1011 rqstp->rq_arg.len = 0;
1024 return 0; 1012 return 0;
1025} 1013}
@@ -1038,6 +1026,17 @@ static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
1038 return i; 1026 return i;
1039} 1027}
1040 1028
1029 static void svc_tcp_fragment_received(struct svc_sock *svsk)
1030 {
1031 /* If we have more data, signal svc_xprt_enqueue() to try again */
1032 if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
1033 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1034 dprintk("svc: TCP %s record (%d bytes)\n",
1035 svc_sock_final_rec(svsk) ? "final" : "nonfinal",
1036 svc_sock_reclen(svsk));
1037 svsk->sk_tcplen = 0;
1038 svsk->sk_reclen = 0;
1039 }
1041 1040
1042/* 1041/*
1043 * Receive data from a TCP socket. 1042 * Receive data from a TCP socket.
@@ -1064,29 +1063,39 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1064 goto error; 1063 goto error;
1065 1064
1066 base = svc_tcp_restore_pages(svsk, rqstp); 1065 base = svc_tcp_restore_pages(svsk, rqstp);
1067 want = svsk->sk_reclen - base; 1066 want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
1068 1067
1069 vec = rqstp->rq_vec; 1068 vec = rqstp->rq_vec;
1070 1069
1071 pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], 1070 pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
1072 svsk->sk_reclen); 1071 svsk->sk_datalen + want);
1073 1072
1074 rqstp->rq_respages = &rqstp->rq_pages[pnum]; 1073 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1074 rqstp->rq_next_page = rqstp->rq_respages + 1;
1075 1075
1076 /* Now receive data */ 1076 /* Now receive data */
1077 len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); 1077 len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
1078 if (len >= 0) 1078 if (len >= 0) {
1079 svsk->sk_tcplen += len; 1079 svsk->sk_tcplen += len;
1080 if (len != want) { 1080 svsk->sk_datalen += len;
1081 }
1082 if (len != want || !svc_sock_final_rec(svsk)) {
1081 svc_tcp_save_pages(svsk, rqstp); 1083 svc_tcp_save_pages(svsk, rqstp);
1082 if (len < 0 && len != -EAGAIN) 1084 if (len < 0 && len != -EAGAIN)
1083 goto err_other; 1085 goto err_delete;
1084 dprintk("svc: incomplete TCP record (%d of %d)\n", 1086 if (len == want)
1085 svsk->sk_tcplen, svsk->sk_reclen); 1087 svc_tcp_fragment_received(svsk);
1088 else
1089 dprintk("svc: incomplete TCP record (%d of %d)\n",
1090 (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
1091 svc_sock_reclen(svsk));
1086 goto err_noclose; 1092 goto err_noclose;
1087 } 1093 }
1088 1094
1089 rqstp->rq_arg.len = svsk->sk_reclen; 1095 if (svc_sock_reclen(svsk) < 8)
1096 goto err_delete; /* client is nuts. */
1097
1098 rqstp->rq_arg.len = svsk->sk_datalen;
1090 rqstp->rq_arg.page_base = 0; 1099 rqstp->rq_arg.page_base = 0;
1091 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { 1100 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1092 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; 1101 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
@@ -1103,11 +1112,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1103 len = receive_cb_reply(svsk, rqstp); 1112 len = receive_cb_reply(svsk, rqstp);
1104 1113
1105 /* Reset TCP read info */ 1114 /* Reset TCP read info */
1106 svsk->sk_reclen = 0; 1115 svsk->sk_datalen = 0;
1107 svsk->sk_tcplen = 0; 1116 svc_tcp_fragment_received(svsk);
1108 /* If we have more data, signal svc_xprt_enqueue() to try again */
1109 if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
1110 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1111 1117
1112 if (len < 0) 1118 if (len < 0)
1113 goto error; 1119 goto error;
@@ -1116,15 +1122,14 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1116 if (serv->sv_stats) 1122 if (serv->sv_stats)
1117 serv->sv_stats->nettcpcnt++; 1123 serv->sv_stats->nettcpcnt++;
1118 1124
1119 dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
1120 return rqstp->rq_arg.len; 1125 return rqstp->rq_arg.len;
1121 1126
1122error: 1127error:
1123 if (len != -EAGAIN) 1128 if (len != -EAGAIN)
1124 goto err_other; 1129 goto err_delete;
1125 dprintk("RPC: TCP recvfrom got EAGAIN\n"); 1130 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1126 return 0; 1131 return 0;
1127err_other: 1132err_delete:
1128 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1133 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1129 svsk->sk_xprt.xpt_server->sv_name, -len); 1134 svsk->sk_xprt.xpt_server->sv_name, -len);
1130 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 1135 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1301,6 +1306,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1301 1306
1302 svsk->sk_reclen = 0; 1307 svsk->sk_reclen = 0;
1303 svsk->sk_tcplen = 0; 1308 svsk->sk_tcplen = 0;
1309 svsk->sk_datalen = 0;
1304 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); 1310 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
1305 1311
1306 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1312 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
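The svcsock.c changes above lift the old "no fragmented records" restriction: sk_datalen now accumulates payload across fragments, the 4-byte record-marking header is reparsed per fragment, and only a fragment carrying the last-fragment bit completes the request. A sketch of RFC 1831/5531 record marking as the new code consumes it; the state struct and names are stand-ins for the svc_sock fields:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT	0x80000000u	/* top bit of the marker */
#define SIZE_MASK	0x7fffffffu	/* low 31 bits: fragment length */

struct rec_state { uint32_t datalen; };	/* plays the role of sk_datalen */

/* Returns 1 when a complete record has been accumulated. */
static int on_fragment(struct rec_state *st, uint32_t marker_be,
		       uint32_t bytes_received)
{
	uint32_t marker = ntohl(marker_be);
	uint32_t fraglen = marker & SIZE_MASK;

	st->datalen += bytes_received;
	if (bytes_received < fraglen)
		return 0;	/* partial fragment: save pages, wait for more */
	if (!(marker & LAST_FRAGMENT))
		return 0;	/* fragment complete, record not final yet */
	return 1;		/* final fragment: hand the record to svc */
}

int main(void)
{
	struct rec_state st = { 0 };

	on_fragment(&st, htonl(100), 100);		/* nonfinal fragment */
	if (on_fragment(&st, htonl(LAST_FRAGMENT | 50), 50))
		printf("record complete: %u bytes\n", st.datalen); /* 150 */
	return 0;
}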
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 08f50afd5f2a..56055632f151 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -318,7 +318,10 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
318 318
319 tail = buf->tail; 319 tail = buf->tail;
320 head = buf->head; 320 head = buf->head;
321 BUG_ON (len > head->iov_len); 321
322 WARN_ON_ONCE(len > head->iov_len);
323 if (len > head->iov_len)
324 len = head->iov_len;
322 325
323 /* Shift the tail first */ 326 /* Shift the tail first */
324 if (tail->iov_len != 0) { 327 if (tail->iov_len != 0) {
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index bd462a532acf..846c34fdee9f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -430,21 +430,23 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
430 */ 430 */
431void xprt_release_rqst_cong(struct rpc_task *task) 431void xprt_release_rqst_cong(struct rpc_task *task)
432{ 432{
433 __xprt_put_cong(task->tk_xprt, task->tk_rqstp); 433 struct rpc_rqst *req = task->tk_rqstp;
434
435 __xprt_put_cong(req->rq_xprt, req);
434} 436}
435EXPORT_SYMBOL_GPL(xprt_release_rqst_cong); 437EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
436 438
437/** 439/**
438 * xprt_adjust_cwnd - adjust transport congestion window 440 * xprt_adjust_cwnd - adjust transport congestion window
441 * @xprt: pointer to xprt
439 * @task: recently completed RPC request used to adjust window 442 * @task: recently completed RPC request used to adjust window
440 * @result: result code of completed RPC request 443 * @result: result code of completed RPC request
441 * 444 *
442 * We use a time-smoothed congestion estimator to avoid heavy oscillation. 445 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
443 */ 446 */
444void xprt_adjust_cwnd(struct rpc_task *task, int result) 447void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
445{ 448{
446 struct rpc_rqst *req = task->tk_rqstp; 449 struct rpc_rqst *req = task->tk_rqstp;
447 struct rpc_xprt *xprt = task->tk_xprt;
448 unsigned long cwnd = xprt->cwnd; 450 unsigned long cwnd = xprt->cwnd;
449 451
450 if (result >= 0 && cwnd <= xprt->cong) { 452 if (result >= 0 && cwnd <= xprt->cong) {
@@ -695,7 +697,7 @@ out_abort:
695 */ 697 */
696void xprt_connect(struct rpc_task *task) 698void xprt_connect(struct rpc_task *task)
697{ 699{
698 struct rpc_xprt *xprt = task->tk_xprt; 700 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
699 701
700 dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, 702 dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
701 xprt, (xprt_connected(xprt) ? "is" : "is not")); 703 xprt, (xprt_connected(xprt) ? "is" : "is not"));
@@ -722,13 +724,13 @@ void xprt_connect(struct rpc_task *task)
722 if (xprt_test_and_set_connecting(xprt)) 724 if (xprt_test_and_set_connecting(xprt))
723 return; 725 return;
724 xprt->stat.connect_start = jiffies; 726 xprt->stat.connect_start = jiffies;
725 xprt->ops->connect(task); 727 xprt->ops->connect(xprt, task);
726 } 728 }
727} 729}
728 730
729static void xprt_connect_status(struct rpc_task *task) 731static void xprt_connect_status(struct rpc_task *task)
730{ 732{
731 struct rpc_xprt *xprt = task->tk_xprt; 733 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
732 734
733 if (task->tk_status == 0) { 735 if (task->tk_status == 0) {
734 xprt->stat.connect_count++; 736 xprt->stat.connect_count++;
@@ -832,7 +834,7 @@ static void xprt_timer(struct rpc_task *task)
832 spin_lock_bh(&xprt->transport_lock); 834 spin_lock_bh(&xprt->transport_lock);
833 if (!req->rq_reply_bytes_recvd) { 835 if (!req->rq_reply_bytes_recvd) {
834 if (xprt->ops->timer) 836 if (xprt->ops->timer)
835 xprt->ops->timer(task); 837 xprt->ops->timer(xprt, task);
836 } else 838 } else
837 task->tk_status = 0; 839 task->tk_status = 0;
838 spin_unlock_bh(&xprt->transport_lock); 840 spin_unlock_bh(&xprt->transport_lock);
@@ -1091,7 +1093,7 @@ EXPORT_SYMBOL_GPL(xprt_free);
1091 */ 1093 */
1092void xprt_reserve(struct rpc_task *task) 1094void xprt_reserve(struct rpc_task *task)
1093{ 1095{
1094 struct rpc_xprt *xprt = task->tk_xprt; 1096 struct rpc_xprt *xprt;
1095 1097
1096 task->tk_status = 0; 1098 task->tk_status = 0;
1097 if (task->tk_rqstp != NULL) 1099 if (task->tk_rqstp != NULL)
@@ -1099,7 +1101,10 @@ void xprt_reserve(struct rpc_task *task)
1099 1101
1100 task->tk_timeout = 0; 1102 task->tk_timeout = 0;
1101 task->tk_status = -EAGAIN; 1103 task->tk_status = -EAGAIN;
1104 rcu_read_lock();
1105 xprt = rcu_dereference(task->tk_client->cl_xprt);
1102 xprt->ops->alloc_slot(xprt, task); 1106 xprt->ops->alloc_slot(xprt, task);
1107 rcu_read_unlock();
1103} 1108}
1104 1109
1105static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 1110static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
@@ -1136,10 +1141,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
1136void xprt_release(struct rpc_task *task) 1141void xprt_release(struct rpc_task *task)
1137{ 1142{
1138 struct rpc_xprt *xprt; 1143 struct rpc_xprt *xprt;
1139 struct rpc_rqst *req; 1144 struct rpc_rqst *req = task->tk_rqstp;
1140 1145
1141 if (!(req = task->tk_rqstp)) 1146 if (req == NULL) {
1147 if (task->tk_client) {
1148 rcu_read_lock();
1149 xprt = rcu_dereference(task->tk_client->cl_xprt);
1150 if (xprt->snd_task == task)
1151 xprt_release_write(xprt, task);
1152 rcu_read_unlock();
1153 }
1142 return; 1154 return;
1155 }
1143 1156
1144 xprt = req->rq_xprt; 1157 xprt = req->rq_xprt;
1145 if (task->tk_ops->rpc_count_stats != NULL) 1158 if (task->tk_ops->rpc_count_stats != NULL)
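A theme running through the xprt.c, xprtrdma, and xprtsock.c hunks: the cached task->tk_xprt shortcut is retired in favour of either req->rq_xprt, which is valid for the life of the request, or an rcu_dereference() of clnt->cl_xprt taken under rcu_read_lock(), as xprt_reserve() above now does; ops callbacks likewise receive the xprt explicitly. A loose userspace approximation of the reader side, with a C11 atomic load standing in for rcu_dereference():

#include <stdio.h>
#include <stdatomic.h>

struct xprt   { const char *name; };
struct client { _Atomic(struct xprt *) cl_xprt; };

static void reserve_slot(struct client *clnt)
{
	/* rcu_read_lock() would bracket this region in the kernel */
	struct xprt *xprt = atomic_load(&clnt->cl_xprt); /* ~rcu_dereference */

	printf("alloc slot on %s\n", xprt->name);
	/* rcu_read_unlock() */
}

int main(void)
{
	struct xprt tcp = { "tcp" };
	struct client clnt;

	atomic_store(&clnt.cl_xprt, &tcp);	/* ~rcu_assign_pointer */
	reserve_slot(&clnt);
	return 0;
}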
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 558fbab574f0..e03725bfe2b8 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -171,7 +171,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
171 struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type) 171 struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
172{ 172{
173 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 173 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
174 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt); 174 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
175 int nsegs, nchunks = 0; 175 int nsegs, nchunks = 0;
176 unsigned int pos; 176 unsigned int pos;
177 struct rpcrdma_mr_seg *seg = req->rl_segments; 177 struct rpcrdma_mr_seg *seg = req->rl_segments;
@@ -366,7 +366,7 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
366int 366int
367rpcrdma_marshal_req(struct rpc_rqst *rqst) 367rpcrdma_marshal_req(struct rpc_rqst *rqst)
368{ 368{
369 struct rpc_xprt *xprt = rqst->rq_task->tk_xprt; 369 struct rpc_xprt *xprt = rqst->rq_xprt;
370 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 370 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
371 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 371 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
372 char *base; 372 char *base;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 41cb63b623df..0ce75524ed21 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -521,11 +521,11 @@ next_sge:
521 rqstp->rq_pages[ch_no] = NULL; 521 rqstp->rq_pages[ch_no] = NULL;
522 522
523 /* 523 /*
524 * Detach res pages. svc_release must see a resused count of 524 * Detach res pages. If svc_release sees any it will attempt to
525 * zero or it will attempt to put them. 525 * put them.
526 */ 526 */
527 while (rqstp->rq_resused) 527 while (rqstp->rq_next_page != rqstp->rq_respages)
528 rqstp->rq_respages[--rqstp->rq_resused] = NULL; 528 *(--rqstp->rq_next_page) = NULL;
529 529
530 return err; 530 return err;
531} 531}
@@ -550,7 +550,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
550 550
551 /* rq_respages starts after the last arg page */ 551 /* rq_respages starts after the last arg page */
552 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; 552 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
553 rqstp->rq_resused = 0; 553 rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
554 554
555 /* Rebuild rq_arg head and tail. */ 555 /* Rebuild rq_arg head and tail. */
556 rqstp->rq_arg.head[0] = head->arg.head[0]; 556 rqstp->rq_arg.head[0] = head->arg.head[0];
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 42eb7ba0b903..c1d124dc772b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -548,6 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
548 int sge_no; 548 int sge_no;
549 int sge_bytes; 549 int sge_bytes;
550 int page_no; 550 int page_no;
551 int pages;
551 int ret; 552 int ret;
552 553
553 /* Post a recv buffer to handle another request. */ 554 /* Post a recv buffer to handle another request. */
@@ -611,7 +612,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
611 * respages array. They are our pages until the I/O 612 * respages array. They are our pages until the I/O
612 * completes. 613 * completes.
613 */ 614 */
614 for (page_no = 0; page_no < rqstp->rq_resused; page_no++) { 615 pages = rqstp->rq_next_page - rqstp->rq_respages;
616 for (page_no = 0; page_no < pages; page_no++) {
615 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; 617 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
616 ctxt->count++; 618 ctxt->count++;
617 rqstp->rq_respages[page_no] = NULL; 619 rqstp->rq_respages[page_no] = NULL;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index c9aa7a35f3bf..d0074289708e 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -426,9 +426,8 @@ xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
426} 426}
427 427
428static void 428static void
429xprt_rdma_connect(struct rpc_task *task) 429xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
430{ 430{
431 struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
432 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 431 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
433 432
434 if (r_xprt->rx_ep.rep_connected != 0) { 433 if (r_xprt->rx_ep.rep_connected != 0) {
@@ -475,7 +474,7 @@ xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
475static void * 474static void *
476xprt_rdma_allocate(struct rpc_task *task, size_t size) 475xprt_rdma_allocate(struct rpc_task *task, size_t size)
477{ 476{
478 struct rpc_xprt *xprt = task->tk_xprt; 477 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
479 struct rpcrdma_req *req, *nreq; 478 struct rpcrdma_req *req, *nreq;
480 479
481 req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf); 480 req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
@@ -627,7 +626,7 @@ static int
627xprt_rdma_send_request(struct rpc_task *task) 626xprt_rdma_send_request(struct rpc_task *task)
628{ 627{
629 struct rpc_rqst *rqst = task->tk_rqstp; 628 struct rpc_rqst *rqst = task->tk_rqstp;
630 struct rpc_xprt *xprt = task->tk_xprt; 629 struct rpc_xprt *xprt = rqst->rq_xprt;
631 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 630 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
632 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 631 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
633 632
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 9a66c95b5837..cc1445dc1d1a 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -235,13 +235,13 @@ struct rpcrdma_create_data_internal {
235}; 235};
236 236
237#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \ 237#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
238 (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_rsize) 238 (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
239 239
240#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\ 240#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
241 (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_wsize) 241 (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
242 242
243#define RPCRDMA_INLINE_PAD_VALUE(rq)\ 243#define RPCRDMA_INLINE_PAD_VALUE(rq)\
244 rpcx_to_rdmad(rq->rq_task->tk_xprt).padding 244 rpcx_to_rdmad(rq->rq_xprt).padding
245 245
246/* 246/*
247 * Statistics for RPCRDMA 247 * Statistics for RPCRDMA
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75853cabf4c9..37cbda63f45c 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -770,7 +770,7 @@ static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
770 goto out_release; 770 goto out_release;
771 if (req->rq_bytes_sent == req->rq_snd_buf.len) 771 if (req->rq_bytes_sent == req->rq_snd_buf.len)
772 goto out_release; 772 goto out_release;
773 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state); 773 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
774out_release: 774out_release:
775 xprt_release_xprt(xprt, task); 775 xprt_release_xprt(xprt, task);
776} 776}
@@ -1005,7 +1005,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
1005 1005
1006 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 1006 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1007 1007
1008 xprt_adjust_cwnd(task, copied); 1008 xprt_adjust_cwnd(xprt, task, copied);
1009 xprt_complete_rqst(task, copied); 1009 xprt_complete_rqst(task, copied);
1010 1010
1011 out_unlock: 1011 out_unlock:
@@ -1646,9 +1646,9 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t
1646 * 1646 *
1647 * Adjust the congestion window after a retransmit timeout has occurred. 1647 * Adjust the congestion window after a retransmit timeout has occurred.
1648 */ 1648 */
1649static void xs_udp_timer(struct rpc_task *task) 1649static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1650{ 1650{
1651 xprt_adjust_cwnd(task, -ETIMEDOUT); 1651 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1652} 1652}
1653 1653
1654static unsigned short xs_get_random_port(void) 1654static unsigned short xs_get_random_port(void)
@@ -1731,7 +1731,9 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1731 */ 1731 */
1732static void xs_local_rpcbind(struct rpc_task *task) 1732static void xs_local_rpcbind(struct rpc_task *task)
1733{ 1733{
1734 xprt_set_bound(task->tk_xprt); 1734 rcu_read_lock();
1735 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
1736 rcu_read_unlock();
1735} 1737}
1736 1738
1737static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) 1739static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
@@ -1746,7 +1748,6 @@ static inline void xs_reclassify_socketu(struct socket *sock)
1746{ 1748{
1747 struct sock *sk = sock->sk; 1749 struct sock *sk = sock->sk;
1748 1750
1749 BUG_ON(sock_owned_by_user(sk));
1750 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1751 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1751 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); 1752 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1752} 1753}
@@ -1755,7 +1756,6 @@ static inline void xs_reclassify_socket4(struct socket *sock)
1755{ 1756{
1756 struct sock *sk = sock->sk; 1757 struct sock *sk = sock->sk;
1757 1758
1758 BUG_ON(sock_owned_by_user(sk));
1759 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1759 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1760 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1760 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1761} 1761}
@@ -1764,13 +1764,16 @@ static inline void xs_reclassify_socket6(struct socket *sock)
1764{ 1764{
1765 struct sock *sk = sock->sk; 1765 struct sock *sk = sock->sk;
1766 1766
1767 BUG_ON(sock_owned_by_user(sk));
1768 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1767 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1769 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1768 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1770} 1769}
1771 1770
1772static inline void xs_reclassify_socket(int family, struct socket *sock) 1771static inline void xs_reclassify_socket(int family, struct socket *sock)
1773{ 1772{
1773 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1774 if (sock_owned_by_user(sock->sk))
1775 return;
1776
1774 switch (family) { 1777 switch (family) {
1775 case AF_LOCAL: 1778 case AF_LOCAL:
1776 xs_reclassify_socketu(sock); 1779 xs_reclassify_socketu(sock);
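The xs_reclassify_socket*() hunks hoist a triplicated precondition out of the per-family helpers into the single dispatcher, downgrading it to a recoverable check along the way. A sketch of the shape; family constants and names are illustrative:

#include <stdio.h>

static void reclassify_inet(void)  { puts("relabel AF_INET lockdep class"); }
static void reclassify_inet6(void) { puts("relabel AF_INET6 lockdep class"); }

/* One check in the dispatcher replaces a BUG_ON() in every helper. */
static void reclassify(int family, int owned_by_user)
{
	if (owned_by_user)	/* unsafe to relabel a locked socket: bail */
		return;

	switch (family) {
	case 2:  reclassify_inet();  break;	/* AF_INET */
	case 10: reclassify_inet6(); break;	/* AF_INET6 */
	}
}

int main(void)
{
	reclassify(2, 0);	/* relabelled */
	reclassify(10, 1);	/* refused, no crash */
	return 0;
}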
@@ -1901,6 +1904,10 @@ static void xs_local_setup_socket(struct work_struct *work)
1901 dprintk("RPC: xprt %p: socket %s does not exist\n", 1904 dprintk("RPC: xprt %p: socket %s does not exist\n",
1902 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 1905 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1903 break; 1906 break;
1907 case -ECONNREFUSED:
1908 dprintk("RPC: xprt %p: connection refused for %s\n",
1909 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1910 break;
1904 default: 1911 default:
1905 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n", 1912 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1906 __func__, -status, 1913 __func__, -status,
@@ -2200,6 +2207,7 @@ out:
2200 2207
2201/** 2208/**
2202 * xs_connect - connect a socket to a remote endpoint 2209 * xs_connect - connect a socket to a remote endpoint
2210 * @xprt: pointer to transport structure
2203 * @task: address of RPC task that manages state of connect request 2211 * @task: address of RPC task that manages state of connect request
2204 * 2212 *
2205 * TCP: If the remote end dropped the connection, delay reconnecting. 2213 * TCP: If the remote end dropped the connection, delay reconnecting.
@@ -2211,9 +2219,8 @@ out:
2211 * If a UDP socket connect fails, the delay behavior here prevents 2219 * If a UDP socket connect fails, the delay behavior here prevents
2212 * retry floods (hard mounts). 2220 * retry floods (hard mounts).
2213 */ 2221 */
2214static void xs_connect(struct rpc_task *task) 2222static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2215{ 2223{
2216 struct rpc_xprt *xprt = task->tk_xprt;
2217 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2224 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2218 2225
2219 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { 2226 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
@@ -2329,9 +2336,11 @@ static void *bc_malloc(struct rpc_task *task, size_t size)
2329 struct page *page; 2336 struct page *page;
2330 struct rpc_buffer *buf; 2337 struct rpc_buffer *buf;
2331 2338
2332 BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2339 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2333 page = alloc_page(GFP_KERNEL); 2340 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2341 return NULL;
2334 2342
2343 page = alloc_page(GFP_KERNEL);
2335 if (!page) 2344 if (!page)
2336 return NULL; 2345 return NULL;
2337 2346
@@ -2393,7 +2402,6 @@ static int bc_send_request(struct rpc_task *task)
2393{ 2402{
2394 struct rpc_rqst *req = task->tk_rqstp; 2403 struct rpc_rqst *req = task->tk_rqstp;
2395 struct svc_xprt *xprt; 2404 struct svc_xprt *xprt;
2396 struct svc_sock *svsk;
2397 u32 len; 2405 u32 len;
2398 2406
2399 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); 2407 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
@@ -2401,7 +2409,6 @@ static int bc_send_request(struct rpc_task *task)
2401 * Get the server socket associated with this callback xprt 2409 * Get the server socket associated with this callback xprt
2402 */ 2410 */
2403 xprt = req->rq_xprt->bc_xprt; 2411 xprt = req->rq_xprt->bc_xprt;
2404 svsk = container_of(xprt, struct svc_sock, sk_xprt);
2405 2412
2406 /* 2413 /*
2407 * Grab the mutex to serialize data as the connection is shared 2414 * Grab the mutex to serialize data as the connection is shared
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index bc41bd31eadc..4f99600a5fed 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig TIPC 5menuconfig TIPC
6 tristate "The TIPC Protocol (EXPERIMENTAL)" 6 tristate "The TIPC Protocol"
7 depends on INET && EXPERIMENTAL 7 depends on INET
8 ---help--- 8 ---help---
9 The Transparent Inter Process Communication (TIPC) protocol is 9 The Transparent Inter Process Communication (TIPC) protocol is
10 specially designed for intra cluster communication. This protocol 10 specially designed for intra cluster communication. This protocol
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 54f89f90ac33..2655c9f4ecad 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -774,6 +774,7 @@ void tipc_bclink_init(void)
774 bcl->owner = &bclink->node; 774 bcl->owner = &bclink->node;
775 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 775 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
776 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 776 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
777 spin_lock_init(&bcbearer->bearer.lock);
777 bcl->b_ptr = &bcbearer->bearer; 778 bcl->b_ptr = &bcbearer->bearer;
778 bcl->state = WORKING_WORKING; 779 bcl->state = WORKING_WORKING;
779 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); 780 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 9b4e4833a484..a9622b6cd916 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,8 @@
43#define SS_LISTENING -1 /* socket is listening */ 43#define SS_LISTENING -1 /* socket is listening */
44#define SS_READY -2 /* socket is connectionless */ 44#define SS_READY -2 /* socket is connectionless */
45 45
46#define OVERLOAD_LIMIT_BASE 10000 46#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
47 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
47#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 48#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
48 49
49struct tipc_sock { 50struct tipc_sock {
@@ -129,19 +130,6 @@ static void advance_rx_queue(struct sock *sk)
129} 130}
130 131
131/** 132/**
132 * discard_rx_queue - discard all buffers in socket receive queue
133 *
134 * Caller must hold socket lock
135 */
136static void discard_rx_queue(struct sock *sk)
137{
138 struct sk_buff *buf;
139
140 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
141 kfree_skb(buf);
142}
143
144/**
145 * reject_rx_queue - reject all buffers in socket receive queue 133 * reject_rx_queue - reject all buffers in socket receive queue
146 * 134 *
147 * Caller must hold socket lock 135 * Caller must hold socket lock
@@ -215,7 +203,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
215 203
216 sock_init_data(sock, sk); 204 sock_init_data(sock, sk);
217 sk->sk_backlog_rcv = backlog_rcv; 205 sk->sk_backlog_rcv = backlog_rcv;
218 sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
219 sk->sk_data_ready = tipc_data_ready; 206 sk->sk_data_ready = tipc_data_ready;
220 sk->sk_write_space = tipc_write_space; 207 sk->sk_write_space = tipc_write_space;
221 tipc_sk(sk)->p = tp_ptr; 208 tipc_sk(sk)->p = tp_ptr;
@@ -292,7 +279,7 @@ static int release(struct socket *sock)
292 res = tipc_deleteport(tport->ref); 279 res = tipc_deleteport(tport->ref);
293 280
294 /* Discard any remaining (connection-based) messages in receive queue */ 281 /* Discard any remaining (connection-based) messages in receive queue */
295 discard_rx_queue(sk); 282 __skb_queue_purge(&sk->sk_receive_queue);
296 283
297 /* Reject any messages that accumulated in backlog queue */ 284 /* Reject any messages that accumulated in backlog queue */
298 sock->state = SS_DISCONNECTING; 285 sock->state = SS_DISCONNECTING;
@@ -516,8 +503,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
516 if (unlikely((m->msg_namelen < sizeof(*dest)) || 503 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
517 (dest->family != AF_TIPC))) 504 (dest->family != AF_TIPC)))
518 return -EINVAL; 505 return -EINVAL;
519 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 506 if (total_len > TIPC_MAX_USER_MSG_SIZE)
520 (m->msg_iovlen > (unsigned int)INT_MAX))
521 return -EMSGSIZE; 507 return -EMSGSIZE;
522 508
523 if (iocb) 509 if (iocb)
@@ -625,8 +611,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
625 if (unlikely(dest)) 611 if (unlikely(dest))
626 return send_msg(iocb, sock, m, total_len); 612 return send_msg(iocb, sock, m, total_len);
627 613
628 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 614 if (total_len > TIPC_MAX_USER_MSG_SIZE)
629 (m->msg_iovlen > (unsigned int)INT_MAX))
630 return -EMSGSIZE; 615 return -EMSGSIZE;
631 616
632 if (iocb) 617 if (iocb)
@@ -711,8 +696,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
711 goto exit; 696 goto exit;
712 } 697 }
713 698
714 if ((total_len > (unsigned int)INT_MAX) || 699 if (total_len > (unsigned int)INT_MAX) {
715 (m->msg_iovlen > (unsigned int)INT_MAX)) {
716 res = -EMSGSIZE; 700 res = -EMSGSIZE;
717 goto exit; 701 goto exit;
718 } 702 }
@@ -1155,34 +1139,6 @@ static void tipc_data_ready(struct sock *sk, int len)
1155} 1139}
1156 1140
1157/** 1141/**
1158 * rx_queue_full - determine if receive queue can accept another message
1159 * @msg: message to be added to queue
1160 * @queue_size: current size of queue
1161 * @base: nominal maximum size of queue
1162 *
1163 * Returns 1 if queue is unable to accept message, 0 otherwise
1164 */
1165static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1166{
1167 u32 threshold;
1168 u32 imp = msg_importance(msg);
1169
1170 if (imp == TIPC_LOW_IMPORTANCE)
1171 threshold = base;
1172 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1173 threshold = base * 2;
1174 else if (imp == TIPC_HIGH_IMPORTANCE)
1175 threshold = base * 100;
1176 else
1177 return 0;
1178
1179 if (msg_connected(msg))
1180 threshold *= 4;
1181
1182 return queue_size >= threshold;
1183}
1184
1185/**
1186 * filter_connect - Handle all incoming messages for a connection-based socket 1142 * filter_connect - Handle all incoming messages for a connection-based socket
1187 * @tsock: TIPC socket 1143 * @tsock: TIPC socket
1188 * @msg: message 1144 * @msg: message
@@ -1260,6 +1216,36 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1260} 1216}
1261 1217
1262/** 1218/**
1219 * rcvbuf_limit - get proper overload limit of socket receive queue
1220 * @sk: socket
1221 * @buf: message
1222 *
 1223 * For all connection-oriented messages, irrespective of importance,
 1224 * the default overload value (i.e. 67MB) is used as the limit.
 1225 *
 1226 * For all connectionless messages, the default queue limits are
 1227 * as below:
1228 *
1229 * TIPC_LOW_IMPORTANCE (5MB)
1230 * TIPC_MEDIUM_IMPORTANCE (10MB)
1231 * TIPC_HIGH_IMPORTANCE (20MB)
1232 * TIPC_CRITICAL_IMPORTANCE (40MB)
1233 *
1234 * Returns overload limit according to corresponding message importance
1235 */
1236static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1237{
1238 struct tipc_msg *msg = buf_msg(buf);
1239 unsigned int limit;
1240
1241 if (msg_connected(msg))
1242 limit = CONN_OVERLOAD_LIMIT;
1243 else
1244 limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
1245 return limit;
1246}
1247
1248/**
1263 * filter_rcv - validate incoming message 1249 * filter_rcv - validate incoming message
1264 * @sk: socket 1250 * @sk: socket
1265 * @buf: message 1251 * @buf: message
@@ -1275,7 +1261,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1275{ 1261{
1276 struct socket *sock = sk->sk_socket; 1262 struct socket *sock = sk->sk_socket;
1277 struct tipc_msg *msg = buf_msg(buf); 1263 struct tipc_msg *msg = buf_msg(buf);
1278 u32 recv_q_len; 1264 unsigned int limit = rcvbuf_limit(sk, buf);
1279 u32 res = TIPC_OK; 1265 u32 res = TIPC_OK;
1280 1266
1281 /* Reject message if it is wrong sort of message for socket */ 1267 /* Reject message if it is wrong sort of message for socket */
@@ -1292,15 +1278,13 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1292 } 1278 }
1293 1279
1294 /* Reject message if there isn't room to queue it */ 1280 /* Reject message if there isn't room to queue it */
1295 recv_q_len = skb_queue_len(&sk->sk_receive_queue); 1281 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
1296 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) { 1282 return TIPC_ERR_OVERLOAD;
1297 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
1298 return TIPC_ERR_OVERLOAD;
1299 }
1300 1283
1301 /* Enqueue message (finally!) */ 1284 /* Enqueue message */
1302 TIPC_SKB_CB(buf)->handle = 0; 1285 TIPC_SKB_CB(buf)->handle = 0;
1303 __skb_queue_tail(&sk->sk_receive_queue, buf); 1286 __skb_queue_tail(&sk->sk_receive_queue, buf);
1287 skb_set_owner_r(buf, sk);
1304 1288
1305 sk->sk_data_ready(sk, 0); 1289 sk->sk_data_ready(sk, 0);
1306 return TIPC_OK; 1290 return TIPC_OK;
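To see where the 5/10/20/40 MB figures in the rcvbuf_limit() comment come from, here is a hypothetical stand-alone check. The value 163840 is an assumed sk_rcvbuf default that reproduces those numbers; CONN_OVERLOAD_LIMIT above caps connection-oriented traffic the same way, independent of importance.

#include <stdio.h>

int main(void)
{
	unsigned int sk_rcvbuf = 163840;	/* assumed default, ~160 KB */
	int imp;

	/* importance 0..3 maps to LOW..CRITICAL; limit = rcvbuf << (imp + 5) */
	for (imp = 0; imp <= 3; imp++)
		printf("importance %d -> %u bytes\n",
		       imp, sk_rcvbuf << (imp + 5));
	return 0;
}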
@@ -1349,7 +1333,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1349 if (!sock_owned_by_user(sk)) { 1333 if (!sock_owned_by_user(sk)) {
1350 res = filter_rcv(sk, buf); 1334 res = filter_rcv(sk, buf);
1351 } else { 1335 } else {
1352 if (sk_add_backlog(sk, buf, sk->sk_rcvbuf)) 1336 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
1353 res = TIPC_ERR_OVERLOAD; 1337 res = TIPC_ERR_OVERLOAD;
1354 else 1338 else
1355 res = TIPC_OK; 1339 res = TIPC_OK;
@@ -1583,6 +1567,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1583 } else { 1567 } else {
1584 __skb_dequeue(&sk->sk_receive_queue); 1568 __skb_dequeue(&sk->sk_receive_queue);
1585 __skb_queue_head(&new_sk->sk_receive_queue, buf); 1569 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1570 skb_set_owner_r(buf, new_sk);
1586 } 1571 }
1587 release_sock(new_sk); 1572 release_sock(new_sk);
1588 1573
@@ -1637,7 +1622,7 @@ restart:
1637 case SS_DISCONNECTING: 1622 case SS_DISCONNECTING:
1638 1623
1639 /* Discard any unreceived messages */ 1624 /* Discard any unreceived messages */
1640 discard_rx_queue(sk); 1625 __skb_queue_purge(&sk->sk_receive_queue);
1641 1626
1642 /* Wake up anyone sleeping in poll */ 1627 /* Wake up anyone sleeping in poll */
1643 sk->sk_state_change(sk); 1628 sk->sk_state_change(sk);
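The deleted discard_rx_queue() duplicated an existing core primitive. As a sketch, __skb_queue_purge() performs effectively the same drain (the caller must still hold the socket lock, as before):

static inline void skb_queue_purge_sketch(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	/* Unlocked dequeue-and-free loop, same shape as the removed helper. */
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}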
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5b5c876c80e9..87d284289012 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2402,7 +2402,7 @@ static int __net_init unix_net_init(struct net *net)
2402 goto out; 2402 goto out;
2403 2403
2404#ifdef CONFIG_PROC_FS 2404#ifdef CONFIG_PROC_FS
2405 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) { 2405 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2406 unix_sysctl_unregister(net); 2406 unix_sysctl_unregister(net);
2407 goto out; 2407 goto out;
2408 } 2408 }
@@ -2415,7 +2415,7 @@ out:
2415static void __net_exit unix_net_exit(struct net *net) 2415static void __net_exit unix_net_exit(struct net *net)
2416{ 2416{
2417 unix_sysctl_unregister(net); 2417 unix_sysctl_unregister(net);
2418 proc_net_remove(net, "unix"); 2418 remove_proc_entry("unix", net->proc_net);
2419} 2419}
2420 2420
2421static struct pernet_operations unix_net_ops = { 2421static struct pernet_operations unix_net_ops = {
@@ -2426,9 +2426,8 @@ static struct pernet_operations unix_net_ops = {
2426static int __init af_unix_init(void) 2426static int __init af_unix_init(void)
2427{ 2427{
2428 int rc = -1; 2428 int rc = -1;
2429 struct sk_buff *dummy_skb;
2430 2429
2431 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)); 2430 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2432 2431
2433 rc = proto_register(&unix_proto, 1); 2432 rc = proto_register(&unix_proto, 1);
2434 if (rc != 0) { 2433 if (rc != 0) {
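The af_unix change above drops the dummy skb pointer because FIELD_SIZEOF() can measure a struct member without any object. A minimal sketch of the idiom; check_cb_fits is a hypothetical wrapper, and FIELD_SIZEOF is shown as defined in linux/kernel.h:

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

static inline void check_cb_fits(void)
{
	/* Compile-time assertion: unix_skb_parms must fit in skb->cb[]. */
	BUILD_BUG_ON(sizeof(struct unix_skb_parms) >
		     FIELD_SIZEOF(struct sk_buff, cb));
}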
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
new file mode 100644
index 000000000000..b5fa7e40cdcb
--- /dev/null
+++ b/net/vmw_vsock/Kconfig
@@ -0,0 +1,28 @@
1#
2# Vsock protocol
3#
4
5config VSOCKETS
6 tristate "Virtual Socket protocol"
7 help
8 Virtual Socket Protocol is a socket protocol similar to TCP/IP
 9 allowing communication between Virtual Machines and the hypervisor
10 or host.
11
12 You should also select one or more hypervisor-specific transports
13 below.
14
15 To compile this driver as a module, choose M here: the module
16 will be called vsock. If unsure, say N.
17
18config VMWARE_VMCI_VSOCKETS
19 tristate "VMware VMCI transport for Virtual Sockets"
20 depends on VSOCKETS && VMWARE_VMCI
21 help
22 This module implements a VMCI transport for Virtual Sockets.
23
24 Enable this transport if your Virtual Machine runs on a VMware
25 hypervisor.
26
27 To compile this driver as a module, choose M here: the module
28 will be called vmw_vsock_vmci_transport. If unsure, say N.
diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile
new file mode 100644
index 000000000000..2ce52d70f224
--- /dev/null
+++ b/net/vmw_vsock/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_VSOCKETS) += vsock.o
2obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
3
4vsock-y += af_vsock.o vsock_addr.o
5
6vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
7 vmci_transport_notify_qstate.o
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
new file mode 100644
index 000000000000..ca511c4f388a
--- /dev/null
+++ b/net/vmw_vsock/af_vsock.c
@@ -0,0 +1,2012 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16/* Implementation notes:
17 *
18 * - There are two kinds of sockets: those created by user action (such as
19 * calling socket(2)) and those created by incoming connection request packets.
20 *
21 * - There are two "global" tables, one for bound sockets (sockets that have
22 * specified an address that they are responsible for) and one for connected
23 * sockets (sockets that have established a connection with another socket).
24 * These tables are "global" in that all sockets on the system are placed
 25 * within them. Note, though, that the bound table contains an extra entry
26 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
27 * that list. The bound table is used solely for lookup of sockets when packets
28 * are received and that's not necessary for SOCK_DGRAM sockets since we create
29 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
30 * sockets out of the bound hash buckets will reduce the chance of collisions
31 * when looking for SOCK_STREAM sockets and prevents us from having to check the
32 * socket type in the hash table lookups.
33 *
34 * - Sockets created by user action will either be "client" sockets that
35 * initiate a connection or "server" sockets that listen for connections; we do
36 * not support simultaneous connects (two "client" sockets connecting).
37 *
38 * - "Server" sockets are referred to as listener sockets throughout this
39 * implementation because they are in the SS_LISTEN state. When a connection
40 * request is received (the second kind of socket mentioned above), we create a
41 * new socket and refer to it as a pending socket. These pending sockets are
42 * placed on the pending connection list of the listener socket. When future
43 * packets are received for the address the listener socket is bound to, we
44 * check if the source of the packet is from one that has an existing pending
45 * connection. If it does, we process the packet for the pending socket. When
46 * that socket reaches the connected state, it is removed from the listener
47 * socket's pending list and enqueued in the listener socket's accept queue.
48 * Callers of accept(2) will accept connected sockets from the listener socket's
49 * accept queue. If the socket cannot be accepted for some reason then it is
50 * marked rejected. Once the connection is accepted, it is owned by the user
51 * process and the responsibility for cleanup falls with that user process.
52 *
53 * - It is possible that these pending sockets will never reach the connected
54 * state; in fact, we may never receive another packet after the connection
55 * request. Because of this, we must schedule a cleanup function to run in the
56 * future, after some amount of time passes where a connection should have been
57 * established. This function ensures that the socket is off all lists so it
58 * cannot be retrieved, then drops all references to the socket so it is cleaned
59 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
 60 * function will also clean up rejected sockets, those that reach the connected
61 * state but leave it before they have been accepted.
62 *
63 * - Sockets created by user action will be cleaned up when the user process
64 * calls close(2), causing our release implementation to be called. Our release
65 * implementation will perform some cleanup then drop the last reference so our
66 * sk_destruct implementation is invoked. Our sk_destruct implementation will
67 * perform additional cleanup that's common for both types of sockets.
68 *
69 * - A socket's reference count is what ensures that the structure won't be
70 * freed. Each entry in a list (such as the "global" bound and connected tables
71 * and the listener socket's pending list and connected queue) ensures a
72 * reference. When we defer work until process context and pass a socket as our
73 * argument, we must ensure the reference count is increased to ensure the
74 * socket isn't freed before the function is run; the deferred function will
75 * then drop the reference.
76 */
77
78#include <linux/types.h>
79#include <linux/bitops.h>
80#include <linux/cred.h>
81#include <linux/init.h>
82#include <linux/io.h>
83#include <linux/kernel.h>
84#include <linux/kmod.h>
85#include <linux/list.h>
86#include <linux/miscdevice.h>
87#include <linux/module.h>
88#include <linux/mutex.h>
89#include <linux/net.h>
90#include <linux/poll.h>
91#include <linux/skbuff.h>
92#include <linux/smp.h>
93#include <linux/socket.h>
94#include <linux/stddef.h>
95#include <linux/unistd.h>
96#include <linux/wait.h>
97#include <linux/workqueue.h>
98#include <net/sock.h>
99
100#include "af_vsock.h"
101
102static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
103static void vsock_sk_destruct(struct sock *sk);
104static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
105
106/* Protocol family. */
107static struct proto vsock_proto = {
108 .name = "AF_VSOCK",
109 .owner = THIS_MODULE,
110 .obj_size = sizeof(struct vsock_sock),
111};
112
113/* The default peer timeout indicates how long we will wait for a peer response
114 * to a control message.
115 */
116#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
117
118#define SS_LISTEN 255
119
120static const struct vsock_transport *transport;
121static DEFINE_MUTEX(vsock_register_mutex);
122
123/**** EXPORTS ****/
124
125/* Get the ID of the local context. This is transport dependent. */
126
127int vm_sockets_get_local_cid(void)
128{
129 return transport->get_local_cid();
130}
131EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
132
133/**** UTILS ****/
134
135/* Each bound VSocket is stored in the bind hash table and each connected
136 * VSocket is stored in the connected hash table.
137 *
138 * Unbound sockets are all put on the same list attached to the end of the hash
139 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
140 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
141 * represents the list that addr hashes to).
142 *
143 * Specifically, we initialize the vsock_bind_table array to a size of
144 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
145 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
146 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
147 * mods with VSOCK_HASH_SIZE - 1 to ensure this.
148 */
149#define VSOCK_HASH_SIZE 251
150#define MAX_PORT_RETRIES 24
151
152#define VSOCK_HASH(addr) ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
153#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
154#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
155
156/* XXX This can probably be implemented in a better way. */
157#define VSOCK_CONN_HASH(src, dst) \
158 (((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
159#define vsock_connected_sockets(src, dst) \
160 (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
161#define vsock_connected_sockets_vsk(vsk) \
162 vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
163
164static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
165static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
166static DEFINE_SPINLOCK(vsock_table_lock);
167
168static __init void vsock_init_tables(void)
169{
170 int i;
171
172 for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
173 INIT_LIST_HEAD(&vsock_bind_table[i]);
174
175 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
176 INIT_LIST_HEAD(&vsock_connected_table[i]);
177}
178
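A small sketch of how the macros above partition the table (vsock_bucket_for is a hypothetical helper): VSOCK_HASH() yields indices 0..VSOCK_HASH_SIZE-2, so the slot at index VSOCK_HASH_SIZE can never collide with a bound bucket and is free for the unbound list.

static struct list_head *vsock_bucket_for(struct sockaddr_vm *addr,
					  bool bound)
{
	/* Bound sockets hash by port; unbound ones share the extra slot. */
	return bound ? vsock_bound_sockets(addr) : vsock_unbound_sockets;
}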
179static void __vsock_insert_bound(struct list_head *list,
180 struct vsock_sock *vsk)
181{
182 sock_hold(&vsk->sk);
183 list_add(&vsk->bound_table, list);
184}
185
186static void __vsock_insert_connected(struct list_head *list,
187 struct vsock_sock *vsk)
188{
189 sock_hold(&vsk->sk);
190 list_add(&vsk->connected_table, list);
191}
192
193static void __vsock_remove_bound(struct vsock_sock *vsk)
194{
195 list_del_init(&vsk->bound_table);
196 sock_put(&vsk->sk);
197}
198
199static void __vsock_remove_connected(struct vsock_sock *vsk)
200{
201 list_del_init(&vsk->connected_table);
202 sock_put(&vsk->sk);
203}
204
205static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
206{
207 struct vsock_sock *vsk;
208
209 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
210 if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
211 return sk_vsock(vsk);
212
213 return NULL;
214}
215
216static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
217 struct sockaddr_vm *dst)
218{
219 struct vsock_sock *vsk;
220
221 list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
222 connected_table) {
223 if (vsock_addr_equals_addr(src, &vsk->remote_addr)
224 && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
225 return sk_vsock(vsk);
226 }
227 }
228
229 return NULL;
230}
231
232static bool __vsock_in_bound_table(struct vsock_sock *vsk)
233{
234 return !list_empty(&vsk->bound_table);
235}
236
237static bool __vsock_in_connected_table(struct vsock_sock *vsk)
238{
239 return !list_empty(&vsk->connected_table);
240}
241
242static void vsock_insert_unbound(struct vsock_sock *vsk)
243{
244 spin_lock_bh(&vsock_table_lock);
245 __vsock_insert_bound(vsock_unbound_sockets, vsk);
246 spin_unlock_bh(&vsock_table_lock);
247}
248
249void vsock_insert_connected(struct vsock_sock *vsk)
250{
251 struct list_head *list = vsock_connected_sockets(
252 &vsk->remote_addr, &vsk->local_addr);
253
254 spin_lock_bh(&vsock_table_lock);
255 __vsock_insert_connected(list, vsk);
256 spin_unlock_bh(&vsock_table_lock);
257}
258EXPORT_SYMBOL_GPL(vsock_insert_connected);
259
260void vsock_remove_bound(struct vsock_sock *vsk)
261{
262 spin_lock_bh(&vsock_table_lock);
263 __vsock_remove_bound(vsk);
264 spin_unlock_bh(&vsock_table_lock);
265}
266EXPORT_SYMBOL_GPL(vsock_remove_bound);
267
268void vsock_remove_connected(struct vsock_sock *vsk)
269{
270 spin_lock_bh(&vsock_table_lock);
271 __vsock_remove_connected(vsk);
272 spin_unlock_bh(&vsock_table_lock);
273}
274EXPORT_SYMBOL_GPL(vsock_remove_connected);
275
276struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
277{
278 struct sock *sk;
279
280 spin_lock_bh(&vsock_table_lock);
281 sk = __vsock_find_bound_socket(addr);
282 if (sk)
283 sock_hold(sk);
284
285 spin_unlock_bh(&vsock_table_lock);
286
287 return sk;
288}
289EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
290
291struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
292 struct sockaddr_vm *dst)
293{
294 struct sock *sk;
295
296 spin_lock_bh(&vsock_table_lock);
297 sk = __vsock_find_connected_socket(src, dst);
298 if (sk)
299 sock_hold(sk);
300
301 spin_unlock_bh(&vsock_table_lock);
302
303 return sk;
304}
305EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
306
307static bool vsock_in_bound_table(struct vsock_sock *vsk)
308{
309 bool ret;
310
311 spin_lock_bh(&vsock_table_lock);
312 ret = __vsock_in_bound_table(vsk);
313 spin_unlock_bh(&vsock_table_lock);
314
315 return ret;
316}
317
318static bool vsock_in_connected_table(struct vsock_sock *vsk)
319{
320 bool ret;
321
322 spin_lock_bh(&vsock_table_lock);
323 ret = __vsock_in_connected_table(vsk);
324 spin_unlock_bh(&vsock_table_lock);
325
326 return ret;
327}
328
329void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
330{
331 int i;
332
333 spin_lock_bh(&vsock_table_lock);
334
335 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
336 struct vsock_sock *vsk;
337 list_for_each_entry(vsk, &vsock_connected_table[i],
 338 connected_table)
339 fn(sk_vsock(vsk));
340 }
341
342 spin_unlock_bh(&vsock_table_lock);
343}
344EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
345
346void vsock_add_pending(struct sock *listener, struct sock *pending)
347{
348 struct vsock_sock *vlistener;
349 struct vsock_sock *vpending;
350
351 vlistener = vsock_sk(listener);
352 vpending = vsock_sk(pending);
353
354 sock_hold(pending);
355 sock_hold(listener);
356 list_add_tail(&vpending->pending_links, &vlistener->pending_links);
357}
358EXPORT_SYMBOL_GPL(vsock_add_pending);
359
360void vsock_remove_pending(struct sock *listener, struct sock *pending)
361{
362 struct vsock_sock *vpending = vsock_sk(pending);
363
364 list_del_init(&vpending->pending_links);
365 sock_put(listener);
366 sock_put(pending);
367}
368EXPORT_SYMBOL_GPL(vsock_remove_pending);
369
370void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
371{
372 struct vsock_sock *vlistener;
373 struct vsock_sock *vconnected;
374
375 vlistener = vsock_sk(listener);
376 vconnected = vsock_sk(connected);
377
378 sock_hold(connected);
379 sock_hold(listener);
380 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
381}
382EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
383
384static struct sock *vsock_dequeue_accept(struct sock *listener)
385{
386 struct vsock_sock *vlistener;
387 struct vsock_sock *vconnected;
388
389 vlistener = vsock_sk(listener);
390
391 if (list_empty(&vlistener->accept_queue))
392 return NULL;
393
394 vconnected = list_entry(vlistener->accept_queue.next,
395 struct vsock_sock, accept_queue);
396
397 list_del_init(&vconnected->accept_queue);
398 sock_put(listener);
399 /* The caller will need a reference on the connected socket so we let
400 * it call sock_put().
401 */
402
403 return sk_vsock(vconnected);
404}
405
406static bool vsock_is_accept_queue_empty(struct sock *sk)
407{
408 struct vsock_sock *vsk = vsock_sk(sk);
409 return list_empty(&vsk->accept_queue);
410}
411
412static bool vsock_is_pending(struct sock *sk)
413{
414 struct vsock_sock *vsk = vsock_sk(sk);
415 return !list_empty(&vsk->pending_links);
416}
417
418static int vsock_send_shutdown(struct sock *sk, int mode)
419{
420 return transport->shutdown(vsock_sk(sk), mode);
421}
422
423void vsock_pending_work(struct work_struct *work)
424{
425 struct sock *sk;
426 struct sock *listener;
427 struct vsock_sock *vsk;
428 bool cleanup;
429
430 vsk = container_of(work, struct vsock_sock, dwork.work);
431 sk = sk_vsock(vsk);
432 listener = vsk->listener;
433 cleanup = true;
434
435 lock_sock(listener);
436 lock_sock(sk);
437
438 if (vsock_is_pending(sk)) {
439 vsock_remove_pending(listener, sk);
440 } else if (!vsk->rejected) {
441 /* We are not on the pending list and accept() did not reject
442 * us, so we must have been accepted by our user process. We
443 * just need to drop our references to the sockets and be on
444 * our way.
445 */
446 cleanup = false;
447 goto out;
448 }
449
450 listener->sk_ack_backlog--;
451
 452 /* We need to remove ourselves from the global connected sockets list so
453 * incoming packets can't find this socket, and to reduce the reference
454 * count.
455 */
456 if (vsock_in_connected_table(vsk))
457 vsock_remove_connected(vsk);
458
459 sk->sk_state = SS_FREE;
460
461out:
462 release_sock(sk);
463 release_sock(listener);
464 if (cleanup)
465 sock_put(sk);
466
467 sock_put(sk);
468 sock_put(listener);
469}
470EXPORT_SYMBOL_GPL(vsock_pending_work);
471
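The reference-count rule from the implementation notes at the top of the file, as a hedged sketch of how a transport would defer this work. vsock_defer_pending is hypothetical; the actual scheduling lives in the transport code.

static void vsock_defer_pending(struct vsock_sock *vsk, unsigned long delay)
{
	/* Hold a reference across the deferral; vsock_pending_work()
	 * drops it when it eventually runs.
	 */
	sock_hold(sk_vsock(vsk));
	INIT_DELAYED_WORK(&vsk->dwork, vsock_pending_work);
	schedule_delayed_work(&vsk->dwork, delay);
}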
472/**** SOCKET OPERATIONS ****/
473
474static int __vsock_bind_stream(struct vsock_sock *vsk,
475 struct sockaddr_vm *addr)
476{
477 static u32 port = LAST_RESERVED_PORT + 1;
478 struct sockaddr_vm new_addr;
479
480 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
481
482 if (addr->svm_port == VMADDR_PORT_ANY) {
483 bool found = false;
484 unsigned int i;
485
486 for (i = 0; i < MAX_PORT_RETRIES; i++) {
487 if (port <= LAST_RESERVED_PORT)
488 port = LAST_RESERVED_PORT + 1;
489
490 new_addr.svm_port = port++;
491
492 if (!__vsock_find_bound_socket(&new_addr)) {
493 found = true;
494 break;
495 }
496 }
497
498 if (!found)
499 return -EADDRNOTAVAIL;
500 } else {
501 /* If port is in reserved range, ensure caller
502 * has necessary privileges.
503 */
504 if (addr->svm_port <= LAST_RESERVED_PORT &&
505 !capable(CAP_NET_BIND_SERVICE)) {
506 return -EACCES;
507 }
508
509 if (__vsock_find_bound_socket(&new_addr))
510 return -EADDRINUSE;
511 }
512
513 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
514
515 /* Remove stream sockets from the unbound list and add them to the hash
 516 * table for easy lookup by their address. The unbound list is simply an
517 * extra entry at the end of the hash table, a trick used by AF_UNIX.
518 */
519 __vsock_remove_bound(vsk);
520 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
521
522 return 0;
523}
524
525static int __vsock_bind_dgram(struct vsock_sock *vsk,
526 struct sockaddr_vm *addr)
527{
528 return transport->dgram_bind(vsk, addr);
529}
530
531static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
532{
533 struct vsock_sock *vsk = vsock_sk(sk);
534 u32 cid;
535 int retval;
536
537 /* First ensure this socket isn't already bound. */
538 if (vsock_addr_bound(&vsk->local_addr))
539 return -EINVAL;
540
541 /* Now bind to the provided address or select appropriate values if
542 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
543 * like AF_INET prevents binding to a non-local IP address (in most
544 * cases), we only allow binding to the local CID.
545 */
546 cid = transport->get_local_cid();
547 if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
548 return -EADDRNOTAVAIL;
549
550 switch (sk->sk_socket->type) {
551 case SOCK_STREAM:
552 spin_lock_bh(&vsock_table_lock);
553 retval = __vsock_bind_stream(vsk, addr);
554 spin_unlock_bh(&vsock_table_lock);
555 break;
556
557 case SOCK_DGRAM:
558 retval = __vsock_bind_dgram(vsk, addr);
559 break;
560
561 default:
562 retval = -EINVAL;
563 break;
564 }
565
566 return retval;
567}
568
569struct sock *__vsock_create(struct net *net,
570 struct socket *sock,
571 struct sock *parent,
572 gfp_t priority,
573 unsigned short type)
574{
575 struct sock *sk;
576 struct vsock_sock *psk;
577 struct vsock_sock *vsk;
578
579 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
580 if (!sk)
581 return NULL;
582
583 sock_init_data(sock, sk);
584
585 /* sk->sk_type is normally set in sock_init_data, but only if sock is
586 * non-NULL. We make sure that our sockets always have a type by
587 * setting it here if needed.
588 */
589 if (!sock)
590 sk->sk_type = type;
591
592 vsk = vsock_sk(sk);
593 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
594 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
595
596 sk->sk_destruct = vsock_sk_destruct;
597 sk->sk_backlog_rcv = vsock_queue_rcv_skb;
598 sk->sk_state = 0;
599 sock_reset_flag(sk, SOCK_DONE);
600
601 INIT_LIST_HEAD(&vsk->bound_table);
602 INIT_LIST_HEAD(&vsk->connected_table);
603 vsk->listener = NULL;
604 INIT_LIST_HEAD(&vsk->pending_links);
605 INIT_LIST_HEAD(&vsk->accept_queue);
606 vsk->rejected = false;
607 vsk->sent_request = false;
608 vsk->ignore_connecting_rst = false;
609 vsk->peer_shutdown = 0;
610
611 psk = parent ? vsock_sk(parent) : NULL;
612 if (parent) {
613 vsk->trusted = psk->trusted;
614 vsk->owner = get_cred(psk->owner);
615 vsk->connect_timeout = psk->connect_timeout;
616 } else {
617 vsk->trusted = capable(CAP_NET_ADMIN);
618 vsk->owner = get_current_cred();
619 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
620 }
621
622 if (transport->init(vsk, psk) < 0) {
623 sk_free(sk);
624 return NULL;
625 }
626
627 if (sock)
628 vsock_insert_unbound(vsk);
629
630 return sk;
631}
632EXPORT_SYMBOL_GPL(__vsock_create);
633
634static void __vsock_release(struct sock *sk)
635{
636 if (sk) {
637 struct sk_buff *skb;
638 struct sock *pending;
639 struct vsock_sock *vsk;
640
641 vsk = vsock_sk(sk);
642 pending = NULL; /* Compiler warning. */
643
644 if (vsock_in_bound_table(vsk))
645 vsock_remove_bound(vsk);
646
647 if (vsock_in_connected_table(vsk))
648 vsock_remove_connected(vsk);
649
650 transport->release(vsk);
651
652 lock_sock(sk);
653 sock_orphan(sk);
654 sk->sk_shutdown = SHUTDOWN_MASK;
655
656 while ((skb = skb_dequeue(&sk->sk_receive_queue)))
657 kfree_skb(skb);
658
659 /* Clean up any sockets that never were accepted. */
660 while ((pending = vsock_dequeue_accept(sk)) != NULL) {
661 __vsock_release(pending);
662 sock_put(pending);
663 }
664
665 release_sock(sk);
666 sock_put(sk);
667 }
668}
669
670static void vsock_sk_destruct(struct sock *sk)
671{
672 struct vsock_sock *vsk = vsock_sk(sk);
673
674 transport->destruct(vsk);
675
676 /* When clearing these addresses, there's no need to set the family and
677 * possibly register the address family with the kernel.
678 */
679 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
680 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
681
682 put_cred(vsk->owner);
683}
684
685static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
686{
687 int err;
688
689 err = sock_queue_rcv_skb(sk, skb);
690 if (err)
691 kfree_skb(skb);
692
693 return err;
694}
695
696s64 vsock_stream_has_data(struct vsock_sock *vsk)
697{
698 return transport->stream_has_data(vsk);
699}
700EXPORT_SYMBOL_GPL(vsock_stream_has_data);
701
702s64 vsock_stream_has_space(struct vsock_sock *vsk)
703{
704 return transport->stream_has_space(vsk);
705}
706EXPORT_SYMBOL_GPL(vsock_stream_has_space);
707
708static int vsock_release(struct socket *sock)
709{
710 __vsock_release(sock->sk);
711 sock->sk = NULL;
712 sock->state = SS_FREE;
713
714 return 0;
715}
716
717static int
718vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
719{
720 int err;
721 struct sock *sk;
722 struct sockaddr_vm *vm_addr;
723
724 sk = sock->sk;
725
726 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
727 return -EINVAL;
728
729 lock_sock(sk);
730 err = __vsock_bind(sk, vm_addr);
731 release_sock(sk);
732
733 return err;
734}
735
736static int vsock_getname(struct socket *sock,
737 struct sockaddr *addr, int *addr_len, int peer)
738{
739 int err;
740 struct sock *sk;
741 struct vsock_sock *vsk;
742 struct sockaddr_vm *vm_addr;
743
744 sk = sock->sk;
745 vsk = vsock_sk(sk);
746 err = 0;
747
748 lock_sock(sk);
749
750 if (peer) {
751 if (sock->state != SS_CONNECTED) {
752 err = -ENOTCONN;
753 goto out;
754 }
755 vm_addr = &vsk->remote_addr;
756 } else {
757 vm_addr = &vsk->local_addr;
758 }
759
760 if (!vm_addr) {
761 err = -EINVAL;
762 goto out;
763 }
764
765 /* sys_getsockname() and sys_getpeername() pass us a
766 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
767 * that macro is defined in socket.c instead of .h, so we hardcode its
768 * value here.
769 */
770 BUILD_BUG_ON(sizeof(*vm_addr) > 128);
771 memcpy(addr, vm_addr, sizeof(*vm_addr));
772 *addr_len = sizeof(*vm_addr);
773
774out:
775 release_sock(sk);
776 return err;
777}
778
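A hypothetical userspace counterpart to vsock_getname(): since the kernel copies a whole struct sockaddr_vm, callers just pass a buffer of at least that size. This assumes the linux/vm_sockets.h uapi header introduced with this series.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

static void print_local_addr(int fd)
{
	struct sockaddr_vm addr;
	socklen_t len = sizeof(addr);

	/* getsockname() fills in the CID/port pair bound to fd. */
	if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
		printf("cid %u port %u\n", addr.svm_cid, addr.svm_port);
}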
779static int vsock_shutdown(struct socket *sock, int mode)
780{
781 int err;
782 struct sock *sk;
783
784 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
785 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
786 * here like the other address families do. Note also that the
787 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
788 * which is what we want.
789 */
790 mode++;
791
792 if ((mode & ~SHUTDOWN_MASK) || !mode)
793 return -EINVAL;
794
795 /* If this is a STREAM socket and it is not connected then bail out
796 * immediately. If it is a DGRAM socket then we must first kick the
797 * socket so that it wakes up from any sleeping calls, for example
798 * recv(), and then afterwards return the error.
799 */
800
801 sk = sock->sk;
802 if (sock->state == SS_UNCONNECTED) {
803 err = -ENOTCONN;
804 if (sk->sk_type == SOCK_STREAM)
805 return err;
806 } else {
807 sock->state = SS_DISCONNECTING;
808 err = 0;
809 }
810
811 /* Receive and send shutdowns are treated alike. */
812 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
813 if (mode) {
814 lock_sock(sk);
815 sk->sk_shutdown |= mode;
816 sk->sk_state_change(sk);
817 release_sock(sk);
818
819 if (sk->sk_type == SOCK_STREAM) {
820 sock_reset_flag(sk, SOCK_DONE);
821 vsock_send_shutdown(sk, mode);
822 }
823 }
824
825 return err;
826}
827
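The mode++ mapping in vsock_shutdown() can be verified at compile time; a sketch assuming the userspace SHUT_* values (0, 1, 2), which are not normally visible in kernel code:

static void shutdown_mode_map_check(void)
{
	/* SHUT_* as seen by userspace: SHUT_RD=0, SHUT_WR=1, SHUT_RDWR=2. */
	BUILD_BUG_ON(SHUT_RD + 1 != RCV_SHUTDOWN);
	BUILD_BUG_ON(SHUT_WR + 1 != SEND_SHUTDOWN);
	BUILD_BUG_ON(SHUT_RDWR + 1 != (RCV_SHUTDOWN | SEND_SHUTDOWN));
}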
828static unsigned int vsock_poll(struct file *file, struct socket *sock,
829 poll_table *wait)
830{
831 struct sock *sk;
832 unsigned int mask;
833 struct vsock_sock *vsk;
834
835 sk = sock->sk;
836 vsk = vsock_sk(sk);
837
838 poll_wait(file, sk_sleep(sk), wait);
839 mask = 0;
840
841 if (sk->sk_err)
842 /* Signify that there has been an error on this socket. */
843 mask |= POLLERR;
844
845 /* INET sockets treat local write shutdown and peer write shutdown as a
846 * case of POLLHUP set.
847 */
848 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
849 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
850 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
851 mask |= POLLHUP;
852 }
853
854 if (sk->sk_shutdown & RCV_SHUTDOWN ||
855 vsk->peer_shutdown & SEND_SHUTDOWN) {
856 mask |= POLLRDHUP;
857 }
858
859 if (sock->type == SOCK_DGRAM) {
860 /* For datagram sockets we can read if there is something in
 861 * the queue and write as long as the socket isn't shut down for
862 * sending.
863 */
864 if (!skb_queue_empty(&sk->sk_receive_queue) ||
865 (sk->sk_shutdown & RCV_SHUTDOWN)) {
866 mask |= POLLIN | POLLRDNORM;
867 }
868
869 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
870 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
871
872 } else if (sock->type == SOCK_STREAM) {
873 lock_sock(sk);
874
875 /* Listening sockets that have connections in their accept
876 * queue can be read.
877 */
878 if (sk->sk_state == SS_LISTEN
879 && !vsock_is_accept_queue_empty(sk))
880 mask |= POLLIN | POLLRDNORM;
881
882 /* If there is something in the queue then we can read. */
883 if (transport->stream_is_active(vsk) &&
884 !(sk->sk_shutdown & RCV_SHUTDOWN)) {
885 bool data_ready_now = false;
886 int ret = transport->notify_poll_in(
887 vsk, 1, &data_ready_now);
888 if (ret < 0) {
889 mask |= POLLERR;
890 } else {
891 if (data_ready_now)
892 mask |= POLLIN | POLLRDNORM;
893
894 }
895 }
896
897 /* Sockets whose connections have been closed, reset, or
 898 * terminated should also be considered readable, and we check the
899 * shutdown flag for that.
900 */
901 if (sk->sk_shutdown & RCV_SHUTDOWN ||
902 vsk->peer_shutdown & SEND_SHUTDOWN) {
903 mask |= POLLIN | POLLRDNORM;
904 }
905
906 /* Connected sockets that can produce data can be written. */
907 if (sk->sk_state == SS_CONNECTED) {
908 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
909 bool space_avail_now = false;
910 int ret = transport->notify_poll_out(
911 vsk, 1, &space_avail_now);
912 if (ret < 0) {
913 mask |= POLLERR;
914 } else {
915 if (space_avail_now)
916 /* Remove POLLWRBAND since INET
917 * sockets are not setting it.
918 */
919 mask |= POLLOUT | POLLWRNORM;
920
921 }
922 }
923 }
924
925 /* Simulate INET socket poll behaviors, which sets
926 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
927 * but local send is not shutdown.
928 */
929 if (sk->sk_state == SS_UNCONNECTED) {
930 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
931 mask |= POLLOUT | POLLWRNORM;
932
933 }
934
935 release_sock(sk);
936 }
937
938 return mask;
939}
940
941static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
942 struct msghdr *msg, size_t len)
943{
944 int err;
945 struct sock *sk;
946 struct vsock_sock *vsk;
947 struct sockaddr_vm *remote_addr;
948
949 if (msg->msg_flags & MSG_OOB)
950 return -EOPNOTSUPP;
951
952 /* For now, MSG_DONTWAIT is always assumed... */
953 err = 0;
954 sk = sock->sk;
955 vsk = vsock_sk(sk);
956
957 lock_sock(sk);
958
959 if (!vsock_addr_bound(&vsk->local_addr)) {
960 struct sockaddr_vm local_addr;
961
962 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
963 err = __vsock_bind(sk, &local_addr);
964 if (err != 0)
965 goto out;
966
967 }
968
969 /* If the provided message contains an address, use that. Otherwise
970 * fall back on the socket's remote handle (if it has been connected).
971 */
972 if (msg->msg_name &&
973 vsock_addr_cast(msg->msg_name, msg->msg_namelen,
974 &remote_addr) == 0) {
975 /* Ensure this address is of the right type and is a valid
976 * destination.
977 */
978
979 if (remote_addr->svm_cid == VMADDR_CID_ANY)
980 remote_addr->svm_cid = transport->get_local_cid();
981
982 if (!vsock_addr_bound(remote_addr)) {
983 err = -EINVAL;
984 goto out;
985 }
986 } else if (sock->state == SS_CONNECTED) {
987 remote_addr = &vsk->remote_addr;
988
989 if (remote_addr->svm_cid == VMADDR_CID_ANY)
990 remote_addr->svm_cid = transport->get_local_cid();
991
992 /* XXX Should connect() or this function ensure remote_addr is
993 * bound?
994 */
995 if (!vsock_addr_bound(&vsk->remote_addr)) {
996 err = -EINVAL;
997 goto out;
998 }
999 } else {
1000 err = -EINVAL;
1001 goto out;
1002 }
1003
1004 if (!transport->dgram_allow(remote_addr->svm_cid,
1005 remote_addr->svm_port)) {
1006 err = -EINVAL;
1007 goto out;
1008 }
1009
1010 err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
1011
1012out:
1013 release_sock(sk);
1014 return err;
1015}
1016
1017static int vsock_dgram_connect(struct socket *sock,
1018 struct sockaddr *addr, int addr_len, int flags)
1019{
1020 int err;
1021 struct sock *sk;
1022 struct vsock_sock *vsk;
1023 struct sockaddr_vm *remote_addr;
1024
1025 sk = sock->sk;
1026 vsk = vsock_sk(sk);
1027
1028 err = vsock_addr_cast(addr, addr_len, &remote_addr);
1029 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1030 lock_sock(sk);
1031 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1032 VMADDR_PORT_ANY);
1033 sock->state = SS_UNCONNECTED;
1034 release_sock(sk);
1035 return 0;
1036 } else if (err != 0)
1037 return -EINVAL;
1038
1039 lock_sock(sk);
1040
1041 if (!vsock_addr_bound(&vsk->local_addr)) {
1042 struct sockaddr_vm local_addr;
1043
1044 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
1045 err = __vsock_bind(sk, &local_addr);
1046 if (err != 0)
1047 goto out;
1048
1049 }
1050
1051 if (!transport->dgram_allow(remote_addr->svm_cid,
1052 remote_addr->svm_port)) {
1053 err = -EINVAL;
1054 goto out;
1055 }
1056
1057 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1058 sock->state = SS_CONNECTED;
1059
1060out:
1061 release_sock(sk);
1062 return err;
1063}
1064
1065static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
1066 struct msghdr *msg, size_t len, int flags)
1067{
1068 return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
1069 flags);
1070}
1071
1072static const struct proto_ops vsock_dgram_ops = {
1073 .family = PF_VSOCK,
1074 .owner = THIS_MODULE,
1075 .release = vsock_release,
1076 .bind = vsock_bind,
1077 .connect = vsock_dgram_connect,
1078 .socketpair = sock_no_socketpair,
1079 .accept = sock_no_accept,
1080 .getname = vsock_getname,
1081 .poll = vsock_poll,
1082 .ioctl = sock_no_ioctl,
1083 .listen = sock_no_listen,
1084 .shutdown = vsock_shutdown,
1085 .setsockopt = sock_no_setsockopt,
1086 .getsockopt = sock_no_getsockopt,
1087 .sendmsg = vsock_dgram_sendmsg,
1088 .recvmsg = vsock_dgram_recvmsg,
1089 .mmap = sock_no_mmap,
1090 .sendpage = sock_no_sendpage,
1091};
1092
1093static void vsock_connect_timeout(struct work_struct *work)
1094{
1095 struct sock *sk;
1096 struct vsock_sock *vsk;
1097
1098 vsk = container_of(work, struct vsock_sock, dwork.work);
1099 sk = sk_vsock(vsk);
1100
1101 lock_sock(sk);
1102 if (sk->sk_state == SS_CONNECTING &&
1103 (sk->sk_shutdown != SHUTDOWN_MASK)) {
1104 sk->sk_state = SS_UNCONNECTED;
1105 sk->sk_err = ETIMEDOUT;
1106 sk->sk_error_report(sk);
1107 }
1108 release_sock(sk);
1109
1110 sock_put(sk);
1111}
1112
1113static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1114 int addr_len, int flags)
1115{
1116 int err;
1117 struct sock *sk;
1118 struct vsock_sock *vsk;
1119 struct sockaddr_vm *remote_addr;
1120 long timeout;
1121 DEFINE_WAIT(wait);
1122
1123 err = 0;
1124 sk = sock->sk;
1125 vsk = vsock_sk(sk);
1126
1127 lock_sock(sk);
1128
1129 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1130 switch (sock->state) {
1131 case SS_CONNECTED:
1132 err = -EISCONN;
1133 goto out;
1134 case SS_DISCONNECTING:
1135 err = -EINVAL;
1136 goto out;
1137 case SS_CONNECTING:
1138 /* This continues on so we can move sock into the SS_CONNECTED
1139 * state once the connection has completed (at which point err
1140 * will be set to zero also). Otherwise, we will either wait
1141 * for the connection or return -EALREADY should this be a
1142 * non-blocking call.
1143 */
1144 err = -EALREADY;
1145 break;
1146 default:
1147 if ((sk->sk_state == SS_LISTEN) ||
1148 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1149 err = -EINVAL;
1150 goto out;
1151 }
1152
1153 /* The hypervisor and well-known contexts do not have socket
1154 * endpoints.
1155 */
1156 if (!transport->stream_allow(remote_addr->svm_cid,
1157 remote_addr->svm_port)) {
1158 err = -ENETUNREACH;
1159 goto out;
1160 }
1161
1162 /* Set the remote address that we are connecting to. */
1163 memcpy(&vsk->remote_addr, remote_addr,
1164 sizeof(vsk->remote_addr));
1165
1166 /* Autobind this socket to the local address if necessary. */
1167 if (!vsock_addr_bound(&vsk->local_addr)) {
1168 struct sockaddr_vm local_addr;
1169
1170 vsock_addr_init(&local_addr, VMADDR_CID_ANY,
1171 VMADDR_PORT_ANY);
1172 err = __vsock_bind(sk, &local_addr);
1173 if (err != 0)
1174 goto out;
1175
1176 }
1177
1178 sk->sk_state = SS_CONNECTING;
1179
1180 err = transport->connect(vsk);
1181 if (err < 0)
1182 goto out;
1183
1184 /* Mark sock as connecting and set the error code to in
1185 * progress in case this is a non-blocking connect.
1186 */
1187 sock->state = SS_CONNECTING;
1188 err = -EINPROGRESS;
1189 }
1190
1191 /* The receive path will handle all communication until we are able to
1192 * enter the connected state. Here we wait for the connection to be
1193 * completed or a notification of an error.
1194 */
1195 timeout = vsk->connect_timeout;
1196 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1197
1198 while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
1199 if (flags & O_NONBLOCK) {
1200 /* If we're not going to block, we schedule a timeout
1201 * function to generate a timeout on the connection
1202 * attempt, in case the peer doesn't respond in a
1203 * timely manner. We hold on to the socket until the
1204 * timeout fires.
1205 */
1206 sock_hold(sk);
1207 INIT_DELAYED_WORK(&vsk->dwork,
1208 vsock_connect_timeout);
1209 schedule_delayed_work(&vsk->dwork, timeout);
1210
1211 /* Skip ahead to preserve error code set above. */
1212 goto out_wait;
1213 }
1214
1215 release_sock(sk);
1216 timeout = schedule_timeout(timeout);
1217 lock_sock(sk);
1218
1219 if (signal_pending(current)) {
1220 err = sock_intr_errno(timeout);
1221 goto out_wait_error;
1222 } else if (timeout == 0) {
1223 err = -ETIMEDOUT;
1224 goto out_wait_error;
1225 }
1226
1227 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1228 }
1229
1230 if (sk->sk_err) {
1231 err = -sk->sk_err;
1232 goto out_wait_error;
1233 } else
1234 err = 0;
1235
1236out_wait:
1237 finish_wait(sk_sleep(sk), &wait);
1238out:
1239 release_sock(sk);
1240 return err;
1241
1242out_wait_error:
1243 sk->sk_state = SS_UNCONNECTED;
1244 sock->state = SS_UNCONNECTED;
1245 goto out_wait;
1246}
1247
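From userspace, the non-blocking branch above surfaces as connect() failing with EINPROGRESS, after which completion (or the driver's timeout) is observed by polling for writability. A hypothetical completion helper:

#include <poll.h>
#include <sys/socket.h>

static int finish_nonblocking_connect(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int err = 0;
	socklen_t len = sizeof(err);

	/* Writable means the connect attempt has resolved one way or another. */
	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;		/* timed out or poll error */

	/* SO_ERROR reports the deferred connect() result. */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0 || err)
		return -1;
	return 0;
}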
1248static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
1249{
1250 struct sock *listener;
1251 int err;
1252 struct sock *connected;
1253 struct vsock_sock *vconnected;
1254 long timeout;
1255 DEFINE_WAIT(wait);
1256
1257 err = 0;
1258 listener = sock->sk;
1259
1260 lock_sock(listener);
1261
1262 if (sock->type != SOCK_STREAM) {
1263 err = -EOPNOTSUPP;
1264 goto out;
1265 }
1266
1267 if (listener->sk_state != SS_LISTEN) {
1268 err = -EINVAL;
1269 goto out;
1270 }
1271
 1272 /* Wait for child sockets to appear; these are the new sockets
1273 * created upon connection establishment.
1274 */
1275 timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
1276 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1277
1278 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1279 listener->sk_err == 0) {
1280 release_sock(listener);
1281 timeout = schedule_timeout(timeout);
1282 lock_sock(listener);
1283
1284 if (signal_pending(current)) {
1285 err = sock_intr_errno(timeout);
1286 goto out_wait;
1287 } else if (timeout == 0) {
1288 err = -EAGAIN;
1289 goto out_wait;
1290 }
1291
1292 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1293 }
1294
1295 if (listener->sk_err)
1296 err = -listener->sk_err;
1297
1298 if (connected) {
1299 listener->sk_ack_backlog--;
1300
1301 lock_sock(connected);
1302 vconnected = vsock_sk(connected);
1303
1304 /* If the listener socket has received an error, then we should
1305 * reject this socket and return. Note that we simply mark the
1306 * socket rejected, drop our reference, and let the cleanup
1307 * function handle the cleanup; the fact that we found it in
1308 * the listener's accept queue guarantees that the cleanup
1309 * function hasn't run yet.
1310 */
1311 if (err) {
1312 vconnected->rejected = true;
1313 release_sock(connected);
1314 sock_put(connected);
1315 goto out_wait;
1316 }
1317
1318 newsock->state = SS_CONNECTED;
1319 sock_graft(connected, newsock);
1320 release_sock(connected);
1321 sock_put(connected);
1322 }
1323
1324out_wait:
1325 finish_wait(sk_sleep(listener), &wait);
1326out:
1327 release_sock(listener);
1328 return err;
1329}
1330
1331static int vsock_listen(struct socket *sock, int backlog)
1332{
1333 int err;
1334 struct sock *sk;
1335 struct vsock_sock *vsk;
1336
1337 sk = sock->sk;
1338
1339 lock_sock(sk);
1340
1341 if (sock->type != SOCK_STREAM) {
1342 err = -EOPNOTSUPP;
1343 goto out;
1344 }
1345
1346 if (sock->state != SS_UNCONNECTED) {
1347 err = -EINVAL;
1348 goto out;
1349 }
1350
1351 vsk = vsock_sk(sk);
1352
1353 if (!vsock_addr_bound(&vsk->local_addr)) {
1354 err = -EINVAL;
1355 goto out;
1356 }
1357
1358 sk->sk_max_ack_backlog = backlog;
1359 sk->sk_state = SS_LISTEN;
1360
1361 err = 0;
1362
1363out:
1364 release_sock(sk);
1365 return err;
1366}
1367
1368static int vsock_stream_setsockopt(struct socket *sock,
1369 int level,
1370 int optname,
1371 char __user *optval,
1372 unsigned int optlen)
1373{
1374 int err;
1375 struct sock *sk;
1376 struct vsock_sock *vsk;
1377 u64 val;
1378
1379 if (level != AF_VSOCK)
1380 return -ENOPROTOOPT;
1381
1382#define COPY_IN(_v) \
1383 do { \
1384 if (optlen < sizeof(_v)) { \
1385 err = -EINVAL; \
1386 goto exit; \
1387 } \
1388 if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
1389 err = -EFAULT; \
1390 goto exit; \
1391 } \
1392 } while (0)
1393
1394 err = 0;
1395 sk = sock->sk;
1396 vsk = vsock_sk(sk);
1397
1398 lock_sock(sk);
1399
1400 switch (optname) {
1401 case SO_VM_SOCKETS_BUFFER_SIZE:
1402 COPY_IN(val);
1403 transport->set_buffer_size(vsk, val);
1404 break;
1405
1406 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1407 COPY_IN(val);
1408 transport->set_max_buffer_size(vsk, val);
1409 break;
1410
1411 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1412 COPY_IN(val);
1413 transport->set_min_buffer_size(vsk, val);
1414 break;
1415
1416 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1417 struct timeval tv;
1418 COPY_IN(tv);
1419 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1420 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1421 vsk->connect_timeout = tv.tv_sec * HZ +
1422 DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
1423 if (vsk->connect_timeout == 0)
1424 vsk->connect_timeout =
1425 VSOCK_DEFAULT_CONNECT_TIMEOUT;
1426
1427 } else {
1428 err = -ERANGE;
1429 }
1430 break;
1431 }
1432
1433 default:
1434 err = -ENOPROTOOPT;
1435 break;
1436 }
1437
1438#undef COPY_IN
1439
1440exit:
1441 release_sock(sk);
1442 return err;
1443}
1444
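A hypothetical userspace use of the option handled above; note that the level is AF_VSOCK, matching the level check at the top of the function, and the timeout travels as a struct timeval. Assumes the linux/vm_sockets.h uapi header from this series.

#include <sys/socket.h>
#include <sys/time.h>
#include <linux/vm_sockets.h>

static int set_connect_timeout(int fd, long sec, long usec)
{
	struct timeval tv = { .tv_sec = sec, .tv_usec = usec };

	/* Out-of-range values are rejected by the kernel with -ERANGE. */
	return setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
			  &tv, sizeof(tv));
}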
1445static int vsock_stream_getsockopt(struct socket *sock,
1446 int level, int optname,
1447 char __user *optval,
1448 int __user *optlen)
1449{
1450 int err;
1451 int len;
1452 struct sock *sk;
1453 struct vsock_sock *vsk;
1454 u64 val;
1455
1456 if (level != AF_VSOCK)
1457 return -ENOPROTOOPT;
1458
1459 err = get_user(len, optlen);
1460 if (err != 0)
1461 return err;
1462
1463#define COPY_OUT(_v) \
1464 do { \
1465 if (len < sizeof(_v)) \
1466 return -EINVAL; \
1467 \
1468 len = sizeof(_v); \
1469 if (copy_to_user(optval, &_v, len) != 0) \
1470 return -EFAULT; \
1471 \
1472 } while (0)
1473
1474 err = 0;
1475 sk = sock->sk;
1476 vsk = vsock_sk(sk);
1477
1478 switch (optname) {
1479 case SO_VM_SOCKETS_BUFFER_SIZE:
1480 val = transport->get_buffer_size(vsk);
1481 COPY_OUT(val);
1482 break;
1483
1484 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1485 val = transport->get_max_buffer_size(vsk);
1486 COPY_OUT(val);
1487 break;
1488
1489 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1490 val = transport->get_min_buffer_size(vsk);
1491 COPY_OUT(val);
1492 break;
1493
1494 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1495 struct timeval tv;
1496 tv.tv_sec = vsk->connect_timeout / HZ;
1497 tv.tv_usec =
1498 (vsk->connect_timeout -
1499 tv.tv_sec * HZ) * (1000000 / HZ);
1500 COPY_OUT(tv);
1501 break;
1502 }
1503 default:
1504 return -ENOPROTOOPT;
1505 }
1506
1507 err = put_user(len, optlen);
1508 if (err != 0)
1509 return -EFAULT;
1510
1511#undef COPY_OUT
1512
1513 return 0;
1514}
1515
1516static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1517 struct msghdr *msg, size_t len)
1518{
1519 struct sock *sk;
1520 struct vsock_sock *vsk;
1521 ssize_t total_written;
1522 long timeout;
1523 int err;
1524 struct vsock_transport_send_notify_data send_data;
1525
1526 DEFINE_WAIT(wait);
1527
1528 sk = sock->sk;
1529 vsk = vsock_sk(sk);
1530 total_written = 0;
1531 err = 0;
1532
1533 if (msg->msg_flags & MSG_OOB)
1534 return -EOPNOTSUPP;
1535
1536 lock_sock(sk);
1537
1538 /* Callers should not provide a destination with stream sockets. */
1539 if (msg->msg_namelen) {
1540 err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
1541 goto out;
1542 }
1543
1544 /* Send data only if both sides are not shutdown in the direction. */
1545 if (sk->sk_shutdown & SEND_SHUTDOWN ||
1546 vsk->peer_shutdown & RCV_SHUTDOWN) {
1547 err = -EPIPE;
1548 goto out;
1549 }
1550
1551 if (sk->sk_state != SS_CONNECTED ||
1552 !vsock_addr_bound(&vsk->local_addr)) {
1553 err = -ENOTCONN;
1554 goto out;
1555 }
1556
1557 if (!vsock_addr_bound(&vsk->remote_addr)) {
1558 err = -EDESTADDRREQ;
1559 goto out;
1560 }
1561
1562 /* Wait for room in the produce queue to enqueue our user's data. */
1563 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1564
1565 err = transport->notify_send_init(vsk, &send_data);
1566 if (err < 0)
1567 goto out;
1568
1569 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1570
1571 while (total_written < len) {
1572 ssize_t written;
1573
1574 while (vsock_stream_has_space(vsk) == 0 &&
1575 sk->sk_err == 0 &&
1576 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1577 !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1578
1579 /* Don't wait for non-blocking sockets. */
1580 if (timeout == 0) {
1581 err = -EAGAIN;
1582 goto out_wait;
1583 }
1584
1585 err = transport->notify_send_pre_block(vsk, &send_data);
1586 if (err < 0)
1587 goto out_wait;
1588
1589 release_sock(sk);
1590 timeout = schedule_timeout(timeout);
1591 lock_sock(sk);
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeout);
1594 goto out_wait;
1595 } else if (timeout == 0) {
1596 err = -EAGAIN;
1597 goto out_wait;
1598 }
1599
1600 prepare_to_wait(sk_sleep(sk), &wait,
1601 TASK_INTERRUPTIBLE);
1602 }
1603
1604 /* These checks occur both as part of and after the loop
1605 * conditional since we need to check before and after
1606 * sleeping.
1607 */
1608 if (sk->sk_err) {
1609 err = -sk->sk_err;
1610 goto out_wait;
1611 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1612 (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1613 err = -EPIPE;
1614 goto out_wait;
1615 }
1616
1617 err = transport->notify_send_pre_enqueue(vsk, &send_data);
1618 if (err < 0)
1619 goto out_wait;
1620
1621 /* Note that enqueue will only write as many bytes as are free
1622 * in the produce queue, so we don't need to ensure len is
1623 * smaller than the queue size. It is the caller's
1624 * responsibility to check how many bytes we were able to send.
1625 */
1626
1627 written = transport->stream_enqueue(
1628 vsk, msg->msg_iov,
1629 len - total_written);
1630 if (written < 0) {
1631 err = -ENOMEM;
1632 goto out_wait;
1633 }
1634
1635 total_written += written;
1636
1637 err = transport->notify_send_post_enqueue(
1638 vsk, written, &send_data);
1639 if (err < 0)
1640 goto out_wait;
1641
1642 }
1643
1644out_wait:
1645 if (total_written > 0)
1646 err = total_written;
1647 finish_wait(sk_sleep(sk), &wait);
1648out:
1649 release_sock(sk);
1650 return err;
1651}
1652
1653
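/* Editor's sketch, not part of this patch: because vsock_stream_sendmsg()
 * above returns the bytes enqueued so far when a signal arrives or the
 * timeout expires after a partial transfer, userspace callers have to loop
 * over short writes exactly as they would with TCP. A minimal, hypothetical
 * helper:
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_all(int fd, const char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		ssize_t n = send(fd, buf + off, len - off, 0);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* nothing sent; retry */
			return -1;		/* EAGAIN, EPIPE, ... */
		}
		off += n;
	}
	return (ssize_t)off;
}
#endif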
1654static int
1655vsock_stream_recvmsg(struct kiocb *kiocb,
1656 struct socket *sock,
1657 struct msghdr *msg, size_t len, int flags)
1658{
1659 struct sock *sk;
1660 struct vsock_sock *vsk;
1661 int err;
1662 size_t target;
1663 ssize_t copied;
1664 long timeout;
1665 struct vsock_transport_recv_notify_data recv_data;
1666
1667 DEFINE_WAIT(wait);
1668
1669 sk = sock->sk;
1670 vsk = vsock_sk(sk);
1671 err = 0;
1672
1673 lock_sock(sk);
1674
1675 if (sk->sk_state != SS_CONNECTED) {
1676 /* Recvmsg is supposed to return 0 if a peer performs an
1677 * orderly shutdown. Differentiate between that case and when a
1678 * peer has not connected or a local shutdown occurred with the
1679 * SOCK_DONE flag.
1680 */
1681 if (sock_flag(sk, SOCK_DONE))
1682 err = 0;
1683 else
1684 err = -ENOTCONN;
1685
1686 goto out;
1687 }
1688
1689 if (flags & MSG_OOB) {
1690 err = -EOPNOTSUPP;
1691 goto out;
1692 }
1693
1694 /* We don't check peer_shutdown flag here since peer may actually shut
1695 * down, but there can be data in the queue that a local socket can
1696 * receive.
1697 */
1698 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1699 err = 0;
1700 goto out;
1701 }
1702
1703 /* It is valid on Linux to pass in a zero-length receive buffer. This
1704 * is not an error. We may as well bail out now.
1705 */
1706 if (!len) {
1707 err = 0;
1708 goto out;
1709 }
1710
1711 /* We must not copy less than target bytes into the user's buffer
1712 * before returning successfully, so we wait for the consume queue to
1713 * have that much data to consume before dequeueing. Note that this
1714 * makes it impossible to handle cases where target is greater than the
1715 * queue size.
1716 */
1717 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1718 if (target >= transport->stream_rcvhiwat(vsk)) {
1719 err = -ENOMEM;
1720 goto out;
1721 }
1722 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1723 copied = 0;
1724
1725 err = transport->notify_recv_init(vsk, target, &recv_data);
1726 if (err < 0)
1727 goto out;
1728
1729 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1730
1731 while (1) {
1732 s64 ready = vsock_stream_has_data(vsk);
1733
1734 if (ready < 0) {
1735 /* Invalid queue pair content. XXX This should be
1736 * changed to a connection reset in a later change.
1737 */
1738
1739 err = -ENOMEM;
1740 goto out_wait;
1741 } else if (ready > 0) {
1742 ssize_t read;
1743
1744 err = transport->notify_recv_pre_dequeue(
1745 vsk, target, &recv_data);
1746 if (err < 0)
1747 break;
1748
1749 read = transport->stream_dequeue(
1750 vsk, msg->msg_iov,
1751 len - copied, flags);
1752 if (read < 0) {
1753 err = -ENOMEM;
1754 break;
1755 }
1756
1757 copied += read;
1758
1759 err = transport->notify_recv_post_dequeue(
1760 vsk, target, read,
1761 !(flags & MSG_PEEK), &recv_data);
1762 if (err < 0)
1763 goto out_wait;
1764
1765 if (read >= target || flags & MSG_PEEK)
1766 break;
1767
1768 target -= read;
1769 } else {
1770 if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
1771 || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
1772 break;
1773 }
1774 /* Don't wait for non-blocking sockets. */
1775 if (timeout == 0) {
1776 err = -EAGAIN;
1777 break;
1778 }
1779
1780 err = transport->notify_recv_pre_block(
1781 vsk, target, &recv_data);
1782 if (err < 0)
1783 break;
1784
1785 release_sock(sk);
1786 timeout = schedule_timeout(timeout);
1787 lock_sock(sk);
1788
1789 if (signal_pending(current)) {
1790 err = sock_intr_errno(timeout);
1791 break;
1792 } else if (timeout == 0) {
1793 err = -EAGAIN;
1794 break;
1795 }
1796
1797 prepare_to_wait(sk_sleep(sk), &wait,
1798 TASK_INTERRUPTIBLE);
1799 }
1800 }
1801
1802 if (sk->sk_err)
1803 err = -sk->sk_err;
1804 else if (sk->sk_shutdown & RCV_SHUTDOWN)
1805 err = 0;
1806
1807 if (copied > 0) {
1808 /* We only do these additional bookkeeping/notification steps
1809 * if we actually copied something out of the queue pair
1810 * instead of just peeking ahead.
1811 */
1812
1813 if (!(flags & MSG_PEEK)) {
1814 /* If the other side has shutdown for sending and there
1815 * is nothing more to read, then modify the socket
1816 * state.
1817 */
1818 if (vsk->peer_shutdown & SEND_SHUTDOWN) {
1819 if (vsock_stream_has_data(vsk) <= 0) {
1820 sk->sk_state = SS_UNCONNECTED;
1821 sock_set_flag(sk, SOCK_DONE);
1822 sk->sk_state_change(sk);
1823 }
1824 }
1825 }
1826 err = copied;
1827 }
1828
1829out_wait:
1830 finish_wait(sk_sleep(sk), &wait);
1831out:
1832 release_sock(sk);
1833 return err;
1834}
1835
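/* Editor's sketch, not part of this patch: how the `target` computation
 * above behaves from userspace. sock_rcvlowat() makes target equal to the
 * full length under MSG_WAITALL, and min(SO_RCVLOWAT, len) otherwise, so
 * either form below blocks until a complete header is available -- provided
 * the requested amount stays below the queue size, or the -ENOMEM check
 * above fires.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t recv_full_header(int fd, void *hdr, size_t hdr_len)
{
	/* One-shot: target == hdr_len for this call only. */
	return recv(fd, hdr, hdr_len, MSG_WAITALL);
}

static int set_header_low_water_mark(int fd, int hdr_len)
{
	/* Persistent: every later recv() waits for at least hdr_len. */
	return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
			  &hdr_len, sizeof(hdr_len));
}
#endif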
1836static const struct proto_ops vsock_stream_ops = {
1837 .family = PF_VSOCK,
1838 .owner = THIS_MODULE,
1839 .release = vsock_release,
1840 .bind = vsock_bind,
1841 .connect = vsock_stream_connect,
1842 .socketpair = sock_no_socketpair,
1843 .accept = vsock_accept,
1844 .getname = vsock_getname,
1845 .poll = vsock_poll,
1846 .ioctl = sock_no_ioctl,
1847 .listen = vsock_listen,
1848 .shutdown = vsock_shutdown,
1849 .setsockopt = vsock_stream_setsockopt,
1850 .getsockopt = vsock_stream_getsockopt,
1851 .sendmsg = vsock_stream_sendmsg,
1852 .recvmsg = vsock_stream_recvmsg,
1853 .mmap = sock_no_mmap,
1854 .sendpage = sock_no_sendpage,
1855};
1856
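/* Editor's sketch, not part of this patch: the smallest stream client that
 * exercises the ops table above. VMADDR_CID_HOST addresses the host from a
 * guest; the port number is an arbitrary value chosen by the application.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

static int vsock_connect_to_host(unsigned int port)
{
	struct sockaddr_vm addr;
	int fd;

	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;
	addr.svm_port = port;

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif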
1857static int vsock_create(struct net *net, struct socket *sock,
1858 int protocol, int kern)
1859{
1860 if (!sock)
1861 return -EINVAL;
1862
1863 if (protocol && protocol != PF_VSOCK)
1864 return -EPROTONOSUPPORT;
1865
1866 switch (sock->type) {
1867 case SOCK_DGRAM:
1868 sock->ops = &vsock_dgram_ops;
1869 break;
1870 case SOCK_STREAM:
1871 sock->ops = &vsock_stream_ops;
1872 break;
1873 default:
1874 return -ESOCKTNOSUPPORT;
1875 }
1876
1877 sock->state = SS_UNCONNECTED;
1878
1879 return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
1880}
1881
1882static const struct net_proto_family vsock_family_ops = {
1883 .family = AF_VSOCK,
1884 .create = vsock_create,
1885 .owner = THIS_MODULE,
1886};
1887
1888static long vsock_dev_do_ioctl(struct file *filp,
1889 unsigned int cmd, void __user *ptr)
1890{
1891 u32 __user *p = ptr;
1892 int retval = 0;
1893
1894 switch (cmd) {
1895 case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
1896 if (put_user(transport->get_local_cid(), p) != 0)
1897 retval = -EFAULT;
1898 break;
1899
1900 default:
1901 pr_err("Unknown ioctl %d\n", cmd);
1902 retval = -EINVAL;
1903 }
1904
1905 return retval;
1906}
1907
1908static long vsock_dev_ioctl(struct file *filp,
1909 unsigned int cmd, unsigned long arg)
1910{
1911 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
1912}
1913
1914#ifdef CONFIG_COMPAT
1915static long vsock_dev_compat_ioctl(struct file *filp,
1916 unsigned int cmd, unsigned long arg)
1917{
1918 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
1919}
1920#endif
1921
1922static const struct file_operations vsock_device_ops = {
1923 .owner = THIS_MODULE,
1924 .unlocked_ioctl = vsock_dev_ioctl,
1925#ifdef CONFIG_COMPAT
1926 .compat_ioctl = vsock_dev_compat_ioctl,
1927#endif
1928 .open = nonseekable_open,
1929};
1930
1931static struct miscdevice vsock_device = {
1932 .name = "vsock",
1933 .minor = MISC_DYNAMIC_MINOR,
1934 .fops = &vsock_device_ops,
1935};
1936
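/* Editor's sketch, not part of this patch: querying the local context ID
 * through the misc device registered above.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vm_sockets.h>

static int print_local_cid(void)
{
	unsigned int cid;
	int ret = -1;
	int fd = open("/dev/vsock", O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == 0) {
		printf("local CID: %u\n", cid);
		ret = 0;
	}
	close(fd);
	return ret;
}
#endif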
1937static int __vsock_core_init(void)
1938{
1939 int err;
1940
1941 vsock_init_tables();
1942
1943 err = misc_register(&vsock_device);
1944 if (err) {
1945 pr_err("Failed to register misc device\n");
1946 return -ENOENT;
1947 }
1948
1949 err = proto_register(&vsock_proto, 1); /* we want our slab */
1950 if (err) {
1951 pr_err("Cannot register vsock protocol\n");
1952 goto err_misc_deregister;
1953 }
1954
1955 err = sock_register(&vsock_family_ops);
1956 if (err) {
1957 pr_err("could not register af_vsock (%d) address family: %d\n",
1958 AF_VSOCK, err);
1959 goto err_unregister_proto;
1960 }
1961
1962 return 0;
1963
1964err_unregister_proto:
1965 proto_unregister(&vsock_proto);
1966err_misc_deregister:
1967 misc_deregister(&vsock_device);
1968 return err;
1969}
1970
1971int vsock_core_init(const struct vsock_transport *t)
1972{
1973 int retval = mutex_lock_interruptible(&vsock_register_mutex);
1974 if (retval)
1975 return retval;
1976
1977 if (transport) {
1978 retval = -EBUSY;
1979 goto out;
1980 }
1981
1982 transport = t;
1983 retval = __vsock_core_init();
1984 if (retval)
1985 transport = NULL;
1986
1987out:
1988 mutex_unlock(&vsock_register_mutex);
1989 return retval;
1990}
1991EXPORT_SYMBOL_GPL(vsock_core_init);
1992
1993void vsock_core_exit(void)
1994{
1995 mutex_lock(&vsock_register_mutex);
1996
1997 misc_deregister(&vsock_device);
1998 sock_unregister(AF_VSOCK);
1999 proto_unregister(&vsock_proto);
2000
2001 /* We do not want the assignment below re-ordered. */
2002 mb();
2003 transport = NULL;
2004
2005 mutex_unlock(&vsock_register_mutex);
2006}
2007EXPORT_SYMBOL_GPL(vsock_core_exit);
2008
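/* Editor's sketch, not part of this patch: the registration lifecycle a
 * transport module is expected to follow against the two exports above
 * (linux/module.h assumed). `my_transport` and the function names are
 * hypothetical; the real ops table for VMCI is defined in vmci_transport.c
 * below.
 */
#if 0	/* illustrative transport module, not part of the core */
static const struct vsock_transport my_transport = {
	/* .init, .connect, .stream_enqueue, ... as declared in af_vsock.h */
};

static int __init my_transport_init(void)
{
	/* Fails with -EBUSY if another transport is already registered. */
	return vsock_core_init(&my_transport);
}
module_init(my_transport_init);

static void __exit my_transport_exit(void)
{
	vsock_core_exit();
}
module_exit(my_transport_exit);
#endif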
2009MODULE_AUTHOR("VMware, Inc.");
2010MODULE_DESCRIPTION("VMware Virtual Socket Family");
2011MODULE_VERSION("1.0.0.0-k");
2012MODULE_LICENSE("GPL v2");
diff --git a/net/vmw_vsock/af_vsock.h b/net/vmw_vsock/af_vsock.h
new file mode 100644
index 000000000000..7d64d3609ec9
--- /dev/null
+++ b/net/vmw_vsock/af_vsock.h
@@ -0,0 +1,175 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __AF_VSOCK_H__
17#define __AF_VSOCK_H__
18
19#include <linux/kernel.h>
20#include <linux/workqueue.h>
21#include <linux/vm_sockets.h>
22
23#include "vsock_addr.h"
24
25#define LAST_RESERVED_PORT 1023
26
27#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
28#define sk_vsock(__vsk) (&(__vsk)->sk)
29
30struct vsock_sock {
31 /* sk must be the first member. */
32 struct sock sk;
33 struct sockaddr_vm local_addr;
34 struct sockaddr_vm remote_addr;
35 /* Links for the global tables of bound and connected sockets. */
36 struct list_head bound_table;
37 struct list_head connected_table;
38 /* Accessed without the socket lock held. This means it can never be
39 * modified outside of socket create or destruct.
40 */
41 bool trusted;
42 bool cached_peer_allow_dgram; /* Dgram communication allowed to
43 * cached peer?
44 */
45 u32 cached_peer; /* Context ID of last dgram destination check. */
46 const struct cred *owner;
47 /* Rest are SOCK_STREAM only. */
48 long connect_timeout;
49 /* Listening socket that this came from. */
50 struct sock *listener;
51 /* Used for pending list and accept queue during connection handshake.
52 * The listening socket is the head for both lists. Sockets created
53 * for connection requests are placed in the pending list until they
54 * are connected, at which point they are put in the accept queue list
55 * so they can be accepted in accept(). If accept() cannot accept the
56 * connection, it is marked as rejected so the cleanup function knows
57 * to clean up the socket.
58 */
59 struct list_head pending_links;
60 struct list_head accept_queue;
61 bool rejected;
62 struct delayed_work dwork;
63 u32 peer_shutdown;
64 bool sent_request;
65 bool ignore_connecting_rst;
66
67 /* Private to transport. */
68 void *trans;
69};
70
71s64 vsock_stream_has_data(struct vsock_sock *vsk);
72s64 vsock_stream_has_space(struct vsock_sock *vsk);
73void vsock_pending_work(struct work_struct *work);
74struct sock *__vsock_create(struct net *net,
75 struct socket *sock,
76 struct sock *parent,
77 gfp_t priority, unsigned short type);
78
79/**** TRANSPORT ****/
80
81struct vsock_transport_recv_notify_data {
82 u64 data1; /* Transport-defined. */
83 u64 data2; /* Transport-defined. */
84 bool notify_on_block;
85};
86
87struct vsock_transport_send_notify_data {
88 u64 data1; /* Transport-defined. */
89 u64 data2; /* Transport-defined. */
90};
91
92struct vsock_transport {
93 /* Initialize/tear-down socket. */
94 int (*init)(struct vsock_sock *, struct vsock_sock *);
95 void (*destruct)(struct vsock_sock *);
96 void (*release)(struct vsock_sock *);
97
98 /* Connections. */
99 int (*connect)(struct vsock_sock *);
100
101 /* DGRAM. */
102 int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
103 int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
104 struct msghdr *msg, size_t len, int flags);
105 int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
106 struct iovec *, size_t len);
107 bool (*dgram_allow)(u32 cid, u32 port);
108
109 /* STREAM. */
110 /* TODO: stream_bind() */
111 ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
112 size_t len, int flags);
113 ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
114 size_t len);
115 s64 (*stream_has_data)(struct vsock_sock *);
116 s64 (*stream_has_space)(struct vsock_sock *);
117 u64 (*stream_rcvhiwat)(struct vsock_sock *);
118 bool (*stream_is_active)(struct vsock_sock *);
119 bool (*stream_allow)(u32 cid, u32 port);
120
121 /* Notification. */
122 int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
123 int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
124 int (*notify_recv_init)(struct vsock_sock *, size_t,
125 struct vsock_transport_recv_notify_data *);
126 int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
127 struct vsock_transport_recv_notify_data *);
128 int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
129 struct vsock_transport_recv_notify_data *);
130 int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
131 ssize_t, bool, struct vsock_transport_recv_notify_data *);
132 int (*notify_send_init)(struct vsock_sock *,
133 struct vsock_transport_send_notify_data *);
134 int (*notify_send_pre_block)(struct vsock_sock *,
135 struct vsock_transport_send_notify_data *);
136 int (*notify_send_pre_enqueue)(struct vsock_sock *,
137 struct vsock_transport_send_notify_data *);
138 int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
139 struct vsock_transport_send_notify_data *);
140
141 /* Shutdown. */
142 int (*shutdown)(struct vsock_sock *, int);
143
144 /* Buffer sizes. */
145 void (*set_buffer_size)(struct vsock_sock *, u64);
146 void (*set_min_buffer_size)(struct vsock_sock *, u64);
147 void (*set_max_buffer_size)(struct vsock_sock *, u64);
148 u64 (*get_buffer_size)(struct vsock_sock *);
149 u64 (*get_min_buffer_size)(struct vsock_sock *);
150 u64 (*get_max_buffer_size)(struct vsock_sock *);
151
152 /* Addressing. */
153 u32 (*get_local_cid)(void);
154};
155
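/* Editor's sketch, not part of this patch: the shape of a minimal transport
 * backing this ops table. Every name here is hypothetical; only the
 * signatures are taken from the structure above. Note that the core in
 * af_vsock.c invokes these callbacks without NULL checks, so a transport
 * must fill in every operation it can be called through.
 */
#if 0
static u32 null_transport_get_local_cid(void)
{
	return VMADDR_CID_ANY;	/* placeholder; a real transport knows its CID */
}

static const struct vsock_transport null_transport = {
	.get_local_cid	= null_transport_get_local_cid,
	/* .init, .release, .stream_enqueue, .notify_*, ... must also be
	 * provided before passing this to vsock_core_init().
	 */
};
#endif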
156/**** CORE ****/
157
158int vsock_core_init(const struct vsock_transport *t);
159void vsock_core_exit(void);
160
161/**** UTILS ****/
162
163void vsock_release_pending(struct sock *pending);
164void vsock_add_pending(struct sock *listener, struct sock *pending);
165void vsock_remove_pending(struct sock *listener, struct sock *pending);
166void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
167void vsock_insert_connected(struct vsock_sock *vsk);
168void vsock_remove_bound(struct vsock_sock *vsk);
169void vsock_remove_connected(struct vsock_sock *vsk);
170struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
171struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
172 struct sockaddr_vm *dst);
173void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
174
175#endif /* __AF_VSOCK_H__ */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
new file mode 100644
index 000000000000..a70ace83a153
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport.c
@@ -0,0 +1,2155 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/bitops.h>
18#include <linux/cred.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/kernel.h>
22#include <linux/kmod.h>
23#include <linux/list.h>
24#include <linux/miscdevice.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/net.h>
28#include <linux/poll.h>
29#include <linux/skbuff.h>
30#include <linux/smp.h>
31#include <linux/socket.h>
32#include <linux/stddef.h>
33#include <linux/unistd.h>
34#include <linux/wait.h>
35#include <linux/workqueue.h>
36#include <net/sock.h>
37
38#include "af_vsock.h"
39#include "vmci_transport_notify.h"
40
41static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
42static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
43static void vmci_transport_peer_attach_cb(u32 sub_id,
44 const struct vmci_event_data *ed,
45 void *client_data);
46static void vmci_transport_peer_detach_cb(u32 sub_id,
47 const struct vmci_event_data *ed,
48 void *client_data);
49static void vmci_transport_recv_pkt_work(struct work_struct *work);
50static int vmci_transport_recv_listen(struct sock *sk,
51 struct vmci_transport_packet *pkt);
52static int vmci_transport_recv_connecting_server(
53 struct sock *sk,
54 struct sock *pending,
55 struct vmci_transport_packet *pkt);
56static int vmci_transport_recv_connecting_client(
57 struct sock *sk,
58 struct vmci_transport_packet *pkt);
59static int vmci_transport_recv_connecting_client_negotiate(
60 struct sock *sk,
61 struct vmci_transport_packet *pkt);
62static int vmci_transport_recv_connecting_client_invalid(
63 struct sock *sk,
64 struct vmci_transport_packet *pkt);
65static int vmci_transport_recv_connected(struct sock *sk,
66 struct vmci_transport_packet *pkt);
67static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
68static u16 vmci_transport_new_proto_supported_versions(void);
69static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
70 bool old_pkt_proto);
71
72struct vmci_transport_recv_pkt_info {
73 struct work_struct work;
74 struct sock *sk;
75 struct vmci_transport_packet pkt;
76};
77
78static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
79 VMCI_INVALID_ID };
80static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
81
82static int PROTOCOL_OVERRIDE = -1;
83
84#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128
85#define VMCI_TRANSPORT_DEFAULT_QP_SIZE 262144
86#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144
87
88/* The default peer timeout indicates how long we will wait for a peer response
89 * to a control message.
90 */
91#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
92
93#define SS_LISTEN 255
94
95/* Helper function to convert from a VMCI error code to a VSock error code. */
96
97static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
98{
99 int err;
100
101 switch (vmci_error) {
102 case VMCI_ERROR_NO_MEM:
103 err = ENOMEM;
104 break;
105 case VMCI_ERROR_DUPLICATE_ENTRY:
106 case VMCI_ERROR_ALREADY_EXISTS:
107 err = EADDRINUSE;
108 break;
109 case VMCI_ERROR_NO_ACCESS:
110 err = EPERM;
111 break;
112 case VMCI_ERROR_NO_RESOURCES:
113 err = ENOBUFS;
114 break;
115 case VMCI_ERROR_INVALID_RESOURCE:
116 err = EHOSTUNREACH;
117 break;
118 case VMCI_ERROR_INVALID_ARGS:
119 default:
120 err = EINVAL;
121 }
122
123 return err > 0 ? -err : err;
124}
125
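/* For example, a vmci_datagram_send() failure of VMCI_ERROR_NO_MEM comes
 * back from the helper above as -ENOMEM, ready to be handed to the socket
 * layer unchanged.
 */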
126static inline void
127vmci_transport_packet_init(struct vmci_transport_packet *pkt,
128 struct sockaddr_vm *src,
129 struct sockaddr_vm *dst,
130 u8 type,
131 u64 size,
132 u64 mode,
133 struct vmci_transport_waiting_info *wait,
134 u16 proto,
135 struct vmci_handle handle)
136{
137 /* We register the stream control handler as an any-CID handle, so we
138 * must always send from a source address of VMADDR_CID_ANY.
139 */
140 pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
141 VMCI_TRANSPORT_PACKET_RID);
142 pkt->dg.dst = vmci_make_handle(dst->svm_cid,
143 VMCI_TRANSPORT_PACKET_RID);
144 pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
145 pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
146 pkt->type = type;
147 pkt->src_port = src->svm_port;
148 pkt->dst_port = dst->svm_port;
149 memset(&pkt->proto, 0, sizeof(pkt->proto));
150 memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
151
152 switch (pkt->type) {
153 case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
154 pkt->u.size = 0;
155 break;
156
157 case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
158 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
159 pkt->u.size = size;
160 break;
161
162 case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
163 case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
164 pkt->u.handle = handle;
165 break;
166
167 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
168 case VMCI_TRANSPORT_PACKET_TYPE_READ:
169 case VMCI_TRANSPORT_PACKET_TYPE_RST:
170 pkt->u.size = 0;
171 break;
172
173 case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
174 pkt->u.mode = mode;
175 break;
176
177 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
178 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
179 memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
180 break;
181
182 case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
183 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
184 pkt->u.size = size;
185 pkt->proto = proto;
186 break;
187 }
188}
189
190static inline void
191vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
192 struct sockaddr_vm *local,
193 struct sockaddr_vm *remote)
194{
195 vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
196 vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
197}
198
199static int
200__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
201 struct sockaddr_vm *src,
202 struct sockaddr_vm *dst,
203 enum vmci_transport_packet_type type,
204 u64 size,
205 u64 mode,
206 struct vmci_transport_waiting_info *wait,
207 u16 proto,
208 struct vmci_handle handle,
209 bool convert_error)
210{
211 int err;
212
213 vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
214 proto, handle);
215 err = vmci_datagram_send(&pkt->dg);
216 if (convert_error && (err < 0))
217 return vmci_transport_error_to_vsock_error(err);
218
219 return err;
220}
221
222static int
223vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
224 enum vmci_transport_packet_type type,
225 u64 size,
226 u64 mode,
227 struct vmci_transport_waiting_info *wait,
228 struct vmci_handle handle)
229{
230 struct vmci_transport_packet reply;
231 struct sockaddr_vm src, dst;
232
233 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
234 return 0;
235 } else {
236 vmci_transport_packet_get_addresses(pkt, &src, &dst);
237 return __vmci_transport_send_control_pkt(&reply, &src, &dst,
238 type,
239 size, mode, wait,
240 VSOCK_PROTO_INVALID,
241 handle, true);
242 }
243}
244
245static int
246vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
247 struct sockaddr_vm *dst,
248 enum vmci_transport_packet_type type,
249 u64 size,
250 u64 mode,
251 struct vmci_transport_waiting_info *wait,
252 struct vmci_handle handle)
253{
254 /* Note that it is safe to use a single packet across all CPUs since
255 * two tasklets of the same type are guaranteed to not ever run
256 * simultaneously. If that ever changes, or VMCI stops using tasklets,
257 * we can use per-cpu packets.
258 */
259 static struct vmci_transport_packet pkt;
260
261 return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
262 size, mode, wait,
263 VSOCK_PROTO_INVALID, handle,
264 false);
265}
266
267static int
268vmci_transport_send_control_pkt(struct sock *sk,
269 enum vmci_transport_packet_type type,
270 u64 size,
271 u64 mode,
272 struct vmci_transport_waiting_info *wait,
273 u16 proto,
274 struct vmci_handle handle)
275{
276 struct vmci_transport_packet *pkt;
277 struct vsock_sock *vsk;
278 int err;
279
280 vsk = vsock_sk(sk);
281
282 if (!vsock_addr_bound(&vsk->local_addr))
283 return -EINVAL;
284
285 if (!vsock_addr_bound(&vsk->remote_addr))
286 return -EINVAL;
287
288 pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
289 if (!pkt)
290 return -ENOMEM;
291
292 err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
293 &vsk->remote_addr, type, size,
294 mode, wait, proto, handle,
295 true);
296 kfree(pkt);
297
298 return err;
299}
300
301static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
302 struct sockaddr_vm *src,
303 struct vmci_transport_packet *pkt)
304{
305 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
306 return 0;
307 return vmci_transport_send_control_pkt_bh(
308 dst, src,
309 VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
310 0, NULL, VMCI_INVALID_HANDLE);
311}
312
313static int vmci_transport_send_reset(struct sock *sk,
314 struct vmci_transport_packet *pkt)
315{
316 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
317 return 0;
318 return vmci_transport_send_control_pkt(sk,
319 VMCI_TRANSPORT_PACKET_TYPE_RST,
320 0, 0, NULL, VSOCK_PROTO_INVALID,
321 VMCI_INVALID_HANDLE);
322}
323
324static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
325{
326 return vmci_transport_send_control_pkt(
327 sk,
328 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
329 size, 0, NULL,
330 VSOCK_PROTO_INVALID,
331 VMCI_INVALID_HANDLE);
332}
333
334static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
335 u16 version)
336{
337 return vmci_transport_send_control_pkt(
338 sk,
339 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
340 size, 0, NULL, version,
341 VMCI_INVALID_HANDLE);
342}
343
344static int vmci_transport_send_qp_offer(struct sock *sk,
345 struct vmci_handle handle)
346{
347 return vmci_transport_send_control_pkt(
348 sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
349 0, NULL,
350 VSOCK_PROTO_INVALID, handle);
351}
352
353static int vmci_transport_send_attach(struct sock *sk,
354 struct vmci_handle handle)
355{
356 return vmci_transport_send_control_pkt(
357 sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
358 0, 0, NULL, VSOCK_PROTO_INVALID,
359 handle);
360}
361
362static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
363{
364 return vmci_transport_reply_control_pkt_fast(
365 pkt,
366 VMCI_TRANSPORT_PACKET_TYPE_RST,
367 0, 0, NULL,
368 VMCI_INVALID_HANDLE);
369}
370
371static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
372 struct sockaddr_vm *src)
373{
374 return vmci_transport_send_control_pkt_bh(
375 dst, src,
376 VMCI_TRANSPORT_PACKET_TYPE_INVALID,
377 0, 0, NULL, VMCI_INVALID_HANDLE);
378}
379
380int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
381 struct sockaddr_vm *src)
382{
383 return vmci_transport_send_control_pkt_bh(
384 dst, src,
385 VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
386 0, NULL, VMCI_INVALID_HANDLE);
387}
388
389int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
390 struct sockaddr_vm *src)
391{
392 return vmci_transport_send_control_pkt_bh(
393 dst, src,
394 VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
395 0, NULL, VMCI_INVALID_HANDLE);
396}
397
398int vmci_transport_send_wrote(struct sock *sk)
399{
400 return vmci_transport_send_control_pkt(
401 sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
402 0, NULL, VSOCK_PROTO_INVALID,
403 VMCI_INVALID_HANDLE);
404}
405
406int vmci_transport_send_read(struct sock *sk)
407{
408 return vmci_transport_send_control_pkt(
409 sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
410 0, NULL, VSOCK_PROTO_INVALID,
411 VMCI_INVALID_HANDLE);
412}
413
414int vmci_transport_send_waiting_write(struct sock *sk,
415 struct vmci_transport_waiting_info *wait)
416{
417 return vmci_transport_send_control_pkt(
418 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
419 0, 0, wait, VSOCK_PROTO_INVALID,
420 VMCI_INVALID_HANDLE);
421}
422
423int vmci_transport_send_waiting_read(struct sock *sk,
424 struct vmci_transport_waiting_info *wait)
425{
426 return vmci_transport_send_control_pkt(
427 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
428 0, 0, wait, VSOCK_PROTO_INVALID,
429 VMCI_INVALID_HANDLE);
430}
431
432static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
433{
434 return vmci_transport_send_control_pkt(
435 &vsk->sk,
436 VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
437 0, mode, NULL,
438 VSOCK_PROTO_INVALID,
439 VMCI_INVALID_HANDLE);
440}
441
442static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
443{
444 return vmci_transport_send_control_pkt(sk,
445 VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
446 size, 0, NULL,
447 VSOCK_PROTO_INVALID,
448 VMCI_INVALID_HANDLE);
449}
450
451static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
452 u16 version)
453{
454 return vmci_transport_send_control_pkt(
455 sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
456 size, 0, NULL, version,
457 VMCI_INVALID_HANDLE);
458}
459
460static struct sock *vmci_transport_get_pending(
461 struct sock *listener,
462 struct vmci_transport_packet *pkt)
463{
464 struct vsock_sock *vlistener;
465 struct vsock_sock *vpending;
466 struct sock *pending;
467
468 vlistener = vsock_sk(listener);
469
470 list_for_each_entry(vpending, &vlistener->pending_links,
471 pending_links) {
472 struct sockaddr_vm src;
473 struct sockaddr_vm dst;
474
475 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
476 vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
477
478 if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
479 vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
480 pending = sk_vsock(vpending);
481 sock_hold(pending);
482 goto found;
483 }
484 }
485
486 pending = NULL;
487found:
488 return pending;
489
490}
491
492static void vmci_transport_release_pending(struct sock *pending)
493{
494 sock_put(pending);
495}
496
497/* We allow two kinds of sockets to communicate with a restricted VM: 1)
498 * trusted sockets 2) sockets from applications running as the same user as the
499 * VM (this is only true for the host side and only when using hosted products)
500 */
501
502static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
503{
504 return vsock->trusted ||
505 vmci_is_context_owner(peer_cid, vsock->owner->uid);
506}
507
508/* We allow sending datagrams to and receiving datagrams from a restricted VM
509 * only if it is trusted as described in vmci_transport_is_trusted.
510 */
511
512static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
513{
514 if (vsock->cached_peer != peer_cid) {
515 vsock->cached_peer = peer_cid;
516 if (!vmci_transport_is_trusted(vsock, peer_cid) &&
517 (vmci_context_get_priv_flags(peer_cid) &
518 VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
519 vsock->cached_peer_allow_dgram = false;
520 } else {
521 vsock->cached_peer_allow_dgram = true;
522 }
523 }
524
525 return vsock->cached_peer_allow_dgram;
526}
527
528static int
529vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
530 struct vmci_handle *handle,
531 u64 produce_size,
532 u64 consume_size,
533 u32 peer, u32 flags, bool trusted)
534{
535 int err = 0;
536
537 if (trusted) {
538 /* Try to allocate our queue pair as trusted. This will only
539 * work if vsock is running in the host.
540 */
541
542 err = vmci_qpair_alloc(qpair, handle, produce_size,
543 consume_size,
544 peer, flags,
545 VMCI_PRIVILEGE_FLAG_TRUSTED);
546 if (err != VMCI_ERROR_NO_ACCESS)
547 goto out;
548
549 }
550
551 err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
552 peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
553out:
554 if (err < 0) {
555 pr_err("Could not attach to queue pair with %d\n",
556 err);
557 err = vmci_transport_error_to_vsock_error(err);
558 }
559
560 return err;
561}
562
563static int
564vmci_transport_datagram_create_hnd(u32 resource_id,
565 u32 flags,
566 vmci_datagram_recv_cb recv_cb,
567 void *client_data,
568 struct vmci_handle *out_handle)
569{
570 int err = 0;
571
572 /* Try to allocate our datagram handler as trusted. This will only work
573 * if vsock is running in the host.
574 */
575
576 err = vmci_datagram_create_handle_priv(resource_id, flags,
577 VMCI_PRIVILEGE_FLAG_TRUSTED,
578 recv_cb,
579 client_data, out_handle);
580
581 if (err == VMCI_ERROR_NO_ACCESS)
582 err = vmci_datagram_create_handle(resource_id, flags,
583 recv_cb, client_data,
584 out_handle);
585
586 return err;
587}
588
589/* This is invoked as part of a tasklet that's scheduled when the VMCI
590 * interrupt fires. This is run in bottom-half context and if it ever needs to
591 * sleep it should defer that work to a work queue.
592 */
593
594static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
595{
596 struct sock *sk;
597 size_t size;
598 struct sk_buff *skb;
599 struct vsock_sock *vsk;
600
601 sk = (struct sock *)data;
602
603 /* This handler is privileged when this module is running on the host.
604 * We will get datagrams from all endpoints (even VMs that are in a
605 * restricted context). If we get one from a restricted context then
606 * the destination socket must be trusted.
607 *
608 * NOTE: We access the socket struct without holding the lock here.
609 * This is ok because the field we are interested in is never modified
610 * outside of the create and destruct socket functions.
611 */
612 vsk = vsock_sk(sk);
613 if (!vmci_transport_allow_dgram(vsk, dg->src.context))
614 return VMCI_ERROR_NO_ACCESS;
615
616 size = VMCI_DG_SIZE(dg);
617
618 /* Attach the packet to the socket's receive queue as an sk_buff. */
619 skb = alloc_skb(size, GFP_ATOMIC);
620 if (skb) {
621 /* sk_receive_skb() will do a sock_put(), so hold here. */
622 sock_hold(sk);
623 skb_put(skb, size);
624 memcpy(skb->data, dg, size);
625 sk_receive_skb(sk, skb, 0);
626 }
627
628 return VMCI_SUCCESS;
629}
630
631static bool vmci_transport_stream_allow(u32 cid, u32 port)
632{
633 static const u32 non_socket_contexts[] = {
634 VMADDR_CID_HYPERVISOR,
635 VMADDR_CID_RESERVED,
636 };
637 int i;
638
639 BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));
640
641 for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
642 if (cid == non_socket_contexts[i])
643 return false;
644 }
645
646 return true;
647}
648
649/* This is invoked as part of a tasklet that's scheduled when the VMCI
650 * interrupt fires. This is run in bottom-half context but it defers most of
651 * its work to the packet handling work queue.
652 */
653
654static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
655{
656 struct sock *sk;
657 struct sockaddr_vm dst;
658 struct sockaddr_vm src;
659 struct vmci_transport_packet *pkt;
660 struct vsock_sock *vsk;
661 bool bh_process_pkt;
662 int err;
663
664 sk = NULL;
665 err = VMCI_SUCCESS;
666 bh_process_pkt = false;
667
668 /* Ignore incoming packets from contexts without sockets, or resources
669 * that aren't vsock implementations.
670 */
671
672 if (!vmci_transport_stream_allow(dg->src.context, -1)
673 || VMCI_TRANSPORT_PACKET_RID != dg->src.resource)
674 return VMCI_ERROR_NO_ACCESS;
675
676 if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
677 /* Drop datagrams that do not contain full VSock packets. */
678 return VMCI_ERROR_INVALID_ARGS;
679
680 pkt = (struct vmci_transport_packet *)dg;
681
682 /* Find the socket that should handle this packet. First we look for a
683 * connected socket and if there is none we look for a socket bound to
684 * the destination address.
685 */
686 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
687 vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
688
689 sk = vsock_find_connected_socket(&src, &dst);
690 if (!sk) {
691 sk = vsock_find_bound_socket(&dst);
692 if (!sk) {
693 /* We could not find a socket for this specified
694 * address. If this packet is a RST, we just drop it.
695 * If it is another packet, we send a RST. Note that
696 * we do not send a RST reply to RSTs so that we do not
697 * continually send RSTs between two endpoints.
698 *
699 * Note that since this is a reply, dst is src and src
700 * is dst.
701 */
702 if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
703 pr_err("unable to send reset\n");
704
705 err = VMCI_ERROR_NOT_FOUND;
706 goto out;
707 }
708 }
709
710 /* If the received packet type is beyond all types known to this
711 * implementation, reply with an invalid message. Hopefully this will
712 * help when implementing backwards compatibility in the future.
713 */
714 if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
715 vmci_transport_send_invalid_bh(&dst, &src);
716 err = VMCI_ERROR_INVALID_ARGS;
717 goto out;
718 }
719
720 /* This handler is privileged when this module is running on the host.
721 * We will get datagram connect requests from all endpoints (even VMs
722 * that are in a restricted context). If we get one from a restricted
723 * context then the destination socket must be trusted.
724 *
725 * NOTE: We access the socket struct without holding the lock here.
726 * This is ok because the field we are interested in is never modified
727 * outside of the create and destruct socket functions.
728 */
729 vsk = vsock_sk(sk);
730 if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
731 err = VMCI_ERROR_NO_ACCESS;
732 goto out;
733 }
734
735 /* We do almost everything in a work queue, but let's fast path the
736 * notification of reads and writes to help data transfer performance.
737 * We can only do this if there is no process context code executing
738 * for this socket since that may change the state.
739 */
740 bh_lock_sock(sk);
741
742 if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
743 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
744 sk, pkt, true, &dst, &src,
745 &bh_process_pkt);
746
747 bh_unlock_sock(sk);
748
749 if (!bh_process_pkt) {
750 struct vmci_transport_recv_pkt_info *recv_pkt_info;
751
752 recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
753 if (!recv_pkt_info) {
754 if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
755 pr_err("unable to send reset\n");
756
757 err = VMCI_ERROR_NO_MEM;
758 goto out;
759 }
760
761 recv_pkt_info->sk = sk;
762 memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
763 INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);
764
765 schedule_work(&recv_pkt_info->work);
766 /* Clear sk so that the reference count incremented by one of
767 * the Find functions above is not decremented below. We need
768 * that reference count for the packet handler we've scheduled
769 * to run.
770 */
771 sk = NULL;
772 }
773
774out:
775 if (sk)
776 sock_put(sk);
777
778 return err;
779}
780
781static void vmci_transport_peer_attach_cb(u32 sub_id,
782 const struct vmci_event_data *e_data,
783 void *client_data)
784{
785 struct sock *sk = client_data;
786 const struct vmci_event_payload_qp *e_payload;
787 struct vsock_sock *vsk;
788
789 e_payload = vmci_event_data_const_payload(e_data);
790
791 vsk = vsock_sk(sk);
792
793 /* We don't ask for delayed CBs when we subscribe to this event (we
794 * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
795 * guarantees in that case about what context we might be running in,
796 * so it could be BH or process, blockable or non-blockable. So we
797 * need to account for all possible contexts here.
798 */
799 local_bh_disable();
800 bh_lock_sock(sk);
801
802 /* XXX This is lame, we should provide a way to look up sockets by
803 * qp_handle.
804 */
805 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
806 e_payload->handle)) {
807 /* XXX This doesn't do anything, but in the future we may want
808 * to set a flag here to verify the attach really did occur and
809 * we weren't just sent a datagram claiming it was.
810 */
811 goto out;
812 }
813
814out:
815 bh_unlock_sock(sk);
816 local_bh_enable();
817}
818
819static void vmci_transport_handle_detach(struct sock *sk)
820{
821 struct vsock_sock *vsk;
822
823 vsk = vsock_sk(sk);
824 if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
825 sock_set_flag(sk, SOCK_DONE);
826
827 /* On a detach the peer will not be sending or receiving
828 * anymore.
829 */
830 vsk->peer_shutdown = SHUTDOWN_MASK;
831
832 /* We should not be sending anymore since the peer won't be
833 * there to receive, but we can still receive if there is data
834 * left in our consume queue.
835 */
836 if (vsock_stream_has_data(vsk) <= 0) {
837 if (sk->sk_state == SS_CONNECTING) {
838 /* The peer may detach from a queue pair while
839 * we are still in the connecting state, i.e.,
840 * if the peer VM is killed after attaching to
841 * a queue pair, but before we complete the
842 * handshake. In that case, we treat the detach
843 * event like a reset.
844 */
845
846 sk->sk_state = SS_UNCONNECTED;
847 sk->sk_err = ECONNRESET;
848 sk->sk_error_report(sk);
849 return;
850 }
851 sk->sk_state = SS_UNCONNECTED;
852 }
853 sk->sk_state_change(sk);
854 }
855}
856
857static void vmci_transport_peer_detach_cb(u32 sub_id,
858 const struct vmci_event_data *e_data,
859 void *client_data)
860{
861 struct sock *sk = client_data;
862 const struct vmci_event_payload_qp *e_payload;
863 struct vsock_sock *vsk;
864
865 e_payload = vmci_event_data_const_payload(e_data);
866 vsk = vsock_sk(sk);
867 if (vmci_handle_is_invalid(e_payload->handle))
868 return;
869
870 /* Same rules for locking as for peer_attach_cb(). */
871 local_bh_disable();
872 bh_lock_sock(sk);
873
874 /* XXX This is lame, we should provide a way to look up sockets by
875 * qp_handle.
876 */
877 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
878 e_payload->handle))
879 vmci_transport_handle_detach(sk);
880
881 bh_unlock_sock(sk);
882 local_bh_enable();
883}
884
885static void vmci_transport_qp_resumed_cb(u32 sub_id,
886 const struct vmci_event_data *e_data,
887 void *client_data)
888{
889 vsock_for_each_connected_socket(vmci_transport_handle_detach);
890}
891
892static void vmci_transport_recv_pkt_work(struct work_struct *work)
893{
894 struct vmci_transport_recv_pkt_info *recv_pkt_info;
895 struct vmci_transport_packet *pkt;
896 struct sock *sk;
897
898 recv_pkt_info =
899 container_of(work, struct vmci_transport_recv_pkt_info, work);
900 sk = recv_pkt_info->sk;
901 pkt = &recv_pkt_info->pkt;
902
903 lock_sock(sk);
904
905 switch (sk->sk_state) {
906 case SS_LISTEN:
907 vmci_transport_recv_listen(sk, pkt);
908 break;
909 case SS_CONNECTING:
910 /* Processing of pending connections for servers goes through
911 * the listening socket, so see vmci_transport_recv_listen()
912 * for that path.
913 */
914 vmci_transport_recv_connecting_client(sk, pkt);
915 break;
916 case SS_CONNECTED:
917 vmci_transport_recv_connected(sk, pkt);
918 break;
919 default:
920 /* Because this function does not run in the same context as
921 * vmci_transport_recv_stream_cb it is possible that the
922 * socket has closed. We need to let the other side know or it
923 * could be sitting in a connect and hang forever. Send a
924 * reset to prevent that.
925 */
926 vmci_transport_send_reset(sk, pkt);
927 goto out;
928 }
929
930out:
931 release_sock(sk);
932 kfree(recv_pkt_info);
933 /* Release reference obtained in the stream callback when we fetched
934 * this socket out of the bound or connected list.
935 */
936 sock_put(sk);
937}
938
939static int vmci_transport_recv_listen(struct sock *sk,
940 struct vmci_transport_packet *pkt)
941{
942 struct sock *pending;
943 struct vsock_sock *vpending;
944 int err;
945 u64 qp_size;
946 bool old_request = false;
947 bool old_pkt_proto = false;
948
949 err = 0;
950
951 /* Because we are in the listen state, we could be receiving a packet
952 * for ourselves or for any previous connection request that we received.
953 * If it's the latter, we try to find a socket in our list of pending
954 * connections and, if we do, call the appropriate handler for the
955 * state that socket is in. Otherwise we try to service the
956 * connection request.
957 */
958 pending = vmci_transport_get_pending(sk, pkt);
959 if (pending) {
960 lock_sock(pending);
961 switch (pending->sk_state) {
962 case SS_CONNECTING:
963 err = vmci_transport_recv_connecting_server(sk,
964 pending,
965 pkt);
966 break;
967 default:
968 vmci_transport_send_reset(pending, pkt);
969 err = -EINVAL;
970 }
971
972 if (err < 0)
973 vsock_remove_pending(sk, pending);
974
975 release_sock(pending);
976 vmci_transport_release_pending(pending);
977
978 return err;
979 }
980
981 /* The listen state only accepts connection requests. Reply with a
982 * reset unless we received a reset.
983 */
984
985 if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
986 pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
987 vmci_transport_reply_reset(pkt);
988 return -EINVAL;
989 }
990
991 if (pkt->u.size == 0) {
992 vmci_transport_reply_reset(pkt);
993 return -EINVAL;
994 }
995
996 /* If this socket can't accommodate this connection request, we send a
997 * reset. Otherwise we create and initialize a child socket and reply
998 * with a connection negotiation.
999 */
1000 if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
1001 vmci_transport_reply_reset(pkt);
1002 return -ECONNREFUSED;
1003 }
1004
1005 pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
1006 sk->sk_type);
1007 if (!pending) {
1008 vmci_transport_send_reset(sk, pkt);
1009 return -ENOMEM;
1010 }
1011
1012 vpending = vsock_sk(pending);
1013
1014 vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
1015 pkt->dst_port);
1016 vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
1017 pkt->src_port);
1018
1019 /* If the proposed size fits within our min/max, accept it. Otherwise
1020 * propose our own size.
1021 */
1022 if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
1023 pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
1024 qp_size = pkt->u.size;
1025 } else {
1026 qp_size = vmci_trans(vpending)->queue_pair_size;
1027 }
1028
1029 /* Figure out if we are using old or new requests based on the override
1030 * and the packet types sent by our peer.
1031 */
1032 if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1033 old_request = old_pkt_proto;
1034 } else {
1035 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
1036 old_request = true;
1037 else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
1038 old_request = false;
1039
1040 }
1041
1042 if (old_request) {
1043 /* Handle a REQUEST (or override) */
1044 u16 version = VSOCK_PROTO_INVALID;
1045 if (vmci_transport_proto_to_notify_struct(
1046 pending, &version, true))
1047 err = vmci_transport_send_negotiate(pending, qp_size);
1048 else
1049 err = -EINVAL;
1050
1051 } else {
1052 /* Handle a REQUEST2 (or override) */
1053 int proto_int = pkt->proto;
1054 int pos;
1055 u16 active_proto_version = 0;
1056
1057 /* The list of possible protocols is the intersection of the
1058 * protocols the client supports and the protocols that we
1059 * support.
1060 */
1061 proto_int &= vmci_transport_new_proto_supported_versions();
1062
1063 /* We choose the highest possible protocol version and use that
1064 * one.
1065 */
1066 pos = fls(proto_int);
1067 if (pos) {
1068 active_proto_version = (1 << (pos - 1));
1069 if (vmci_transport_proto_to_notify_struct(
1070 pending, &active_proto_version, false))
1071 err = vmci_transport_send_negotiate2(pending,
1072 qp_size,
1073 active_proto_version);
1074 else
1075 err = -EINVAL;
1076
1077 } else {
1078 err = -EINVAL;
1079 }
1080 }
1081
1082 if (err < 0) {
1083 vmci_transport_send_reset(sk, pkt);
1084 sock_put(pending);
1085 err = vmci_transport_error_to_vsock_error(err);
1086 goto out;
1087 }
1088
1089 vsock_add_pending(sk, pending);
1090 sk->sk_ack_backlog++;
1091
1092 pending->sk_state = SS_CONNECTING;
1093 vmci_trans(vpending)->produce_size =
1094 vmci_trans(vpending)->consume_size = qp_size;
1095 vmci_trans(vpending)->queue_pair_size = qp_size;
1096
1097 vmci_trans(vpending)->notify_ops->process_request(pending);
1098
1099 /* We might never receive another message for this socket and it's not
1100 * connected to any process, so we have to ensure it gets cleaned up
1101 * ourselves. Our delayed work function will take care of that. Note
1102 * that we do not ever cancel this function since we have few
1103 * guarantees about its state when calling cancel_delayed_work().
1104 * Instead we hold a reference on the socket for that function and make
1105 * it capable of handling cases where it needs to do nothing but
1106 * release that reference.
1107 */
1108 vpending->listener = sk;
1109 sock_hold(sk);
1110 sock_hold(pending);
1111 INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
1112 schedule_delayed_work(&vpending->dwork, HZ);
1113
1114out:
1115 return err;
1116}
1117
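/* Editor's sketch, not part of this patch: the highest-common-version
 * selection above in isolation. If the peer offers versions 1 and 2
 * (mask 0x3) while we support versions 2 and 4 (mask 0x6), the
 * intersection is 0x2, fls() returns 2, and 1 << (2 - 1) == 2 is the
 * version picked.
 */
#if 0
static u16 pick_highest_common_version(u16 peer_mask, u16 local_mask)
{
	int pos = fls(peer_mask & local_mask);	/* 1-based highest set bit */

	return pos ? 1 << (pos - 1) : 0;	/* 0 means no common version */
}
#endif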
1118static int
1119vmci_transport_recv_connecting_server(struct sock *listener,
1120 struct sock *pending,
1121 struct vmci_transport_packet *pkt)
1122{
1123 struct vsock_sock *vpending;
1124 struct vmci_handle handle;
1125 struct vmci_qp *qpair;
1126 bool is_local;
1127 u32 flags;
1128 u32 detach_sub_id;
1129 int err;
1130 int skerr;
1131
1132 vpending = vsock_sk(pending);
1133 detach_sub_id = VMCI_INVALID_ID;
1134
1135 switch (pkt->type) {
1136 case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
1137 if (vmci_handle_is_invalid(pkt->u.handle)) {
1138 vmci_transport_send_reset(pending, pkt);
1139 skerr = EPROTO;
1140 err = -EINVAL;
1141 goto destroy;
1142 }
1143 break;
1144 default:
1145 /* Close and cleanup the connection. */
1146 vmci_transport_send_reset(pending, pkt);
1147 skerr = EPROTO;
1148 err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
1149 goto destroy;
1150 }
1151
1152 /* In order to complete the connection we need to attach to the offered
1153 * queue pair and send an attach notification. We also subscribe to the
1154 * detach event so we know when our peer goes away, and we do that
1155 * before attaching so we don't miss an event. If all this succeeds,
1156 * we update our state and wakeup anything waiting in accept() for a
1157 * connection.
1158 */
1159
1160 /* We don't care about attach since we ensure the other side has
1161 * attached by specifying the ATTACH_ONLY flag below.
1162 */
1163 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1164 vmci_transport_peer_detach_cb,
1165 pending, &detach_sub_id);
1166 if (err < VMCI_SUCCESS) {
1167 vmci_transport_send_reset(pending, pkt);
1168 err = vmci_transport_error_to_vsock_error(err);
1169 skerr = -err;
1170 goto destroy;
1171 }
1172
1173 vmci_trans(vpending)->detach_sub_id = detach_sub_id;
1174
1175 /* Now attach to the queue pair the client created. */
1176 handle = pkt->u.handle;
1177
1178 /* vpending->local_addr always has a context id so we do not need to
1179 * worry about VMADDR_CID_ANY in this case.
1180 */
1181 is_local =
1182 vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
1183 flags = VMCI_QPFLAG_ATTACH_ONLY;
1184 flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;
1185
1186 err = vmci_transport_queue_pair_alloc(
1187 &qpair,
1188 &handle,
1189 vmci_trans(vpending)->produce_size,
1190 vmci_trans(vpending)->consume_size,
1191 pkt->dg.src.context,
1192 flags,
1193 vmci_transport_is_trusted(
1194 vpending,
1195 vpending->remote_addr.svm_cid));
1196 if (err < 0) {
1197 vmci_transport_send_reset(pending, pkt);
1198 skerr = -err;
1199 goto destroy;
1200 }
1201
1202 vmci_trans(vpending)->qp_handle = handle;
1203 vmci_trans(vpending)->qpair = qpair;
1204
1205 /* When we send the attach message, we must be ready to handle incoming
1206 * control messages on the newly connected socket. So we move the
1207 * pending socket to the connected state before sending the attach
1208 * message. Otherwise, an incoming packet triggered by the attach being
1209 * received by the peer may be processed concurrently with what happens
1210 * below after sending the attach message, and that incoming packet
1211 * will find the listening socket instead of the (currently) pending
1212 * socket. Note that enqueueing the socket increments the reference
1213 * count, so even if a reset comes before the connection is accepted,
1214 * the socket will be valid until it is removed from the queue.
1215 *
1216 * If we fail sending the attach below, we remove the socket from the
1217 * connected list and move the socket to SS_UNCONNECTED before
1218 * releasing the lock, so a pending slow path processing of an incoming
1219 * packet will not see the socket in the connected state in that case.
1220 */
1221 pending->sk_state = SS_CONNECTED;
1222
1223 vsock_insert_connected(vpending);
1224
1225 /* Notify our peer of our attach. */
1226 err = vmci_transport_send_attach(pending, handle);
1227 if (err < 0) {
1228 vsock_remove_connected(vpending);
1229 pr_err("Could not send attach\n");
1230 vmci_transport_send_reset(pending, pkt);
1231 err = vmci_transport_error_to_vsock_error(err);
1232 skerr = -err;
1233 goto destroy;
1234 }
1235
1236 /* We have a connection. Move the now connected socket from the
1237 * listener's pending list to the accept queue so callers of accept()
1238 * can find it.
1239 */
1240 vsock_remove_pending(listener, pending);
1241 vsock_enqueue_accept(listener, pending);
1242
1243 /* Callers of accept() will be waiting on the listening socket, not
1244 * the pending socket.
1245 */
1246 listener->sk_state_change(listener);
1247
1248 return 0;
1249
1250destroy:
1251 pending->sk_err = skerr;
1252 pending->sk_state = SS_UNCONNECTED;
1253 /* As long as we drop our reference, all necessary cleanup will happen
1254 * when the cleanup function drops its reference and our destruct
1255 * implementation is called. Note that since the listen handler will
1256 * remove pending from the pending list upon our failure, the cleanup
1257 * function won't drop the additional reference, which is why we do it
1258 * here.
1259 */
1260 sock_put(pending);
1261
1262 return err;
1263}
1264
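/* Editor's note, not part of this patch: in packet terms, the handshake
 * implemented by the surrounding recv_* functions is:
 *
 *	client				server (listener)
 *	REQUEST/REQUEST2  ------------>	create pending child socket
 *			  <------------	NEGOTIATE/NEGOTIATE2
 *	allocate queue pair
 *	OFFER		  ------------>	attach to queue pair
 *			  <------------	ATTACH
 *	SS_CONNECTED			SS_CONNECTED
 */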
1265static int
1266vmci_transport_recv_connecting_client(struct sock *sk,
1267 struct vmci_transport_packet *pkt)
1268{
1269 struct vsock_sock *vsk;
1270 int err;
1271 int skerr;
1272
1273 vsk = vsock_sk(sk);
1274
1275 switch (pkt->type) {
1276 case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
1277 if (vmci_handle_is_invalid(pkt->u.handle) ||
1278 !vmci_handle_is_equal(pkt->u.handle,
1279 vmci_trans(vsk)->qp_handle)) {
1280 skerr = EPROTO;
1281 err = -EINVAL;
1282 goto destroy;
1283 }
1284
1285 /* Signify the socket is connected and wakeup the waiter in
1286 * connect(). Also place the socket in the connected table for
1287 * accounting (it can already be found since it's in the bound
1288 * table).
1289 */
1290 sk->sk_state = SS_CONNECTED;
1291 sk->sk_socket->state = SS_CONNECTED;
1292 vsock_insert_connected(vsk);
1293 sk->sk_state_change(sk);
1294
1295 break;
1296 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
1297 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
1298 if (pkt->u.size == 0
1299 || pkt->dg.src.context != vsk->remote_addr.svm_cid
1300 || pkt->src_port != vsk->remote_addr.svm_port
1301 || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
1302 || vmci_trans(vsk)->qpair
1303 || vmci_trans(vsk)->produce_size != 0
1304 || vmci_trans(vsk)->consume_size != 0
1305 || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
1306 || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1307 skerr = EPROTO;
1308 err = -EINVAL;
1309
1310 goto destroy;
1311 }
1312
1313 err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
1314 if (err) {
1315 skerr = -err;
1316 goto destroy;
1317 }
1318
1319 break;
1320 case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
1321 err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
1322 if (err) {
1323 skerr = -err;
1324 goto destroy;
1325 }
1326
1327 break;
1328 case VMCI_TRANSPORT_PACKET_TYPE_RST:
1329 /* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
1330 * continue processing here after they sent an INVALID packet.
1331 * This meant that we got a RST after the INVALID. We ignore a
1332 * RST after an INVALID. The common code doesn't send the RST
1333 * ... so we can hang if an old version of the common code
1334 * fails between getting a REQUEST and sending an OFFER back.
1335 * Not much we can do about it... except hope that it doesn't
1336 * happen.
1337 */
1338 if (vsk->ignore_connecting_rst) {
1339 vsk->ignore_connecting_rst = false;
1340 } else {
1341 skerr = ECONNRESET;
1342 err = 0;
1343 goto destroy;
1344 }
1345
1346 break;
1347 default:
1348 /* Close and cleanup the connection. */
1349 skerr = EPROTO;
1350 err = -EINVAL;
1351 goto destroy;
1352 }
1353
1354 return 0;
1355
1356destroy:
1357 vmci_transport_send_reset(sk, pkt);
1358
1359 sk->sk_state = SS_UNCONNECTED;
1360 sk->sk_err = skerr;
1361 sk->sk_error_report(sk);
1362 return err;
1363}
1364
1365static int vmci_transport_recv_connecting_client_negotiate(
1366 struct sock *sk,
1367 struct vmci_transport_packet *pkt)
1368{
1369 int err;
1370 struct vsock_sock *vsk;
1371 struct vmci_handle handle;
1372 struct vmci_qp *qpair;
1373 u32 attach_sub_id;
1374 u32 detach_sub_id;
1375 bool is_local;
1376 u32 flags;
1377 bool old_proto = true;
1378 bool old_pkt_proto;
1379 u16 version;
1380
1381 vsk = vsock_sk(sk);
1382 handle = VMCI_INVALID_HANDLE;
1383 attach_sub_id = VMCI_INVALID_ID;
1384 detach_sub_id = VMCI_INVALID_ID;
1385
1386 /* If we have gotten here then we should be past the point where an
1387 * old Linux vsock could have sent the bogus RST.
1388 */
1389 vsk->sent_request = false;
1390 vsk->ignore_connecting_rst = false;
1391
1392 /* Verify that we're OK with the proposed queue pair size */
1393 if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
1394 pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
1395 err = -EINVAL;
1396 goto destroy;
1397 }
1398
1399 /* At this point we know the CID the peer is using to talk to us. */
1400
1401 if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
1402 vsk->local_addr.svm_cid = pkt->dg.dst.context;
1403
1404 /* Setup the notify ops to be the highest supported version that both
1405 * the server and the client support.
1406 */
1407
1408 if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1409 old_proto = old_pkt_proto;
1410 } else {
1411 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
1412 old_proto = true;
1413 else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
1414 old_proto = false;
1416 }
1417
1418 if (old_proto)
1419 version = VSOCK_PROTO_INVALID;
1420 else
1421 version = pkt->proto;
1422
1423 if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
1424 err = -EINVAL;
1425 goto destroy;
1426 }
1427
1428 /* Subscribe to attach and detach events first.
1429 *
1430 * XXX We attach once for each queue pair created for now so it is easy
1431 * to find the socket (it's provided), but later we should only
1432 * subscribe once and add a way to lookup sockets by queue pair handle.
1433 */
1434 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
1435 vmci_transport_peer_attach_cb,
1436 sk, &attach_sub_id);
1437 if (err < VMCI_SUCCESS) {
1438 err = vmci_transport_error_to_vsock_error(err);
1439 goto destroy;
1440 }
1441
1442 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1443 vmci_transport_peer_detach_cb,
1444 sk, &detach_sub_id);
1445 if (err < VMCI_SUCCESS) {
1446 err = vmci_transport_error_to_vsock_error(err);
1447 goto destroy;
1448 }
1449
1450 /* Make VMCI select the handle for us. */
1451 handle = VMCI_INVALID_HANDLE;
1452 is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
1453 flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
1454
1455 err = vmci_transport_queue_pair_alloc(&qpair,
1456 &handle,
1457 pkt->u.size,
1458 pkt->u.size,
1459 vsk->remote_addr.svm_cid,
1460 flags,
1461 vmci_transport_is_trusted(
1462 vsk,
1463 vsk->remote_addr.svm_cid));
1465 if (err < 0)
1466 goto destroy;
1467
1468 err = vmci_transport_send_qp_offer(sk, handle);
1469 if (err < 0) {
1470 err = vmci_transport_error_to_vsock_error(err);
1471 goto destroy;
1472 }
1473
1474 vmci_trans(vsk)->qp_handle = handle;
1475 vmci_trans(vsk)->qpair = qpair;
1476
1477 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
1478 pkt->u.size;
1479
1480 vmci_trans(vsk)->attach_sub_id = attach_sub_id;
1481 vmci_trans(vsk)->detach_sub_id = detach_sub_id;
1482
1483 vmci_trans(vsk)->notify_ops->process_negotiate(sk);
1484
1485 return 0;
1486
1487destroy:
1488 if (attach_sub_id != VMCI_INVALID_ID)
1489 vmci_event_unsubscribe(attach_sub_id);
1490
1491 if (detach_sub_id != VMCI_INVALID_ID)
1492 vmci_event_unsubscribe(detach_sub_id);
1493
1494 if (!vmci_handle_is_invalid(handle))
1495 vmci_qpair_detach(&qpair);
1496
1497 return err;
1498}
1499
1500static int
1501vmci_transport_recv_connecting_client_invalid(struct sock *sk,
1502 struct vmci_transport_packet *pkt)
1503{
1504 int err = 0;
1505 struct vsock_sock *vsk = vsock_sk(sk);
1506
1507 if (vsk->sent_request) {
1508 vsk->sent_request = false;
1509 vsk->ignore_connecting_rst = true;
1510
1511 err = vmci_transport_send_conn_request(
1512 sk, vmci_trans(vsk)->queue_pair_size);
1513 if (err < 0)
1514 err = vmci_transport_error_to_vsock_error(err);
1515 else
1516 err = 0;
1518 }
1519
1520 return err;
1521}
1522
1523static int vmci_transport_recv_connected(struct sock *sk,
1524 struct vmci_transport_packet *pkt)
1525{
1526 struct vsock_sock *vsk;
1527 bool pkt_processed = false;
1528
1529 /* In cases where we are closing the connection, it's sufficient to
1530 * mark the state change (and maybe error) and wake up any waiting
1531 * threads. Since this is a connected socket, it's owned by a user
1532 * process and will be cleaned up when the failure is passed back on
1533 * the current or next system call. Our system call implementations
1534 * must therefore check for error and state changes on entry and when
1535 * being awoken.
1536 */
1537 switch (pkt->type) {
1538 case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
1539 if (pkt->u.mode) {
1540 vsk = vsock_sk(sk);
1541
1542 vsk->peer_shutdown |= pkt->u.mode;
1543 sk->sk_state_change(sk);
1544 }
1545 break;
1546
1547 case VMCI_TRANSPORT_PACKET_TYPE_RST:
1548 vsk = vsock_sk(sk);
1549 /* It is possible that we sent our peer a message (e.g. a
1550 * WAITING_READ) right before we got notified that the peer had
1551 * detached. If that happens then we can get a RST pkt back
1552 * from our peer even though there is data available for us to
1553 * read. In that case, don't shutdown the socket completely but
1554 * instead allow the local client to finish reading data off
1555 * the queuepair. Always treat a RST pkt in connected mode like
1556 * a clean shutdown.
1557 */
1558 sock_set_flag(sk, SOCK_DONE);
1559 vsk->peer_shutdown = SHUTDOWN_MASK;
1560 if (vsock_stream_has_data(vsk) <= 0)
1561 sk->sk_state = SS_DISCONNECTING;
1562
1563 sk->sk_state_change(sk);
1564 break;
1565
1566 default:
1567 vsk = vsock_sk(sk);
1568 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
1569 sk, pkt, false, NULL, NULL,
1570 &pkt_processed);
1571 if (!pkt_processed)
1572 return -EINVAL;
1573
1574 break;
1575 }
1576
1577 return 0;
1578}
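
The comment at the top of this function assumes that every blocking system call re-checks sk_err and sk_state both on entry and after each wakeup. A hedged kernel-side sketch of that pattern; vsock_ready() is a hypothetical predicate standing in for the call-specific condition, and signal handling is omitted for brevity.

#include <linux/wait.h>
#include <net/sock.h>

/* Hypothetical predicate for the call-specific condition (data ready,
 * space available, ...); not part of this driver.
 */
static bool vsock_ready(struct sock *sk);

static int wait_sketch(struct sock *sk)
{
	DEFINE_WAIT(wait);
	int err = 0;

	lock_sock(sk);
	while (!vsock_ready(sk)) {
		if (sk->sk_err) {	/* set by the RST path above */
			err = -sk->sk_err;
			break;
		}
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		release_sock(sk);
		schedule();		/* woken by sk_state_change() */
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}
	release_sock(sk);
	return err;
}
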
1579
1580static int vmci_transport_socket_init(struct vsock_sock *vsk,
1581 struct vsock_sock *psk)
1582{
1583 vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
1584 if (!vsk->trans)
1585 return -ENOMEM;
1586
1587 vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1588 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1589 vmci_trans(vsk)->qpair = NULL;
1590 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
1591 vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
1592 VMCI_INVALID_ID;
1593 vmci_trans(vsk)->notify_ops = NULL;
1594 if (psk) {
1595 vmci_trans(vsk)->queue_pair_size =
1596 vmci_trans(psk)->queue_pair_size;
1597 vmci_trans(vsk)->queue_pair_min_size =
1598 vmci_trans(psk)->queue_pair_min_size;
1599 vmci_trans(vsk)->queue_pair_max_size =
1600 vmci_trans(psk)->queue_pair_max_size;
1601 } else {
1602 vmci_trans(vsk)->queue_pair_size =
1603 VMCI_TRANSPORT_DEFAULT_QP_SIZE;
1604 vmci_trans(vsk)->queue_pair_min_size =
1605 VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
1606 vmci_trans(vsk)->queue_pair_max_size =
1607 VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
1608 }
1609
1610 return 0;
1611}
1612
1613static void vmci_transport_destruct(struct vsock_sock *vsk)
1614{
1615 if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
1616 vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
1617 vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
1618 }
1619
1620 if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1621 vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
1622 vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
1623 }
1624
1625 if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
1626 vmci_qpair_detach(&vmci_trans(vsk)->qpair);
1627 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1628 vmci_trans(vsk)->produce_size = 0;
1629 vmci_trans(vsk)->consume_size = 0;
1630 }
1631
1632 if (vmci_trans(vsk)->notify_ops)
1633 vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
1634
1635 kfree(vsk->trans);
1636 vsk->trans = NULL;
1637}
1638
1639static void vmci_transport_release(struct vsock_sock *vsk)
1640{
1641 if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
1642 vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
1643 vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1644 }
1645}
1646
1647static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
1648 struct sockaddr_vm *addr)
1649{
1650 u32 port;
1651 u32 flags;
1652 int err;
1653
1654 /* VMCI will select a resource ID for us if we provide
1655 * VMCI_INVALID_ID.
1656 */
1657 port = addr->svm_port == VMADDR_PORT_ANY ?
1658 VMCI_INVALID_ID : addr->svm_port;
1659
1660 if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
1661 return -EACCES;
1662
1663 flags = addr->svm_cid == VMADDR_CID_ANY ?
1664 VMCI_FLAG_ANYCID_DG_HND : 0;
1665
1666 err = vmci_transport_datagram_create_hnd(port, flags,
1667 vmci_transport_recv_dgram_cb,
1668 &vsk->sk,
1669 &vmci_trans(vsk)->dg_handle);
1670 if (err < VMCI_SUCCESS)
1671 return vmci_transport_error_to_vsock_error(err);
1672 vsock_addr_init(&vsk->local_addr, addr->svm_cid,
1673 vmci_trans(vsk)->dg_handle.resource);
1674
1675 return 0;
1676}
1677
1678static int vmci_transport_dgram_enqueue(
1679 struct vsock_sock *vsk,
1680 struct sockaddr_vm *remote_addr,
1681 struct iovec *iov,
1682 size_t len)
1683{
1684 int err;
1685 struct vmci_datagram *dg;
1686
1687 if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
1688 return -EMSGSIZE;
1689
1690 if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
1691 return -EPERM;
1692
1693 /* Allocate a buffer for the user's message and our packet header. */
1694 dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
1695 if (!dg)
1696 return -ENOMEM;
1697
1698 memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);
1699
1700 dg->dst = vmci_make_handle(remote_addr->svm_cid,
1701 remote_addr->svm_port);
1702 dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
1703 vsk->local_addr.svm_port);
1704 dg->payload_size = len;
1705
1706 err = vmci_datagram_send(dg);
1707 kfree(dg);
1708 if (err < 0)
1709 return vmci_transport_error_to_vsock_error(err);
1710
1711 return err - sizeof(*dg);
1712}
1713
1714static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1715 struct vsock_sock *vsk,
1716 struct msghdr *msg, size_t len,
1717 int flags)
1718{
1719 int err;
1720 int noblock;
1721 struct vmci_datagram *dg;
1722 size_t payload_len;
1723 struct sk_buff *skb;
1724
1725 noblock = flags & MSG_DONTWAIT;
1726
1727 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1728 return -EOPNOTSUPP;
1729
1730 /* Retrieve the head sk_buff from the socket's receive queue. */
1731 err = 0;
1732 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1733 if (err)
1734 return err;
1735
1736 if (!skb)
1737 return -EAGAIN;
1738
1739 dg = (struct vmci_datagram *)skb->data;
1740 if (!dg)
1741 /* err is 0, meaning we read zero bytes. */
1742 goto out;
1743
1744 payload_len = dg->payload_size;
1745 /* Ensure the sk_buff matches the payload size claimed in the packet. */
1746 if (payload_len != skb->len - sizeof(*dg)) {
1747 err = -EINVAL;
1748 goto out;
1749 }
1750
1751 if (payload_len > len) {
1752 payload_len = len;
1753 msg->msg_flags |= MSG_TRUNC;
1754 }
1755
1756 /* Place the datagram payload in the user's iovec. */
1757 err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
1758 payload_len);
1759 if (err)
1760 goto out;
1761
1762 msg->msg_namelen = 0;
1763 if (msg->msg_name) {
1764 struct sockaddr_vm *vm_addr;
1765
1766 /* Provide the address of the sender. */
1767 vm_addr = (struct sockaddr_vm *)msg->msg_name;
1768 vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
1769 msg->msg_namelen = sizeof(*vm_addr);
1770 }
1771 err = payload_len;
1772
1773out:
1774 skb_free_datagram(&vsk->sk, skb);
1775 return err;
1776}
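
A minimal userspace sketch of the datagram path these two callbacks implement, again assuming <linux/vm_sockets.h>; the peer CID and port are placeholders.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	const char msg[] = "ping";
	int fd;

	fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = 2;	/* placeholder peer CID */
	addr.svm_port = 9999;	/* placeholder port */

	/* Lands in vmci_transport_dgram_enqueue(), which prepends a
	 * struct vmci_datagram header before vmci_datagram_send().
	 */
	if (sendto(fd, msg, sizeof(msg), 0,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}
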
1777
1778static bool vmci_transport_dgram_allow(u32 cid, u32 port)
1779{
1780 if (cid == VMADDR_CID_HYPERVISOR) {
1781 /* Registrations of PBRPC Servers do not modify VMX/Hypervisor
1782 * state and are allowed.
1783 */
1784 return port == VMCI_UNITY_PBRPC_REGISTER;
1785 }
1786
1787 return true;
1788}
1789
1790static int vmci_transport_connect(struct vsock_sock *vsk)
1791{
1792 int err;
1793 bool old_pkt_proto = false;
1794 struct sock *sk = &vsk->sk;
1795
1796 if (vmci_transport_old_proto_override(&old_pkt_proto) &&
1797 old_pkt_proto) {
1798 err = vmci_transport_send_conn_request(
1799 sk, vmci_trans(vsk)->queue_pair_size);
1800 if (err < 0) {
1801 sk->sk_state = SS_UNCONNECTED;
1802 return err;
1803 }
1804 } else {
1805 int supported_proto_versions =
1806 vmci_transport_new_proto_supported_versions();
1807 err = vmci_transport_send_conn_request2(
1808 sk, vmci_trans(vsk)->queue_pair_size,
1809 supported_proto_versions);
1810 if (err < 0) {
1811 sk->sk_state = SS_UNCONNECTED;
1812 return err;
1813 }
1814
1815 vsk->sent_request = true;
1816 }
1817
1818 return err;
1819}
1820
1821static ssize_t vmci_transport_stream_dequeue(
1822 struct vsock_sock *vsk,
1823 struct iovec *iov,
1824 size_t len,
1825 int flags)
1826{
1827 if (flags & MSG_PEEK)
1828 return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0);
1829 else
1830 return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0);
1831}
1832
1833static ssize_t vmci_transport_stream_enqueue(
1834 struct vsock_sock *vsk,
1835 struct iovec *iov,
1836 size_t len)
1837{
1838 return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
1839}
1840
1841static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
1842{
1843 return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
1844}
1845
1846static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
1847{
1848 return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
1849}
1850
1851static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
1852{
1853 return vmci_trans(vsk)->consume_size;
1854}
1855
1856static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
1857{
1858 return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
1859}
1860
1861static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
1862{
1863 return vmci_trans(vsk)->queue_pair_size;
1864}
1865
1866static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
1867{
1868 return vmci_trans(vsk)->queue_pair_min_size;
1869}
1870
1871static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
1872{
1873 return vmci_trans(vsk)->queue_pair_max_size;
1874}
1875
1876static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
1877{
1878 if (val < vmci_trans(vsk)->queue_pair_min_size)
1879 vmci_trans(vsk)->queue_pair_min_size = val;
1880 if (val > vmci_trans(vsk)->queue_pair_max_size)
1881 vmci_trans(vsk)->queue_pair_max_size = val;
1882 vmci_trans(vsk)->queue_pair_size = val;
1883}
1884
1885static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
1886 u64 val)
1887{
1888 if (val > vmci_trans(vsk)->queue_pair_size)
1889 vmci_trans(vsk)->queue_pair_size = val;
1890 vmci_trans(vsk)->queue_pair_min_size = val;
1891}
1892
1893static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
1894 u64 val)
1895{
1896 if (val < vmci_trans(vsk)->queue_pair_size)
1897 vmci_trans(vsk)->queue_pair_size = val;
1898 vmci_trans(vsk)->queue_pair_max_size = val;
1899}
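
These six callbacks back the vsock buffer-size socket options. A hedged sketch of the userspace side, assuming the SO_VM_SOCKETS_BUFFER_* options and option level AF_VSOCK from <linux/vm_sockets.h>; the size value is a placeholder.

#include <sys/socket.h>
#include <linux/vm_sockets.h>

static int set_vsock_buffer(int fd)
{
	unsigned long long size = 256 * 1024;	/* placeholder value */

	/* Routed to vmci_transport_set_buffer_size() above; the min and
	 * max bounds are widened if they would otherwise exclude size.
	 */
	return setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			  &size, sizeof(size));
}
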
1900
1901static int vmci_transport_notify_poll_in(
1902 struct vsock_sock *vsk,
1903 size_t target,
1904 bool *data_ready_now)
1905{
1906 return vmci_trans(vsk)->notify_ops->poll_in(
1907 &vsk->sk, target, data_ready_now);
1908}
1909
1910static int vmci_transport_notify_poll_out(
1911 struct vsock_sock *vsk,
1912 size_t target,
1913 bool *space_available_now)
1914{
1915 return vmci_trans(vsk)->notify_ops->poll_out(
1916 &vsk->sk, target, space_available_now);
1917}
1918
1919static int vmci_transport_notify_recv_init(
1920 struct vsock_sock *vsk,
1921 size_t target,
1922 struct vsock_transport_recv_notify_data *data)
1923{
1924 return vmci_trans(vsk)->notify_ops->recv_init(
1925 &vsk->sk, target,
1926 (struct vmci_transport_recv_notify_data *)data);
1927}
1928
1929static int vmci_transport_notify_recv_pre_block(
1930 struct vsock_sock *vsk,
1931 size_t target,
1932 struct vsock_transport_recv_notify_data *data)
1933{
1934 return vmci_trans(vsk)->notify_ops->recv_pre_block(
1935 &vsk->sk, target,
1936 (struct vmci_transport_recv_notify_data *)data);
1937}
1938
1939static int vmci_transport_notify_recv_pre_dequeue(
1940 struct vsock_sock *vsk,
1941 size_t target,
1942 struct vsock_transport_recv_notify_data *data)
1943{
1944 return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
1945 &vsk->sk, target,
1946 (struct vmci_transport_recv_notify_data *)data);
1947}
1948
1949static int vmci_transport_notify_recv_post_dequeue(
1950 struct vsock_sock *vsk,
1951 size_t target,
1952 ssize_t copied,
1953 bool data_read,
1954 struct vsock_transport_recv_notify_data *data)
1955{
1956 return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
1957 &vsk->sk, target, copied, data_read,
1958 (struct vmci_transport_recv_notify_data *)data);
1959}
1960
1961static int vmci_transport_notify_send_init(
1962 struct vsock_sock *vsk,
1963 struct vsock_transport_send_notify_data *data)
1964{
1965 return vmci_trans(vsk)->notify_ops->send_init(
1966 &vsk->sk,
1967 (struct vmci_transport_send_notify_data *)data);
1968}
1969
1970static int vmci_transport_notify_send_pre_block(
1971 struct vsock_sock *vsk,
1972 struct vsock_transport_send_notify_data *data)
1973{
1974 return vmci_trans(vsk)->notify_ops->send_pre_block(
1975 &vsk->sk,
1976 (struct vmci_transport_send_notify_data *)data);
1977}
1978
1979static int vmci_transport_notify_send_pre_enqueue(
1980 struct vsock_sock *vsk,
1981 struct vsock_transport_send_notify_data *data)
1982{
1983 return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
1984 &vsk->sk,
1985 (struct vmci_transport_send_notify_data *)data);
1986}
1987
1988static int vmci_transport_notify_send_post_enqueue(
1989 struct vsock_sock *vsk,
1990 ssize_t written,
1991 struct vsock_transport_send_notify_data *data)
1992{
1993 return vmci_trans(vsk)->notify_ops->send_post_enqueue(
1994 &vsk->sk, written,
1995 (struct vmci_transport_send_notify_data *)data);
1996}
1997
1998static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
1999{
2000 if (PROTOCOL_OVERRIDE != -1) {
2001 if (PROTOCOL_OVERRIDE == 0)
2002 *old_pkt_proto = true;
2003 else
2004 *old_pkt_proto = false;
2005
2006 pr_info("Proto override in use\n");
2007 return true;
2008 }
2009
2010 return false;
2011}
2012
2013static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
2014 u16 *proto,
2015 bool old_pkt_proto)
2016{
2017 struct vsock_sock *vsk = vsock_sk(sk);
2018
2019 if (old_pkt_proto) {
2020 if (*proto != VSOCK_PROTO_INVALID) {
2021 pr_err("Can't set both an old and new protocol\n");
2022 return false;
2023 }
2024 vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
2025 goto exit;
2026 }
2027
2028 switch (*proto) {
2029 case VSOCK_PROTO_PKT_ON_NOTIFY:
2030 vmci_trans(vsk)->notify_ops =
2031 &vmci_transport_notify_pkt_q_state_ops;
2032 break;
2033 default:
2034 pr_err("Unknown notify protocol version\n");
2035 return false;
2036 }
2037
2038exit:
2039 vmci_trans(vsk)->notify_ops->socket_init(sk);
2040 return true;
2041}
2042
2043static u16 vmci_transport_new_proto_supported_versions(void)
2044{
2045 if (PROTOCOL_OVERRIDE != -1)
2046 return PROTOCOL_OVERRIDE;
2047
2048 return VSOCK_PROTO_ALL_SUPPORTED;
2049}
2050
2051static u32 vmci_transport_get_local_cid(void)
2052{
2053 return vmci_get_context_id();
2054}
2055
2056static struct vsock_transport vmci_transport = {
2057 .init = vmci_transport_socket_init,
2058 .destruct = vmci_transport_destruct,
2059 .release = vmci_transport_release,
2060 .connect = vmci_transport_connect,
2061 .dgram_bind = vmci_transport_dgram_bind,
2062 .dgram_dequeue = vmci_transport_dgram_dequeue,
2063 .dgram_enqueue = vmci_transport_dgram_enqueue,
2064 .dgram_allow = vmci_transport_dgram_allow,
2065 .stream_dequeue = vmci_transport_stream_dequeue,
2066 .stream_enqueue = vmci_transport_stream_enqueue,
2067 .stream_has_data = vmci_transport_stream_has_data,
2068 .stream_has_space = vmci_transport_stream_has_space,
2069 .stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
2070 .stream_is_active = vmci_transport_stream_is_active,
2071 .stream_allow = vmci_transport_stream_allow,
2072 .notify_poll_in = vmci_transport_notify_poll_in,
2073 .notify_poll_out = vmci_transport_notify_poll_out,
2074 .notify_recv_init = vmci_transport_notify_recv_init,
2075 .notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
2076 .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
2077 .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
2078 .notify_send_init = vmci_transport_notify_send_init,
2079 .notify_send_pre_block = vmci_transport_notify_send_pre_block,
2080 .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
2081 .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
2082 .shutdown = vmci_transport_shutdown,
2083 .set_buffer_size = vmci_transport_set_buffer_size,
2084 .set_min_buffer_size = vmci_transport_set_min_buffer_size,
2085 .set_max_buffer_size = vmci_transport_set_max_buffer_size,
2086 .get_buffer_size = vmci_transport_get_buffer_size,
2087 .get_min_buffer_size = vmci_transport_get_min_buffer_size,
2088 .get_max_buffer_size = vmci_transport_get_max_buffer_size,
2089 .get_local_cid = vmci_transport_get_local_cid,
2090};
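
For symmetry with the listener sketch earlier, a minimal client: connect() reaches vmci_transport_connect() through this ops table, and the handshake then completes asynchronously in the recv_connecting_* handlers. CID 2 and the port are placeholders.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = 2;	/* placeholder: host CID */
	addr.svm_port = 1234;	/* placeholder service port */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
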
2091
2092static int __init vmci_transport_init(void)
2093{
2094 int err;
2095
2096 /* Create the datagram handle that we will use to send and receive all
2097 * VSocket control messages for this context.
2098 */
2099 err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
2100 VMCI_FLAG_ANYCID_DG_HND,
2101 vmci_transport_recv_stream_cb,
2102 NULL,
2103 &vmci_transport_stream_handle);
2104 if (err < VMCI_SUCCESS) {
2105 pr_err("Unable to create datagram handle. (%d)\n", err);
2106 return vmci_transport_error_to_vsock_error(err);
2107 }
2108
2109 err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
2110 vmci_transport_qp_resumed_cb,
2111 NULL, &vmci_transport_qp_resumed_sub_id);
2112 if (err < VMCI_SUCCESS) {
2113 pr_err("Unable to subscribe to resumed event. (%d)\n", err);
2114 err = vmci_transport_error_to_vsock_error(err);
2115 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2116 goto err_destroy_stream_handle;
2117 }
2118
2119 err = vsock_core_init(&vmci_transport);
2120 if (err < 0)
2121 goto err_unsubscribe;
2122
2123 return 0;
2124
2125err_unsubscribe:
2126 vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2127err_destroy_stream_handle:
2128 vmci_datagram_destroy_handle(vmci_transport_stream_handle);
2129 return err;
2130}
2131module_init(vmci_transport_init);
2132
2133static void __exit vmci_transport_exit(void)
2134{
2135 if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
2136 if (vmci_datagram_destroy_handle(
2137 vmci_transport_stream_handle) != VMCI_SUCCESS)
2138 pr_err("Couldn't destroy datagram handle\n");
2139 vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
2140 }
2141
2142 if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
2143 vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2144 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2145 }
2146
2147 vsock_core_exit();
2148}
2149module_exit(vmci_transport_exit);
2150
2151MODULE_AUTHOR("VMware, Inc.");
2152MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2153MODULE_LICENSE("GPL v2");
2154MODULE_ALIAS("vmware_vsock");
2155MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
new file mode 100644
index 000000000000..1bf991803ec0
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport.h
@@ -0,0 +1,139 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _VMCI_TRANSPORT_H_
17#define _VMCI_TRANSPORT_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/vmw_vmci_api.h>
21
22#include "vsock_addr.h"
23#include "af_vsock.h"
24
25/* If the packet format changes in a release then this should change too. */
26#define VMCI_TRANSPORT_PACKET_VERSION 1
27
28/* The resource ID on which control packets are sent. */
29#define VMCI_TRANSPORT_PACKET_RID 1
30
31#define VSOCK_PROTO_INVALID 0
32#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0)
33#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY)
34
35#define vmci_trans(_vsk) ((struct vmci_transport *)((_vsk)->trans))
36
37enum vmci_transport_packet_type {
38 VMCI_TRANSPORT_PACKET_TYPE_INVALID = 0,
39 VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
40 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
41 VMCI_TRANSPORT_PACKET_TYPE_OFFER,
42 VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
43 VMCI_TRANSPORT_PACKET_TYPE_WROTE,
44 VMCI_TRANSPORT_PACKET_TYPE_READ,
45 VMCI_TRANSPORT_PACKET_TYPE_RST,
46 VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
47 VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
48 VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
49 VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
50 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
51 VMCI_TRANSPORT_PACKET_TYPE_MAX
52};
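
Reconstructed from the handlers in vmci_transport.c, the control-packet exchange for a stream connect looks roughly like this (client on the left, listening server on the right):

/*
 *   REQUEST2(size, protos)    ---->   validate, pick protocol
 *                             <----   NEGOTIATE2(size, proto)
 *   create queue pair,
 *   OFFER(handle)             ---->   attach to queue pair
 *                             <----   ATTACH(handle)
 *   SS_CONNECTED                      SS_CONNECTED
 *
 * Legacy peers use REQUEST/NEGOTIATE without a protocol field, and a
 * failure at either end is answered with RST.
 */
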
53
54struct vmci_transport_waiting_info {
55 u64 generation;
56 u64 offset;
57};
58
59/* Control packet type for STREAM sockets. DGRAMs have no control packets nor
60 * special packet header for data packets, they are just raw VMCI DGRAM
61 * messages. For STREAMs, control packets are sent over the control channel
62 * while data is written and read directly from queue pairs with no packet
63 * format.
64 */
65struct vmci_transport_packet {
66 struct vmci_datagram dg;
67 u8 version;
68 u8 type;
69 u16 proto;
70 u32 src_port;
71 u32 dst_port;
72 u32 _reserved2;
73 union {
74 u64 size;
75 u64 mode;
76 struct vmci_handle handle;
77 struct vmci_transport_waiting_info wait;
78 } u;
79};
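
Which union member is live depends on pkt->type; the pairing, as used by the handlers in vmci_transport.c, is roughly:

/*
 *   u.size   - REQUEST(2)/NEGOTIATE(2): proposed or accepted QP size
 *   u.handle - OFFER/ATTACH: the queue pair handle
 *   u.mode   - SHUTDOWN: mask of SEND_SHUTDOWN/RCV_SHUTDOWN
 *   u.wait   - WAITING_READ/WAITING_WRITE: generation and offset
 */
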
80
81struct vmci_transport_notify_pkt {
82 u64 write_notify_window;
83 u64 write_notify_min_window;
84 bool peer_waiting_read;
85 bool peer_waiting_write;
86 bool peer_waiting_write_detected;
87 bool sent_waiting_read;
88 bool sent_waiting_write;
89 struct vmci_transport_waiting_info peer_waiting_read_info;
90 struct vmci_transport_waiting_info peer_waiting_write_info;
91 u64 produce_q_generation;
92 u64 consume_q_generation;
93};
94
95struct vmci_transport_notify_pkt_q_state {
96 u64 write_notify_window;
97 u64 write_notify_min_window;
98 bool peer_waiting_write;
99 bool peer_waiting_write_detected;
100};
101
102union vmci_transport_notify {
103 struct vmci_transport_notify_pkt pkt;
104 struct vmci_transport_notify_pkt_q_state pkt_q_state;
105};
106
107/* Our transport-specific data. */
108struct vmci_transport {
109 /* For DGRAMs. */
110 struct vmci_handle dg_handle;
111 /* For STREAMs. */
112 struct vmci_handle qp_handle;
113 struct vmci_qp *qpair;
114 u64 produce_size;
115 u64 consume_size;
116 u64 queue_pair_size;
117 u64 queue_pair_min_size;
118 u64 queue_pair_max_size;
119 u32 attach_sub_id;
120 u32 detach_sub_id;
121 union vmci_transport_notify notify;
122 struct vmci_transport_notify_ops *notify_ops;
123};
124
125int vmci_transport_register(void);
126void vmci_transport_unregister(void);
127
128int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
129 struct sockaddr_vm *src);
130int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
131 struct sockaddr_vm *src);
132int vmci_transport_send_wrote(struct sock *sk);
133int vmci_transport_send_read(struct sock *sk);
134int vmci_transport_send_waiting_write(struct sock *sk,
135 struct vmci_transport_waiting_info *wait);
136int vmci_transport_send_waiting_read(struct sock *sk,
137 struct vmci_transport_waiting_info *wait);
138
139#endif /* _VMCI_TRANSPORT_H_ */
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
new file mode 100644
index 000000000000..9a730744e7bc
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -0,0 +1,680 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vmci_transport_notify.h"
22
23#define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)
24
25static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
26{
27#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
28 bool retval;
29 u64 notify_limit;
30
31 if (!PKT_FIELD(vsk, peer_waiting_write))
32 return false;
33
34#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
35 /* When the sender blocks, we take that as a sign that the sender is
36 * faster than the receiver. To reduce the transmit rate of the sender,
37 * we delay the sending of the read notification by decreasing the
38 * write_notify_window. The notification is delayed until the number of
39 * bytes used in the queue drops below the write_notify_window.
40 */
41
42 if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
43 PKT_FIELD(vsk, peer_waiting_write_detected) = true;
44 if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
45 PKT_FIELD(vsk, write_notify_window) =
46 PKT_FIELD(vsk, write_notify_min_window);
47 } else {
48 PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
49 if (PKT_FIELD(vsk, write_notify_window) <
50 PKT_FIELD(vsk, write_notify_min_window))
51 PKT_FIELD(vsk, write_notify_window) =
52 PKT_FIELD(vsk, write_notify_min_window);
54 }
55 }
56 notify_limit = vmci_trans(vsk)->consume_size -
57 PKT_FIELD(vsk, write_notify_window);
58#else
59 notify_limit = 0;
60#endif
61
62 /* For now we ignore the wait information and just see if the free
63 * space exceeds the notify limit. Note that improving this function
64 * to be more intelligent will not require a protocol change and will
65 * retain compatibility between endpoints with mixed versions of this
66 * function.
67 *
68 * The notify_limit is used to delay notifications in the case where
69 * flow control is enabled. Below the test is expressed in terms of
70 * free space in the queue: if free_space > ConsumeSize -
71 * write_notify_window then notify. An alternate way of expressing this
72 * is to rewrite the expression to use the data ready in the receive
73 * queue: if write_notify_window > bufferReady then notify, as
74 * free_space == ConsumeSize - bufferReady.
75 */
76 retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
77 notify_limit;
78#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
79 if (retval) {
80 /* Once we notify the peer, we reset the detected flag so the
81 * next wait will again cause a decrease in the window size.
82 */
85 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
86 }
87#endif
88 return retval;
89#else
90 return true;
91#endif
92}
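
A worked example of the back-off above, as a small standalone simulation. It assumes a 64 KiB consume queue, a 4 KiB PAGE_SIZE and a one-page minimum window (all placeholder values); write_notify_window starts at consume_size, as set by the process_negotiate() callback later in this file.

#include <stdio.h>

int main(void)
{
	unsigned long long consume_size = 64 * 1024;	/* placeholder */
	unsigned long long page = 4096;			/* placeholder */
	unsigned long long min_window = page;
	unsigned long long window = consume_size;
	int blocks;

	/* Each iteration models one newly detected peer block (the
	 * detected flag is reset after every successful notify).
	 */
	for (blocks = 1; blocks <= 3; blocks++) {
		if (window < page) {
			window = min_window;
		} else {
			window -= page;
			if (window < min_window)
				window = min_window;
		}
		/* READ is sent only once free space exceeds this. */
		printf("block %d: notify_limit = %llu\n",
		       blocks, consume_size - window);
	}
	return 0;
}

With these numbers the notify limit grows by one page per detected block: 4096, then 8192, then 12288 bytes.
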
93
94static bool vmci_transport_notify_waiting_read(struct vsock_sock *vsk)
95{
96#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
97 if (!PKT_FIELD(vsk, peer_waiting_read))
98 return false;
99
100 /* For now we ignore the wait information and just see if there is any
101 * data for our peer to read. Note that improving this function to be
102 * more intelligent will not require a protocol change and will retain
103 * compatibility between endpoints with mixed versions of this
104 * function.
105 */
106 return vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) > 0;
107#else
108 return true;
109#endif
110}
111
112static void
113vmci_transport_handle_waiting_read(struct sock *sk,
114 struct vmci_transport_packet *pkt,
115 bool bottom_half,
116 struct sockaddr_vm *dst,
117 struct sockaddr_vm *src)
118{
119#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
120 struct vsock_sock *vsk;
121
122 vsk = vsock_sk(sk);
123
124 PKT_FIELD(vsk, peer_waiting_read) = true;
125 memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
126 sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
127
128 if (vmci_transport_notify_waiting_read(vsk)) {
129 bool sent;
130
131 if (bottom_half)
132 sent = vmci_transport_send_wrote_bh(dst, src) > 0;
133 else
134 sent = vmci_transport_send_wrote(sk) > 0;
135
136 if (sent)
137 PKT_FIELD(vsk, peer_waiting_read) = false;
138 }
139#endif
140}
141
142static void
143vmci_transport_handle_waiting_write(struct sock *sk,
144 struct vmci_transport_packet *pkt,
145 bool bottom_half,
146 struct sockaddr_vm *dst,
147 struct sockaddr_vm *src)
148{
149#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
150 struct vsock_sock *vsk;
151
152 vsk = vsock_sk(sk);
153
154 PKT_FIELD(vsk, peer_waiting_write) = true;
155 memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
156 sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
157
158 if (vmci_transport_notify_waiting_write(vsk)) {
159 bool sent;
160
161 if (bottom_half)
162 sent = vmci_transport_send_read_bh(dst, src) > 0;
163 else
164 sent = vmci_transport_send_read(sk) > 0;
165
166 if (sent)
167 PKT_FIELD(vsk, peer_waiting_write) = false;
168 }
169#endif
170}
171
172static void
173vmci_transport_handle_read(struct sock *sk,
174 struct vmci_transport_packet *pkt,
175 bool bottom_half,
176 struct sockaddr_vm *dst, struct sockaddr_vm *src)
177{
178#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
179 struct vsock_sock *vsk;
180
181 vsk = vsock_sk(sk);
182 PKT_FIELD(vsk, sent_waiting_write) = false;
183#endif
184
185 sk->sk_write_space(sk);
186}
187
188static bool send_waiting_read(struct sock *sk, u64 room_needed)
189{
190#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
191 struct vsock_sock *vsk;
192 struct vmci_transport_waiting_info waiting_info;
193 u64 tail;
194 u64 head;
195 u64 room_left;
196 bool ret;
197
198 vsk = vsock_sk(sk);
199
200 if (PKT_FIELD(vsk, sent_waiting_read))
201 return true;
202
203 if (PKT_FIELD(vsk, write_notify_window) <
204 vmci_trans(vsk)->consume_size)
205 PKT_FIELD(vsk, write_notify_window) =
206 min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
207 vmci_trans(vsk)->consume_size);
208
209 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head);
210 room_left = vmci_trans(vsk)->consume_size - head;
211 if (room_needed >= room_left) {
212 waiting_info.offset = room_needed - room_left;
213 waiting_info.generation =
214 PKT_FIELD(vsk, consume_q_generation) + 1;
215 } else {
216 waiting_info.offset = head + room_needed;
217 waiting_info.generation = PKT_FIELD(vsk, consume_q_generation);
218 }
219
220 ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
221 if (ret)
222 PKT_FIELD(vsk, sent_waiting_read) = true;
223
224 return ret;
225#else
226 return true;
227#endif
228}
229
230static bool send_waiting_write(struct sock *sk, u64 room_needed)
231{
232#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
233 struct vsock_sock *vsk;
234 struct vmci_transport_waiting_info waiting_info;
235 u64 tail;
236 u64 head;
237 u64 room_left;
238 bool ret;
239
240 vsk = vsock_sk(sk);
241
242 if (PKT_FIELD(vsk, sent_waiting_write))
243 return true;
244
245 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head);
246 room_left = vmci_trans(vsk)->produce_size - tail;
247 if (room_needed + 1 >= room_left) {
248 /* Wraps around to current generation. */
249 waiting_info.offset = room_needed + 1 - room_left;
250 waiting_info.generation = PKT_FIELD(vsk, produce_q_generation);
251 } else {
252 waiting_info.offset = tail + room_needed + 1;
253 waiting_info.generation =
254 PKT_FIELD(vsk, produce_q_generation) - 1;
255 }
256
257 ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
258 if (ret)
259 PKT_FIELD(vsk, sent_waiting_write) = true;
260
261 return ret;
262#else
263 return true;
264#endif
265}
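
Plugging placeholder numbers into the formulas above (produce_size = 64 KiB = 65536 bytes, current generation g):

/*
 *   tail = 65000, room_needed = 1000:
 *       room_left = 536, and 1001 >= 536, so the request wraps:
 *       offset = 1001 - 536 = 465, generation = g
 *
 *   tail = 100, room_needed = 50:
 *       room_left = 65436, and 51 < 65436, so no wrap:
 *       offset = 100 + 51 = 151, generation = g - 1
 */
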
266
267static int vmci_transport_send_read_notification(struct sock *sk)
268{
269 struct vsock_sock *vsk;
270 bool sent_read;
271 unsigned int retries;
272 int err;
273
274 vsk = vsock_sk(sk);
275 sent_read = false;
276 retries = 0;
277 err = 0;
278
279 if (vmci_transport_notify_waiting_write(vsk)) {
280 /* Notify the peer that we have read, retrying the send on
281 * failure up to our maximum value. XXX For now we just log
282 * the failure, but later we should schedule a work item to
283 * handle the resend until it succeeds. That would require
284 * keeping track of work items in the vsk and cleaning them up
285 * upon socket close.
286 */
287 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
288 !sent_read &&
289 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
290 err = vmci_transport_send_read(sk);
291 if (err >= 0)
292 sent_read = true;
293
294 retries++;
295 }
296
297 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS)
298 pr_err("%p unable to send read notify to peer\n", sk);
299 else
300#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
301 PKT_FIELD(vsk, peer_waiting_write) = false;
302#endif
304 }
305 return err;
306}
307
308static void
309vmci_transport_handle_wrote(struct sock *sk,
310 struct vmci_transport_packet *pkt,
311 bool bottom_half,
312 struct sockaddr_vm *dst, struct sockaddr_vm *src)
313{
314#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
315 struct vsock_sock *vsk = vsock_sk(sk);
316 PKT_FIELD(vsk, sent_waiting_read) = false;
317#endif
318 sk->sk_data_ready(sk, 0);
319}
320
321static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
322{
323 struct vsock_sock *vsk = vsock_sk(sk);
324
325 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
326 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
327 PKT_FIELD(vsk, peer_waiting_read) = false;
328 PKT_FIELD(vsk, peer_waiting_write) = false;
329 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
330 PKT_FIELD(vsk, sent_waiting_read) = false;
331 PKT_FIELD(vsk, sent_waiting_write) = false;
332 PKT_FIELD(vsk, produce_q_generation) = 0;
333 PKT_FIELD(vsk, consume_q_generation) = 0;
334
335 memset(&PKT_FIELD(vsk, peer_waiting_read_info), 0,
336 sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
337 memset(&PKT_FIELD(vsk, peer_waiting_write_info), 0,
338 sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
339}
340
341static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
342{
343}
344
345static int
346vmci_transport_notify_pkt_poll_in(struct sock *sk,
347 size_t target, bool *data_ready_now)
348{
349 struct vsock_sock *vsk = vsock_sk(sk);
350
351 if (vsock_stream_has_data(vsk)) {
352 *data_ready_now = true;
353 } else {
354 /* We can't read right now because there is nothing in the
355 * queue. Ask for notifications when there is something to
356 * read.
357 */
358 if (sk->sk_state == SS_CONNECTED) {
359 if (!send_waiting_read(sk, 1))
360 return -1;
362 }
363 *data_ready_now = false;
364 }
365
366 return 0;
367}
368
369static int
370vmci_transport_notify_pkt_poll_out(struct sock *sk,
371 size_t target, bool *space_avail_now)
372{
373 s64 produce_q_free_space;
374 struct vsock_sock *vsk = vsock_sk(sk);
375
376 produce_q_free_space = vsock_stream_has_space(vsk);
377 if (produce_q_free_space > 0) {
378 *space_avail_now = true;
379 return 0;
380 } else if (produce_q_free_space == 0) {
381 /* This is a connected socket but we can't currently send data.
382 * Notify the peer that we are waiting if the queue is full. We
383 * only send a waiting write if the queue is full because
384 * otherwise we end up in an infinite WAITING_WRITE, READ,
385 * WAITING_WRITE, READ, etc. loop. Treat failing to send the
386 * notification as a socket error, passing that back through
387 * the mask.
388 */
389 if (!send_waiting_write(sk, 1))
390 return -1;
391
392 *space_avail_now = false;
393 }
394
395 return 0;
396}
397
398static int
399vmci_transport_notify_pkt_recv_init(
400 struct sock *sk,
401 size_t target,
402 struct vmci_transport_recv_notify_data *data)
403{
404 struct vsock_sock *vsk = vsock_sk(sk);
405
406#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
407 data->consume_head = 0;
408 data->produce_tail = 0;
409#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
410 data->notify_on_block = false;
411
412 if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
413 PKT_FIELD(vsk, write_notify_min_window) = target + 1;
414 if (PKT_FIELD(vsk, write_notify_window) <
415 PKT_FIELD(vsk, write_notify_min_window)) {
416 /* If the current window is smaller than the new
417 * minimal window size, we need to reevaluate whether
418 * we need to notify the sender. If the number of ready
419 * bytes is smaller than the new window, we need to
420 * send a notification to the sender before we block.
421 */
422
423 PKT_FIELD(vsk, write_notify_window) =
424 PKT_FIELD(vsk, write_notify_min_window);
425 data->notify_on_block = true;
426 }
427 }
428#endif
429#endif
430
431 return 0;
432}
433
434static int
435vmci_transport_notify_pkt_recv_pre_block(
436 struct sock *sk,
437 size_t target,
438 struct vmci_transport_recv_notify_data *data)
439{
440 int err = 0;
441
442 /* Notify our peer that we are waiting for data to read. */
443 if (!send_waiting_read(sk, target)) {
444 err = -EHOSTUNREACH;
445 return err;
446 }
447#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
448 if (data->notify_on_block) {
449 err = vmci_transport_send_read_notification(sk);
450 if (err < 0)
451 return err;
452
453 data->notify_on_block = false;
454 }
455#endif
456
457 return err;
458}
459
460static int
461vmci_transport_notify_pkt_recv_pre_dequeue(
462 struct sock *sk,
463 size_t target,
464 struct vmci_transport_recv_notify_data *data)
465{
466 struct vsock_sock *vsk = vsock_sk(sk);
467
468 /* Now consume up to len bytes from the queue. Note that since we hold
469 * the socket lock, the amount of ready data cannot shrink beneath us.
470 */
471#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
472 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair,
473 &data->produce_tail,
474 &data->consume_head);
475#endif
476
477 return 0;
478}
479
480static int
481vmci_transport_notify_pkt_recv_post_dequeue(
482 struct sock *sk,
483 size_t target,
484 ssize_t copied,
485 bool data_read,
486 struct vmci_transport_recv_notify_data *data)
487{
488 struct vsock_sock *vsk;
489 int err;
490
491 vsk = vsock_sk(sk);
492 err = 0;
493
494 if (data_read) {
495#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
496 /* Detect a wrap-around to maintain queue generation. Note
497 * that this is safe since we hold the socket lock across the
498 * two queue pair operations.
499 */
500 if (copied >=
501 vmci_trans(vsk)->consume_size - data->consume_head)
502 PKT_FIELD(vsk, consume_q_generation)++;
503#endif
504
505 err = vmci_transport_send_read_notification(sk);
506 if (err < 0)
507 return err;
509 }
510 return err;
511}
512
513static int
514vmci_transport_notify_pkt_send_init(
515 struct sock *sk,
516 struct vmci_transport_send_notify_data *data)
517{
518#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
519 data->consume_head = 0;
520 data->produce_tail = 0;
521#endif
522
523 return 0;
524}
525
526static int
527vmci_transport_notify_pkt_send_pre_block(
528 struct sock *sk,
529 struct vmci_transport_send_notify_data *data)
530{
531 /* Notify our peer that we are waiting for room to write. */
532 if (!send_waiting_write(sk, 1))
533 return -EHOSTUNREACH;
534
535 return 0;
536}
537
538static int
539vmci_transport_notify_pkt_send_pre_enqueue(
540 struct sock *sk,
541 struct vmci_transport_send_notify_data *data)
542{
543 struct vsock_sock *vsk = vsock_sk(sk);
544
545#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
546 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair,
547 &data->produce_tail,
548 &data->consume_head);
549#endif
550
551 return 0;
552}
553
554static int
555vmci_transport_notify_pkt_send_post_enqueue(
556 struct sock *sk,
557 ssize_t written,
558 struct vmci_transport_send_notify_data *data)
559{
560 int err = 0;
561 struct vsock_sock *vsk;
562 bool sent_wrote = false;
563 int retries = 0;
564
565 vsk = vsock_sk(sk);
566
567#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
568 /* Detect a wrap-around to maintain queue generation. Note that this
569 * is safe since we hold the socket lock across the two queue pair
570 * operations.
571 */
572 if (written >= vmci_trans(vsk)->produce_size - data->produce_tail)
573 PKT_FIELD(vsk, produce_q_generation)++;
574
575#endif
576
577 if (vmci_transport_notify_waiting_read(vsk)) {
578 /* Notify the peer that we have written, retrying the send on
579 * failure up to our maximum value. See the XXX comment for the
580 * corresponding piece of code in StreamRecvmsg() for potential
581 * improvements.
582 */
583 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
584 !sent_wrote &&
585 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
586 err = vmci_transport_send_wrote(sk);
587 if (err >= 0)
588 sent_wrote = true;
589
590 retries++;
591 }
592
593 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
594 pr_err("%p unable to send wrote notify to peer\n", sk);
595 return err;
596 } else {
597#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
598 PKT_FIELD(vsk, peer_waiting_read) = false;
599#endif
600 }
601 }
602 return err;
603}
604
605static void
606vmci_transport_notify_pkt_handle_pkt(
607 struct sock *sk,
608 struct vmci_transport_packet *pkt,
609 bool bottom_half,
610 struct sockaddr_vm *dst,
611 struct sockaddr_vm *src, bool *pkt_processed)
612{
613 bool processed = false;
614
615 switch (pkt->type) {
616 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
617 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
618 processed = true;
619 break;
620 case VMCI_TRANSPORT_PACKET_TYPE_READ:
621 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
622 processed = true;
623 break;
624 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
625 vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
626 dst, src);
627 processed = true;
628 break;
629
630 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
631 vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
632 dst, src);
633 processed = true;
634 break;
635 }
636
637 if (pkt_processed)
638 *pkt_processed = processed;
639}
640
641static void vmci_transport_notify_pkt_process_request(struct sock *sk)
642{
643 struct vsock_sock *vsk = vsock_sk(sk);
644
645 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
646 if (vmci_trans(vsk)->consume_size <
647 PKT_FIELD(vsk, write_notify_min_window))
648 PKT_FIELD(vsk, write_notify_min_window) =
649 vmci_trans(vsk)->consume_size;
650}
651
652static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
653{
654 struct vsock_sock *vsk = vsock_sk(sk);
655
656 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
657 if (vmci_trans(vsk)->consume_size <
658 PKT_FIELD(vsk, write_notify_min_window))
659 PKT_FIELD(vsk, write_notify_min_window) =
660 vmci_trans(vsk)->consume_size;
661}
662
663/* Socket control packet based operations. */
664struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
665 .socket_init = vmci_transport_notify_pkt_socket_init,
666 .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
667 .poll_in = vmci_transport_notify_pkt_poll_in,
668 .poll_out = vmci_transport_notify_pkt_poll_out,
669 .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
670 .recv_init = vmci_transport_notify_pkt_recv_init,
671 .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
672 .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
673 .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
674 .send_init = vmci_transport_notify_pkt_send_init,
675 .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
676 .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
677 .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
678 .process_request = vmci_transport_notify_pkt_process_request,
679 .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
680};
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
new file mode 100644
index 000000000000..7df793249b6c
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -0,0 +1,83 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __VMCI_TRANSPORT_NOTIFY_H__
17#define __VMCI_TRANSPORT_NOTIFY_H__
18
19#include <linux/types.h>
20#include <linux/vmw_vmci_defs.h>
21#include <linux/vmw_vmci_api.h>
22#include <linux/vm_sockets.h>
23
24#include "vmci_transport.h"
25
26/* Comment this out to compare with old protocol. */
27#define VSOCK_OPTIMIZATION_WAITING_NOTIFY 1
28#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
29/* Comment this out to remove flow control for "new" protocol */
30#define VSOCK_OPTIMIZATION_FLOW_CONTROL 1
31#endif
32
33#define VMCI_TRANSPORT_MAX_DGRAM_RESENDS 10
34
35struct vmci_transport_recv_notify_data {
36 u64 consume_head;
37 u64 produce_tail;
38 bool notify_on_block;
39};
40
41struct vmci_transport_send_notify_data {
42 u64 consume_head;
43 u64 produce_tail;
44};
45
46/* Socket notification callbacks. */
47struct vmci_transport_notify_ops {
48 void (*socket_init) (struct sock *sk);
49 void (*socket_destruct) (struct vsock_sock *vsk);
50 int (*poll_in) (struct sock *sk, size_t target,
51 bool *data_ready_now);
52 int (*poll_out) (struct sock *sk, size_t target,
53 bool *space_avail_now);
54 void (*handle_notify_pkt) (struct sock *sk,
55 struct vmci_transport_packet *pkt,
56 bool bottom_half, struct sockaddr_vm *dst,
57 struct sockaddr_vm *src,
58 bool *pkt_processed);
59 int (*recv_init) (struct sock *sk, size_t target,
60 struct vmci_transport_recv_notify_data *data);
61 int (*recv_pre_block) (struct sock *sk, size_t target,
62 struct vmci_transport_recv_notify_data *data);
63 int (*recv_pre_dequeue) (struct sock *sk, size_t target,
64 struct vmci_transport_recv_notify_data *data);
65 int (*recv_post_dequeue) (struct sock *sk, size_t target,
66 ssize_t copied, bool data_read,
67 struct vmci_transport_recv_notify_data *data);
68 int (*send_init) (struct sock *sk,
69 struct vmci_transport_send_notify_data *data);
70 int (*send_pre_block) (struct sock *sk,
71 struct vmci_transport_send_notify_data *data);
72 int (*send_pre_enqueue) (struct sock *sk,
73 struct vmci_transport_send_notify_data *data);
74 int (*send_post_enqueue) (struct sock *sk, ssize_t written,
75 struct vmci_transport_send_notify_data *data);
76 void (*process_request) (struct sock *sk);
77 void (*process_negotiate) (struct sock *sk);
78};
79
80extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
81extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
82
83#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
new file mode 100644
index 000000000000..622bd7aa1016
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -0,0 +1,438 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vmci_transport_notify.h"
22
23#define PKT_FIELD(vsk, field_name) \
24 (vmci_trans(vsk)->notify.pkt_q_state.field_name)
25
26static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
27{
28 bool retval;
29 u64 notify_limit;
30
31 if (!PKT_FIELD(vsk, peer_waiting_write))
32 return false;
33
34 /* When the sender blocks, we take that as a sign that the sender is
35 * faster than the receiver. To reduce the transmit rate of the sender,
36 * we delay the sending of the read notification by decreasing the
37 * write_notify_window. The notification is delayed until the number of
38 * bytes used in the queue drops below the write_notify_window.
39 */
40
41 if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
42 PKT_FIELD(vsk, peer_waiting_write_detected) = true;
43 if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
44 PKT_FIELD(vsk, write_notify_window) =
45 PKT_FIELD(vsk, write_notify_min_window);
46 } else {
47 PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
48 if (PKT_FIELD(vsk, write_notify_window) <
49 PKT_FIELD(vsk, write_notify_min_window))
50 PKT_FIELD(vsk, write_notify_window) =
51 PKT_FIELD(vsk, write_notify_min_window);
53 }
54 }
55 notify_limit = vmci_trans(vsk)->consume_size -
56 PKT_FIELD(vsk, write_notify_window);
57
58 /* The notify_limit is used to delay notifications in the case where
59 * flow control is enabled. Below the test is expressed in terms of
60 * free space in the queue: if free_space > ConsumeSize -
61 * write_notify_window then notify. An alternate way of expressing this
62 * is to rewrite the expression to use the data ready in the receive
63 * queue: if write_notify_window > bufferReady then notify, as
64 * free_space == ConsumeSize - bufferReady.
65 */
66
67 retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
68 notify_limit;
69
70 if (retval) {
71 /* Once we notify the peer, we reset the detected flag so the
72 * next wait will again cause a decrease in the window size.
73 */
74
75 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
76 }
77 return retval;
78}
79
80static void
81vmci_transport_handle_read(struct sock *sk,
82 struct vmci_transport_packet *pkt,
83 bool bottom_half,
84 struct sockaddr_vm *dst, struct sockaddr_vm *src)
85{
86 sk->sk_write_space(sk);
87}
88
89static void
90vmci_transport_handle_wrote(struct sock *sk,
91 struct vmci_transport_packet *pkt,
92 bool bottom_half,
93 struct sockaddr_vm *dst, struct sockaddr_vm *src)
94{
95 sk->sk_data_ready(sk, 0);
96}
97
98static void vsock_block_update_write_window(struct sock *sk)
99{
100 struct vsock_sock *vsk = vsock_sk(sk);
101
102 if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
103 PKT_FIELD(vsk, write_notify_window) =
104 min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
105 vmci_trans(vsk)->consume_size);
106}
107
108static int vmci_transport_send_read_notification(struct sock *sk)
109{
110 struct vsock_sock *vsk;
111 bool sent_read;
112 unsigned int retries;
113 int err;
114
115 vsk = vsock_sk(sk);
116 sent_read = false;
117 retries = 0;
118 err = 0;
119
120 if (vmci_transport_notify_waiting_write(vsk)) {
121 /* Notify the peer that we have read, retrying the send on
122 * failure up to our maximum value. XXX For now we just log
123 * the failure, but later we should schedule a work item to
124 * handle the resend until it succeeds. That would require
125 * keeping track of work items in the vsk and cleaning them up
126 * upon socket close.
127 */
128 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
129 !sent_read &&
130 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
131 err = vmci_transport_send_read(sk);
132 if (err >= 0)
133 sent_read = true;
134
135 retries++;
136 }
137
138 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
139 pr_err("%p unable to send read notification to peer\n",
140 sk);
141 else
142 PKT_FIELD(vsk, peer_waiting_write) = false;
143
144 }
145 return err;
146}
147
148static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
149{
150 struct vsock_sock *vsk = vsock_sk(sk);
151
152 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
153 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
154 PKT_FIELD(vsk, peer_waiting_write) = false;
155 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
156}
157
158static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
159{
160 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
161 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
162 PKT_FIELD(vsk, peer_waiting_write) = false;
163 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
164}
165
166static int
167vmci_transport_notify_pkt_poll_in(struct sock *sk,
168 size_t target, bool *data_ready_now)
169{
170 struct vsock_sock *vsk = vsock_sk(sk);
171
172 if (vsock_stream_has_data(vsk)) {
173 *data_ready_now = true;
174 } else {
175 /* We can't read right now because there is nothing in the
176 * queue. Ask for notifications when there is something to
177 * read.
178 */
179 if (sk->sk_state == SS_CONNECTED)
180 vsock_block_update_write_window(sk);
181 *data_ready_now = false;
182 }
183
184 return 0;
185}
186
187static int
188vmci_transport_notify_pkt_poll_out(struct sock *sk,
189 size_t target, bool *space_avail_now)
190{
191 s64 produce_q_free_space;
192 struct vsock_sock *vsk = vsock_sk(sk);
193
194 produce_q_free_space = vsock_stream_has_space(vsk);
195 if (produce_q_free_space > 0) {
196 *space_avail_now = true;
197 return 0;
198 } else if (produce_q_free_space == 0) {
199 /* This is a connected socket but we can't currently send data.
200 * Nothing else to do.
201 */
202 *space_avail_now = false;
203 }
204
205 return 0;
206}
207
208static int
209vmci_transport_notify_pkt_recv_init(
210 struct sock *sk,
211 size_t target,
212 struct vmci_transport_recv_notify_data *data)
213{
214 struct vsock_sock *vsk = vsock_sk(sk);
215
216 data->consume_head = 0;
217 data->produce_tail = 0;
218 data->notify_on_block = false;
219
220 if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
221 PKT_FIELD(vsk, write_notify_min_window) = target + 1;
222 if (PKT_FIELD(vsk, write_notify_window) <
223 PKT_FIELD(vsk, write_notify_min_window)) {
224 /* If the current window is smaller than the new
225 * minimal window size, we need to reevaluate whether
226 * we need to notify the sender. If the number of ready
227 * bytes is smaller than the new window, we need to
228 * send a notification to the sender before we block.
229 */
230
231 PKT_FIELD(vsk, write_notify_window) =
232 PKT_FIELD(vsk, write_notify_min_window);
233 data->notify_on_block = true;
234 }
235 }
236
237 return 0;
238}
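
A compact restatement of the invariant this function establishes, as a sanity-check sketch (not driver code): after recv_init() returns, the minimum window covers at least target + 1 bytes and the window never falls below the minimum.

/* Illustrative invariant check only; not part of the driver. */
static void check_recv_init_invariant(u64 window, u64 min_window,
				      size_t target)
{
	WARN_ON(min_window < (u64)target + 1);
	WARN_ON(window < min_window);
}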
239
240static int
241vmci_transport_notify_pkt_recv_pre_block(
242 struct sock *sk,
243 size_t target,
244 struct vmci_transport_recv_notify_data *data)
245{
246 int err = 0;
247
248 vsock_block_update_write_window(sk);
249
250 if (data->notify_on_block) {
251 err = vmci_transport_send_read_notification(sk);
252 if (err < 0)
253 return err;
254 data->notify_on_block = false;
255 }
256
257 return err;
258}
259
260static int
261vmci_transport_notify_pkt_recv_post_dequeue(
262 struct sock *sk,
263 size_t target,
264 ssize_t copied,
265 bool data_read,
266 struct vmci_transport_recv_notify_data *data)
267{
268 struct vsock_sock *vsk;
269 int err;
270 bool was_full = false;
271 u64 free_space;
272
273 vsk = vsock_sk(sk);
274 err = 0;
275
276 if (data_read) {
277 smp_mb();
278
279 free_space =
280 vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
281 was_full = free_space == copied;
282
283 if (was_full)
284 PKT_FIELD(vsk, peer_waiting_write) = true;
285
286 err = vmci_transport_send_read_notification(sk);
287 if (err < 0)
288 return err;
289
290 /* See the comment in
291 * vmci_transport_notify_pkt_send_post_enqueue().
292 */
293 sk->sk_data_ready(sk, 0);
294 }
295
296 return err;
297}
298
299static int
300vmci_transport_notify_pkt_send_init(
301 struct sock *sk,
302 struct vmci_transport_send_notify_data *data)
303{
304 data->consume_head = 0;
305 data->produce_tail = 0;
306
307 return 0;
308}
309
310static int
311vmci_transport_notify_pkt_send_post_enqueue(
312 struct sock *sk,
313 ssize_t written,
314 struct vmci_transport_send_notify_data *data)
315{
316 int err = 0;
317 struct vsock_sock *vsk;
318 bool sent_wrote = false;
319 bool was_empty;
320 int retries = 0;
321
322 vsk = vsock_sk(sk);
323
324 smp_mb();
325
326 was_empty =
327 vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
328 if (was_empty) {
329 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
330 !sent_wrote &&
331 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
332 err = vmci_transport_send_wrote(sk);
333 if (err >= 0)
334 sent_wrote = true;
335
336 retries++;
337 }
338 }
339
340 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
341 pr_err("%p unable to send wrote notification to peer\n",
342 sk);
343 return err;
344 }
345
346 return err;
347}
348
349static void
350vmci_transport_notify_pkt_handle_pkt(
351 struct sock *sk,
352 struct vmci_transport_packet *pkt,
353 bool bottom_half,
354 struct sockaddr_vm *dst,
355 struct sockaddr_vm *src, bool *pkt_processed)
356{
357 bool processed = false;
358
359 switch (pkt->type) {
360 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
361 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
362 processed = true;
363 break;
364 case VMCI_TRANSPORT_PACKET_TYPE_READ:
365 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
366 processed = true;
367 break;
368 }
369
370 if (pkt_processed)
371 *pkt_processed = processed;
372}
373
374static void vmci_transport_notify_pkt_process_request(struct sock *sk)
375{
376 struct vsock_sock *vsk = vsock_sk(sk);
377
378 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
379 if (vmci_trans(vsk)->consume_size <
380 PKT_FIELD(vsk, write_notify_min_window))
381 PKT_FIELD(vsk, write_notify_min_window) =
382 vmci_trans(vsk)->consume_size;
383}
384
385static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
386{
387 struct vsock_sock *vsk = vsock_sk(sk);
388
389 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
390 if (vmci_trans(vsk)->consume_size <
391 PKT_FIELD(vsk, write_notify_min_window))
392 PKT_FIELD(vsk, write_notify_min_window) =
393 vmci_trans(vsk)->consume_size;
394}
395
396static int
397vmci_transport_notify_pkt_recv_pre_dequeue(
398 struct sock *sk,
399 size_t target,
400 struct vmci_transport_recv_notify_data *data)
401{
402 return 0; /* NOP for QState. */
403}
404
405static int
406vmci_transport_notify_pkt_send_pre_block(
407 struct sock *sk,
408 struct vmci_transport_send_notify_data *data)
409{
410 return 0; /* NOP for QState. */
411}
412
413static int
414vmci_transport_notify_pkt_send_pre_enqueue(
415 struct sock *sk,
416 struct vmci_transport_send_notify_data *data)
417{
418 return 0; /* NOP for QState. */
419}
420
421/* Socket notification operations that are always based on control packets (QState). */
422struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
423 vmci_transport_notify_pkt_socket_init,
424 vmci_transport_notify_pkt_socket_destruct,
425 vmci_transport_notify_pkt_poll_in,
426 vmci_transport_notify_pkt_poll_out,
427 vmci_transport_notify_pkt_handle_pkt,
428 vmci_transport_notify_pkt_recv_init,
429 vmci_transport_notify_pkt_recv_pre_block,
430 vmci_transport_notify_pkt_recv_pre_dequeue,
431 vmci_transport_notify_pkt_recv_post_dequeue,
432 vmci_transport_notify_pkt_send_init,
433 vmci_transport_notify_pkt_send_pre_block,
434 vmci_transport_notify_pkt_send_pre_enqueue,
435 vmci_transport_notify_pkt_send_post_enqueue,
436 vmci_transport_notify_pkt_process_request,
437 vmci_transport_notify_pkt_process_negotiate,
438};
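
The table above is filled positionally, which is fragile if fields are ever reordered. A designated-initializer sketch would make the pairing explicit; the field names below are assumptions inferred from the callback roles and would need to be checked against the real struct vmci_transport_notify_ops definition.

/* Hypothetical designated-initializer form of the table above;
 * field names are assumed, not verified against the header.
 */
struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
	.socket_init       = vmci_transport_notify_pkt_socket_init,
	.socket_destruct   = vmci_transport_notify_pkt_socket_destruct,
	.poll_in           = vmci_transport_notify_pkt_poll_in,
	.poll_out          = vmci_transport_notify_pkt_poll_out,
	.handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
	.recv_init         = vmci_transport_notify_pkt_recv_init,
	.recv_pre_block    = vmci_transport_notify_pkt_recv_pre_block,
	.recv_pre_dequeue  = vmci_transport_notify_pkt_recv_pre_dequeue,
	.recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
	.send_init         = vmci_transport_notify_pkt_send_init,
	.send_pre_block    = vmci_transport_notify_pkt_send_pre_block,
	.send_pre_enqueue  = vmci_transport_notify_pkt_send_pre_enqueue,
	.send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
	.process_request   = vmci_transport_notify_pkt_process_request,
	.process_negotiate = vmci_transport_notify_pkt_process_negotiate,
};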
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
new file mode 100644
index 000000000000..b7df1aea7c59
--- /dev/null
+++ b/net/vmw_vsock/vsock_addr.c
@@ -0,0 +1,86 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vsock_addr.h"
22
23void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
24{
25 memset(addr, 0, sizeof(*addr));
26 addr->svm_family = AF_VSOCK;
27 addr->svm_cid = cid;
28 addr->svm_port = port;
29}
30EXPORT_SYMBOL_GPL(vsock_addr_init);
31
32int vsock_addr_validate(const struct sockaddr_vm *addr)
33{
34 if (!addr)
35 return -EFAULT;
36
37 if (addr->svm_family != AF_VSOCK)
38 return -EAFNOSUPPORT;
39
40 if (addr->svm_zero[0] != 0)
41 return -EINVAL;
42
43 return 0;
44}
45EXPORT_SYMBOL_GPL(vsock_addr_validate);
46
47bool vsock_addr_bound(const struct sockaddr_vm *addr)
48{
49 return addr->svm_port != VMADDR_PORT_ANY;
50}
51EXPORT_SYMBOL_GPL(vsock_addr_bound);
52
53void vsock_addr_unbind(struct sockaddr_vm *addr)
54{
55 vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
56}
57EXPORT_SYMBOL_GPL(vsock_addr_unbind);
58
59bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
60 const struct sockaddr_vm *other)
61{
62 return addr->svm_cid == other->svm_cid &&
63 addr->svm_port == other->svm_port;
64}
65EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
66
67bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
68 const struct sockaddr_vm *other)
69{
70 return (addr->svm_cid == VMADDR_CID_ANY ||
71 other->svm_cid == VMADDR_CID_ANY ||
72 addr->svm_cid == other->svm_cid) &&
73 addr->svm_port == other->svm_port;
74}
75EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
76
77int vsock_addr_cast(const struct sockaddr *addr,
78 size_t len, struct sockaddr_vm **out_addr)
79{
80 if (len < sizeof(**out_addr))
81 return -EFAULT;
82
83 *out_addr = (struct sockaddr_vm *)addr;
84 return vsock_addr_validate(*out_addr);
85}
86EXPORT_SYMBOL_GPL(vsock_addr_cast);
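
A minimal sketch of how a caller would use these helpers to validate a user-supplied address, assuming a bind-style entry point; the function name is hypothetical.

/* Hypothetical caller, for illustration only: validating a
 * user-supplied sockaddr before binding, using the helpers above.
 */
static int example_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_vm *vm_addr;
	int err;

	err = vsock_addr_cast(uaddr, len, &vm_addr);
	if (err < 0)
		return err;	/* wrong size, family, or nonzero padding */

	/* vm_addr->svm_cid / svm_port are now safe to use. */
	return 0;
}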
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
new file mode 100644
index 000000000000..cdfbcefdf843
--- /dev/null
+++ b/net/vmw_vsock/vsock_addr.h
@@ -0,0 +1,32 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _VSOCK_ADDR_H_
17#define _VSOCK_ADDR_H_
18
19#include <linux/vm_sockets.h>
20
21void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
22int vsock_addr_validate(const struct sockaddr_vm *addr);
23bool vsock_addr_bound(const struct sockaddr_vm *addr);
24void vsock_addr_unbind(struct sockaddr_vm *addr);
25bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
26 const struct sockaddr_vm *other);
27bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
28 const struct sockaddr_vm *other);
29int vsock_addr_cast(const struct sockaddr *addr, size_t len,
30 struct sockaddr_vm **out_addr);
31
32#endif
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
deleted file mode 100644
index a157a2e64e18..000000000000
--- a/net/wanrouter/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
1#
2# Configuration for WAN router
3#
4
5config WAN_ROUTER
6 tristate "WAN router (DEPRECATED)"
7 depends on EXPERIMENTAL
8 ---help---
9 Wide Area Networks (WANs), such as X.25, frame relay and leased
10 lines, are used to interconnect Local Area Networks (LANs) over vast
11 distances with data transfer rates significantly higher than those
12 achievable with commonly used asynchronous modem connections.
13 Usually, a quite expensive external device called a `WAN router' is
14 needed to connect to a WAN.
15
16 As an alternative, WAN routing can be built into the Linux kernel.
17 With relatively inexpensive WAN interface cards available on the
18 market, a perfectly usable router can be built for less than half
19 the price of an external router. If you have one of those cards and
20 wish to use your Linux box as a WAN router, say Y here and also to
21 the WAN driver for your card, below. You will then need the
22 wan-tools package which is available from <ftp://ftp.sangoma.com/>.
23
24 To compile WAN routing support as a module, choose M here: the
25 module will be called wanrouter.
26
27 If unsure, say N.
diff --git a/net/wanrouter/Makefile b/net/wanrouter/Makefile
deleted file mode 100644
index 4da14bc48078..000000000000
--- a/net/wanrouter/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for the Linux WAN router layer.
3#
4
5obj-$(CONFIG_WAN_ROUTER) += wanrouter.o
6
7wanrouter-y := wanproc.o wanmain.o
diff --git a/net/wanrouter/patchlevel b/net/wanrouter/patchlevel
deleted file mode 100644
index c043eea7767e..000000000000
--- a/net/wanrouter/patchlevel
+++ /dev/null
@@ -1 +0,0 @@
12.2.1
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
deleted file mode 100644
index 2ab785064b7e..000000000000
--- a/net/wanrouter/wanmain.c
+++ /dev/null
@@ -1,782 +0,0 @@
1/*****************************************************************************
2* wanmain.c WAN Multiprotocol Router Module. Main code.
3*
4* This module is completely hardware-independent and provides
5* the following common services for the WAN Link Drivers:
6* o WAN device management (registering, unregistering)
7* o Network interface management
8* o Physical connection management (dial-up, incoming calls)
9* o Logical connection management (switched virtual circuits)
10* o Protocol encapsulation/decapsulation
11*
12* Author: Gideon Hack
13*
14* Copyright: (c) 1995-1999 Sangoma Technologies Inc.
15*
16* This program is free software; you can redistribute it and/or
17* modify it under the terms of the GNU General Public License
18* as published by the Free Software Foundation; either version
19* 2 of the License, or (at your option) any later version.
20* ============================================================================
21* Nov 24, 2000 Nenad Corbic Updated for 2.4.X kernels
22* Nov 07, 2000 Nenad Corbic Updated the Multi-Port PPP for kernels 2.2.16 and
23* greater.
24* Aug 2, 2000 Nenad Corbic Block the Multi-Port PPP from running on
25* kernels 2.2.16 or greater. The SyncPPP
26* has changed.
27* Jul 13, 2000 Nenad Corbic Added SyncPPP support
28* Added extra debugging in device_setup().
29* Oct 01, 1999 Gideon Hack Update for s514 PCI card
30* Dec 27, 1996 Gene Kozin Initial version (based on Sangoma's WANPIPE)
31* Jan 16, 1997 Gene Kozin router_devlist made public
32* Jan 31, 1997 Alan Cox Hacked it about a bit for 2.1
33* Jun 27, 1997 Alan Cox realigned with vendor code
34* Oct 15, 1997 Farhan Thawar changed wan_encapsulate to add a pad byte of 0
35* Apr 20, 1998 Alan Cox Fixed 2.1 symbols
36* May 17, 1998 K. Baranowski Fixed SNAP encapsulation in wan_encapsulate
37* Dec 15, 1998 Arnaldo Melo support for firmwares of up to 128000 bytes
38* check wandev->setup return value
39* Dec 22, 1998 Arnaldo Melo vmalloc/vfree used in device_setup to allocate
40* kernel memory and copy configuration data to
41* kernel space (for big firmwares)
42* Jun 02, 1999 Gideon Hack Updates for Linux 2.0.X and 2.2.X kernels.
43*****************************************************************************/
44
45#include <linux/stddef.h> /* offsetof(), etc. */
46#include <linux/capability.h>
47#include <linux/errno.h> /* return codes */
48#include <linux/kernel.h>
49#include <linux/module.h> /* support for loadable modules */
50#include <linux/slab.h> /* kmalloc(), kfree() */
51#include <linux/mutex.h>
52#include <linux/mm.h>
53#include <linux/string.h> /* inline mem*, str* functions */
54
55#include <asm/byteorder.h> /* htons(), etc. */
56#include <linux/wanrouter.h> /* WAN router API definitions */
57
58#include <linux/vmalloc.h> /* vmalloc, vfree */
59#include <asm/uaccess.h> /* copy_to/from_user */
60#include <linux/init.h> /* __initfunc et al. */
61
62#define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev)))
63
64/*
65 * Function Prototypes
66 */
67
68/*
69 * WAN device IOCTL handlers
70 */
71
72static DEFINE_MUTEX(wanrouter_mutex);
73static int wanrouter_device_setup(struct wan_device *wandev,
74 wandev_conf_t __user *u_conf);
75static int wanrouter_device_stat(struct wan_device *wandev,
76 wandev_stat_t __user *u_stat);
77static int wanrouter_device_shutdown(struct wan_device *wandev);
78static int wanrouter_device_new_if(struct wan_device *wandev,
79 wanif_conf_t __user *u_conf);
80static int wanrouter_device_del_if(struct wan_device *wandev,
81 char __user *u_name);
82
83/*
84 * Miscellaneous
85 */
86
87static struct wan_device *wanrouter_find_device(char *name);
88static int wanrouter_delete_interface(struct wan_device *wandev, char *name);
89static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
90 __acquires(lock);
91static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
92 __releases(lock);
93
94
95
96/*
97 * Global Data
98 */
99
100static char wanrouter_fullname[] = "Sangoma WANPIPE Router";
101static char wanrouter_copyright[] = "(c) 1995-2000 Sangoma Technologies Inc.";
102static char wanrouter_modname[] = ROUTER_NAME; /* short module name */
103struct wan_device* wanrouter_router_devlist; /* list of registered devices */
104
105/*
106 * Organizationally Unique Identifiers (OUIs) for encapsulation/decapsulation
107 */
108
109#if 0
110static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
111static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 };
112#endif
113
114static int __init wanrouter_init(void)
115{
116 int err;
117
118 printk(KERN_INFO "%s v%u.%u %s\n",
119 wanrouter_fullname, ROUTER_VERSION, ROUTER_RELEASE,
120 wanrouter_copyright);
121
122 err = wanrouter_proc_init();
123 if (err)
124 printk(KERN_INFO "%s: can't create entry in proc filesystem!\n",
125 wanrouter_modname);
126
127 return err;
128}
129
130static void __exit wanrouter_cleanup (void)
131{
132 wanrouter_proc_cleanup();
133}
134
135/*
136 * This is just plain dumb. We should move the bugger to drivers/net/wan,
137 * slap it first in directory and make it module_init(). The only reason
138 * for subsys_initcall() here is that net goes after drivers (why, BTW?)
139 */
140subsys_initcall(wanrouter_init);
141module_exit(wanrouter_cleanup);
142
143/*
144 * Kernel APIs
145 */
146
147/*
148 * Register WAN device.
149 * o verify device credentials
150 * o create an entry for the device in the /proc/net/router directory
151 * o initialize internally maintained fields of the wan_device structure
152 * o link device data space to a singly-linked list
153 * o if it's the first device, then start kernel 'thread'
154 * o increment module use count
155 *
156 * Return:
157 * 0 Ok
158 * < 0 error.
159 *
160 * Context: process
161 */
162
163
164int register_wan_device(struct wan_device *wandev)
165{
166 int err, namelen;
167
168 if ((wandev == NULL) || (wandev->magic != ROUTER_MAGIC) ||
169 (wandev->name == NULL))
170 return -EINVAL;
171
172 namelen = strlen(wandev->name);
173 if (!namelen || (namelen > WAN_DRVNAME_SZ))
174 return -EINVAL;
175
176 if (wanrouter_find_device(wandev->name))
177 return -EEXIST;
178
179#ifdef WANDEBUG
180 printk(KERN_INFO "%s: registering WAN device %s\n",
181 wanrouter_modname, wandev->name);
182#endif
183
184 /*
185 * Register /proc directory entry
186 */
187 err = wanrouter_proc_add(wandev);
188 if (err) {
189 printk(KERN_INFO
190 "%s: can't create /proc/net/router/%s entry!\n",
191 wanrouter_modname, wandev->name);
192 return err;
193 }
194
195 /*
196 * Initialize fields of the wan_device structure maintained by the
197 * router and update local data.
198 */
199
200 wandev->ndev = 0;
201 wandev->dev = NULL;
202 wandev->next = wanrouter_router_devlist;
203 wanrouter_router_devlist = wandev;
204 return 0;
205}
206
207/*
208 * Unregister WAN device.
209 * o shut down device
210 * o unlink device data space from the linked list
211 * o delete device entry in the /proc/net/router directory
212 * o decrement module use count
213 *
214 * Return: 0 Ok
215 * <0 error.
216 * Context: process
217 */
218
219
220int unregister_wan_device(char *name)
221{
222 struct wan_device *wandev, *prev;
223
224 if (name == NULL)
225 return -EINVAL;
226
227 for (wandev = wanrouter_router_devlist, prev = NULL;
228 wandev && strcmp(wandev->name, name);
229 prev = wandev, wandev = wandev->next)
230 ;
231 if (wandev == NULL)
232 return -ENODEV;
233
234#ifdef WANDEBUG
235 printk(KERN_INFO "%s: unregistering WAN device %s\n",
236 wanrouter_modname, name);
237#endif
238
239 if (wandev->state != WAN_UNCONFIGURED)
240 wanrouter_device_shutdown(wandev);
241
242 if (prev)
243 prev->next = wandev->next;
244 else
245 wanrouter_router_devlist = wandev->next;
246
247 wanrouter_proc_delete(wandev);
248 return 0;
249}
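
A minimal sketch of a driver registering against this API, using only fields the code above checks (magic, name, setup, shutdown); my_setup() and my_shutdown() are hypothetical driver callbacks and error handling is elided.

/* Hypothetical driver callbacks, illustrative only. */
static int my_setup(struct wan_device *w, wandev_conf_t *conf) { return 0; }
static int my_shutdown(struct wan_device *w) { return 0; }

static struct wan_device my_wandev = {
	.magic    = ROUTER_MAGIC,	/* checked by register_wan_device() */
	.name     = "wanpipe1",
	.setup    = my_setup,
	.shutdown = my_shutdown,
};

static int __init my_wan_init(void)
{
	return register_wan_device(&my_wandev);
}

static void __exit my_wan_exit(void)
{
	unregister_wan_device(my_wandev.name);
}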
250
251#if 0
252
253/*
254 * Encapsulate packet.
255 *
256 * Return: encapsulation header size
257 * < 0 - unsupported Ethertype
258 *
259 * Notes:
260 * 1. This function may be called on interrupt context.
261 */
262
263
264int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev,
265 unsigned short type)
266{
267 int hdr_len = 0;
268
269 switch (type) {
270 case ETH_P_IP: /* IP datagram encapsulation */
271 hdr_len += 1;
272 skb_push(skb, 1);
273 skb->data[0] = NLPID_IP;
274 break;
275
276 case ETH_P_IPX: /* SNAP encapsulation */
277 case ETH_P_ARP:
278 hdr_len += 7;
279 skb_push(skb, 7);
280 skb->data[0] = 0;
281 skb->data[1] = NLPID_SNAP;
282 skb_copy_to_linear_data_offset(skb, 2, wanrouter_oui_ether,
283 sizeof(wanrouter_oui_ether));
284 *((unsigned short*)&skb->data[5]) = htons(type);
285 break;
286
287 default: /* Unknown packet type */
288 printk(KERN_INFO
289 "%s: unsupported Ethertype 0x%04X on interface %s!\n",
290 wanrouter_modname, type, dev->name);
291 hdr_len = -EINVAL;
292 }
293 return hdr_len;
294}
295
296
297/*
298 * Decapsulate packet.
299 *
300 * Return: Ethertype (in network order)
301 * 0 unknown encapsulation
302 *
303 * Notes:
304 * 1. This function may be called on interrupt context.
305 */
306
307
308__be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
309{
310 int cnt = skb->data[0] ? 0 : 1; /* there may be a pad present */
311 __be16 ethertype;
312
313 switch (skb->data[cnt]) {
314 case NLPID_IP: /* IP datagram */
315 ethertype = htons(ETH_P_IP);
316 cnt += 1;
317 break;
318
319 case NLPID_SNAP: /* SNAP encapsulation */
320 if (memcmp(&skb->data[cnt + 1], wanrouter_oui_ether,
321 sizeof(wanrouter_oui_ether))){
322 printk(KERN_INFO
323 "%s: unsupported SNAP OUI %02X-%02X-%02X "
324 "on interface %s!\n", wanrouter_modname,
325 skb->data[cnt+1], skb->data[cnt+2],
326 skb->data[cnt+3], dev->name);
327 return 0;
328 }
329 ethertype = *((__be16*)&skb->data[cnt+4]);
330 cnt += 6;
331 break;
332
333 /* add other protocols, e.g. CLNP, ESIS, ISIS, if needed */
334
335 default:
336 printk(KERN_INFO
337 "%s: unsupported NLPID 0x%02X on interface %s!\n",
338 wanrouter_modname, skb->data[cnt], dev->name);
339 return 0;
340 }
341 skb->protocol = ethertype;
342 skb->pkt_type = PACKET_HOST; /* Physically point to point */
343 skb_pull(skb, cnt);
344 skb_reset_mac_header(skb);
345 return ethertype;
346}
347
348#endif /* 0 */
349
350/*
351 * WAN device IOCTL.
352 * o find WAN device associated with this node
353 * o execute requested action or pass command to the device driver
354 */
355
356long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
357{
358 struct inode *inode = file->f_path.dentry->d_inode;
359 int err = 0;
360 struct proc_dir_entry *dent;
361 struct wan_device *wandev;
362 void __user *data = (void __user *)arg;
363
364 if (!capable(CAP_NET_ADMIN))
365 return -EPERM;
366
367 if ((cmd >> 8) != ROUTER_IOCTL)
368 return -EINVAL;
369
370 dent = PDE(inode);
371 if ((dent == NULL) || (dent->data == NULL))
372 return -EINVAL;
373
374 wandev = dent->data;
375 if (wandev->magic != ROUTER_MAGIC)
376 return -EINVAL;
377
378 mutex_lock(&wanrouter_mutex);
379 switch (cmd) {
380 case ROUTER_SETUP:
381 err = wanrouter_device_setup(wandev, data);
382 break;
383
384 case ROUTER_DOWN:
385 err = wanrouter_device_shutdown(wandev);
386 break;
387
388 case ROUTER_STAT:
389 err = wanrouter_device_stat(wandev, data);
390 break;
391
392 case ROUTER_IFNEW:
393 err = wanrouter_device_new_if(wandev, data);
394 break;
395
396 case ROUTER_IFDEL:
397 err = wanrouter_device_del_if(wandev, data);
398 break;
399
400 case ROUTER_IFSTAT:
401 break;
402
403 default:
404 if ((cmd >= ROUTER_USER) &&
405 (cmd <= ROUTER_USER_MAX) &&
406 wandev->ioctl)
407 err = wandev->ioctl(wandev, cmd, arg);
408 else err = -EINVAL;
409 }
410 mutex_unlock(&wanrouter_mutex);
411 return err;
412}
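
A hypothetical userspace sketch of exercising this dispatcher: the ioctl is issued against the per-device /proc node (whose file_operations route it here), and the caller needs CAP_NET_ADMIN. The path and error handling are illustrative only.

/* Hypothetical userspace caller; requires CAP_NET_ADMIN. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/wanrouter.h>	/* ROUTER_STAT, wandev_stat_t */

int example_router_stat(const char *dev, wandev_stat_t *stat)
{
	int fd = open(dev, O_RDONLY);	/* e.g. "/proc/net/router/wanpipe1" */
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, ROUTER_STAT, stat);	/* dispatched above */
	close(fd);
	return err;
}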
413
414/*
415 * WAN Driver IOCTL Handlers
416 */
417
418/*
419 * Setup WAN link device.
420 * o verify user address space
421 * o allocate kernel memory and copy configuration data to kernel space
422 * o if configuration data includes extension, copy it to kernel space too
423 * o call driver's setup() entry point
424 */
425
426static int wanrouter_device_setup(struct wan_device *wandev,
427 wandev_conf_t __user *u_conf)
428{
429 void *data = NULL;
430 wandev_conf_t *conf;
431 int err = -EINVAL;
432
433 if (wandev->setup == NULL) { /* Nothing to do ? */
434 printk(KERN_INFO "%s: ERROR, No setup script: wandev->setup()\n",
435 wandev->name);
436 return 0;
437 }
438
439 conf = kmalloc(sizeof(wandev_conf_t), GFP_KERNEL);
440 if (conf == NULL){
441 printk(KERN_INFO "%s: ERROR, Failed to allocate kernel memory !\n",
442 wandev->name);
443 return -ENOBUFS;
444 }
445
446 if (copy_from_user(conf, u_conf, sizeof(wandev_conf_t))) {
447 printk(KERN_INFO "%s: Failed to copy user config data to kernel space!\n",
448 wandev->name);
449 kfree(conf);
450 return -EFAULT;
451 }
452
453 if (conf->magic != ROUTER_MAGIC) {
454 kfree(conf);
455 printk(KERN_INFO "%s: ERROR, Invalid MAGIC Number\n",
456 wandev->name);
457 return -EINVAL;
458 }
459
460 if (conf->data_size && conf->data) {
461 if (conf->data_size > 128000) {
462 printk(KERN_INFO
463 "%s: ERROR, Invalid firmware data size %i !\n",
464 wandev->name, conf->data_size);
465 kfree(conf);
466 return -EINVAL;
467 }
468
469 data = vmalloc(conf->data_size);
470 if (!data) {
471 printk(KERN_INFO
472 "%s: ERROR, Failed allocate kernel memory !\n",
473 wandev->name);
474 kfree(conf);
475 return -ENOBUFS;
476 }
477 if (!copy_from_user(data, conf->data, conf->data_size)) {
478 conf->data = data;
479 err = wandev->setup(wandev, conf);
480 } else {
481 printk(KERN_INFO
482 "%s: ERROR, Failed to copy from user data !\n",
483 wandev->name);
484 err = -EFAULT;
485 }
486 vfree(data);
487 } else {
488 printk(KERN_INFO
489 "%s: ERROR, No firmware found ! Firmware size = %i !\n",
490 wandev->name, conf->data_size);
491 }
492
493 kfree(conf);
494 return err;
495}
496
497/*
498 * Shutdown WAN device.
499 * o delete all not opened logical channels for this device
500 * o call driver's shutdown() entry point
501 */
502
503static int wanrouter_device_shutdown(struct wan_device *wandev)
504{
505 struct net_device *dev;
506 int err=0;
507
508 if (wandev->state == WAN_UNCONFIGURED)
509 return 0;
510
511 printk(KERN_INFO "\n%s: Shutting Down!\n",wandev->name);
512
513 for (dev = wandev->dev; dev;) {
514 err = wanrouter_delete_interface(wandev, dev->name);
515 if (err)
516 return err;
517 /* The above function deallocates the current dev
518 * structure. Therefore, we cannot use netdev_priv(dev)
519 * as the next element: wandev->dev points to the
520 * next element */
521 dev = wandev->dev;
522 }
523
524 if (wandev->ndev)
525 return -EBUSY; /* there are opened interfaces */
526
527 if (wandev->shutdown)
528 err=wandev->shutdown(wandev);
529
530 return err;
531}
532
533/*
534 * Get WAN device status & statistics.
535 */
536
537static int wanrouter_device_stat(struct wan_device *wandev,
538 wandev_stat_t __user *u_stat)
539{
540 wandev_stat_t stat;
541
542 memset(&stat, 0, sizeof(stat));
543
544 /* Ask device driver to update device statistics */
545 if ((wandev->state != WAN_UNCONFIGURED) && wandev->update)
546 wandev->update(wandev);
547
548 /* Fill out structure */
549 stat.ndev = wandev->ndev;
550 stat.state = wandev->state;
551
552 if (copy_to_user(u_stat, &stat, sizeof(stat)))
553 return -EFAULT;
554
555 return 0;
556}
557
558/*
559 * Create new WAN interface.
560 * o verify user address space
561 * o copy configuration data to kernel address space
562 * o allocate network interface data space
563 * o call driver's new_if() entry point
564 * o make sure there is no interface name conflict
565 * o register network interface
566 */
567
568static int wanrouter_device_new_if(struct wan_device *wandev,
569 wanif_conf_t __user *u_conf)
570{
571 wanif_conf_t *cnf;
572 struct net_device *dev = NULL;
573 int err;
574
575 if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL))
576 return -ENODEV;
577
578 cnf = kmalloc(sizeof(wanif_conf_t), GFP_KERNEL);
579 if (!cnf)
580 return -ENOBUFS;
581
582 err = -EFAULT;
583 if (copy_from_user(cnf, u_conf, sizeof(wanif_conf_t)))
584 goto out;
585
586 err = -EINVAL;
587 if (cnf->magic != ROUTER_MAGIC)
588 goto out;
589
590 if (cnf->config_id == WANCONFIG_MPPP) {
591 printk(KERN_INFO "%s: Wanpipe Mulit-Port PPP support has not been compiled in!\n",
592 wandev->name);
593 err = -EPROTONOSUPPORT;
594 goto out;
595 } else {
596 err = wandev->new_if(wandev, dev, cnf);
597 }
598
599 if (!err) {
600 /* Register network interface. This will invoke init()
601 * function supplied by the driver. If device registered
602 * successfully, add it to the interface list.
603 */
604
605#ifdef WANDEBUG
606 printk(KERN_INFO "%s: registering interface %s...\n",
607 wanrouter_modname, dev->name);
608#endif
609
610 err = register_netdev(dev);
611 if (!err) {
612 struct net_device *slave = NULL;
613 unsigned long smp_flags=0;
614
615 lock_adapter_irq(&wandev->lock, &smp_flags);
616
617 if (wandev->dev == NULL) {
618 wandev->dev = dev;
619 } else {
620 for (slave=wandev->dev;
621 DEV_TO_SLAVE(slave);
622 slave = DEV_TO_SLAVE(slave))
623 DEV_TO_SLAVE(slave) = dev;
624 }
625 ++wandev->ndev;
626
627 unlock_adapter_irq(&wandev->lock, &smp_flags);
628 err = 0; /* done !!! */
629 goto out;
630 }
631 if (wandev->del_if)
632 wandev->del_if(wandev, dev);
633 free_netdev(dev);
634 }
635
636out:
637 kfree(cnf);
638 return err;
639}
640
641
642/*
643 * Delete WAN logical channel.
644 * o verify user address space
645 * o copy configuration data to kernel address space
646 */
647
648static int wanrouter_device_del_if(struct wan_device *wandev, char __user *u_name)
649{
650 char name[WAN_IFNAME_SZ + 1];
651 int err = 0;
652
653 if (wandev->state == WAN_UNCONFIGURED)
654 return -ENODEV;
655
656 memset(name, 0, sizeof(name));
657
658 if (copy_from_user(name, u_name, WAN_IFNAME_SZ))
659 return -EFAULT;
660
661 err = wanrouter_delete_interface(wandev, name);
662 if (err)
663 return err;
664
665 /* If last interface being deleted, shutdown card
666 * This helps with administration at leaf nodes
667 * (You can tell if the person at the other end of the phone
668 * has an interface configured) and avoids DoS vulnerabilities
669 * in binary driver files - this fixes a problem with the current
670 * Sangoma driver going into strange states when all the network
671 * interfaces are deleted and the link irrecoverably disconnected.
672 */
673
674 if (!wandev->ndev && wandev->shutdown)
675 err = wandev->shutdown(wandev);
676
677 return err;
678}
679
680/*
681 * Miscellaneous Functions
682 */
683
684/*
685 * Find WAN device by name.
686 * Return pointer to the WAN device data space or NULL if device not found.
687 */
688
689static struct wan_device *wanrouter_find_device(char *name)
690{
691 struct wan_device *wandev;
692
693 for (wandev = wanrouter_router_devlist;
694 wandev && strcmp(wandev->name, name);
695 wandev = wandev->next);
696 return wandev;
697}
698
699/*
700 * Delete WAN logical channel identified by its name.
701 * o find logical channel by its name
702 * o call driver's del_if() entry point
703 * o unregister network interface
704 * o unlink channel data space from linked list of channels
705 * o release channel data space
706 *
707 * Return: 0 success
708 * -ENODEV channel not found.
709 * -EBUSY interface is open
710 *
711 * Note: It is the caller's responsibility to make sure that open
712 * interfaces are not removed; the function itself refuses to delete
713 * an interface that is still running and returns -EBUSY.
714 */
715
716static int wanrouter_delete_interface(struct wan_device *wandev, char *name)
717{
718 struct net_device *dev = NULL, *prev = NULL;
719 unsigned long smp_flags=0;
720
721 lock_adapter_irq(&wandev->lock, &smp_flags);
722 dev = wandev->dev;
723 prev = NULL;
724 while (dev && strcmp(name, dev->name)) {
725 struct net_device **slave = netdev_priv(dev);
726 prev = dev;
727 dev = *slave;
728 }
729 unlock_adapter_irq(&wandev->lock, &smp_flags);
730
731 if (dev == NULL)
732 return -ENODEV; /* interface not found */
733
734 if (netif_running(dev))
735 return -EBUSY; /* interface in use */
736
737 if (wandev->del_if)
738 wandev->del_if(wandev, dev);
739
740 lock_adapter_irq(&wandev->lock, &smp_flags);
741 if (prev) {
742 struct net_device **prev_slave = netdev_priv(prev);
743 struct net_device **slave = netdev_priv(dev);
744
745 *prev_slave = *slave;
746 } else {
747 struct net_device **slave = netdev_priv(dev);
748 wandev->dev = *slave;
749 }
750 --wandev->ndev;
751 unlock_adapter_irq(&wandev->lock, &smp_flags);
752
753 printk(KERN_INFO "%s: unregistering '%s'\n", wandev->name, dev->name);
754
755 unregister_netdev(dev);
756
757 free_netdev(dev);
758
759 return 0;
760}
761
762static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
763 __acquires(lock)
764{
765 spin_lock_irqsave(lock, *smp_flags);
766}
767
768
769static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
770 __releases(lock)
771{
772 spin_unlock_irqrestore(lock, *smp_flags);
773}
774
775EXPORT_SYMBOL(register_wan_device);
776EXPORT_SYMBOL(unregister_wan_device);
777
778MODULE_LICENSE("GPL");
779
780/*
781 * End
782 */
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
deleted file mode 100644
index c43612ee96bb..000000000000
--- a/net/wanrouter/wanproc.c
+++ /dev/null
@@ -1,380 +0,0 @@
1/*****************************************************************************
2* wanproc.c WAN Router Module. /proc filesystem interface.
3*
4* This module is completely hardware-independent and provides
5* access to the router using Linux /proc filesystem.
6*
7* Author: Gideon Hack
8*
9* Copyright: (c) 1995-1999 Sangoma Technologies Inc.
10*
11* This program is free software; you can redistribute it and/or
12* modify it under the terms of the GNU General Public License
13* as published by the Free Software Foundation; either version
14* 2 of the License, or (at your option) any later version.
15* ============================================================================
16* Jun 02, 1999 Gideon Hack Updates for Linux 2.2.X kernels.
17* Jun 29, 1997 Alan Cox Merged with 1.0.3 vendor code
18* Jan 29, 1997 Gene Kozin v1.0.1. Implemented /proc read routines
19* Jan 30, 1997 Alan Cox Hacked around for 2.1
20* Dec 13, 1996 Gene Kozin Initial version (based on Sangoma's WANPIPE)
21*****************************************************************************/
22
23#include <linux/init.h> /* __initfunc et al. */
24#include <linux/stddef.h> /* offsetof(), etc. */
25#include <linux/errno.h> /* return codes */
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/wanrouter.h> /* WAN router API definitions */
29#include <linux/seq_file.h>
30#include <linux/mutex.h>
31
32#include <net/net_namespace.h>
33#include <asm/io.h>
34
35#define PROC_STATS_FORMAT "%30s: %12lu\n"
36
37/****** Defines and Macros **************************************************/
38
39#define PROT_DECODE(prot) ((prot == WANCONFIG_FR) ? " FR" :\
40 (prot == WANCONFIG_X25) ? " X25" : \
41 (prot == WANCONFIG_PPP) ? " PPP" : \
42 (prot == WANCONFIG_CHDLC) ? " CHDLC": \
43 (prot == WANCONFIG_MPPP) ? " MPPP" : \
44 " Unknown" )
45
46/****** Function Prototypes *************************************************/
47
48#ifdef CONFIG_PROC_FS
49
50/* Miscellaneous */
51
52/*
53 * Structures for interfacing with the /proc filesystem.
54 * Router creates its own directory /proc/net/router with the following
55 * entries:
56 * config device configuration
57 * status global device statistics
58 * <device> entry for each WAN device
59 */
60
61/*
62 * Generic /proc/net/router/<file> file and inode operations
63 */
64
65/*
66 * /proc/net/router
67 */
68
69static DEFINE_MUTEX(config_mutex);
70static struct proc_dir_entry *proc_router;
71
72/* Strings */
73
74/*
75 * Interface functions
76 */
77
78/****** Proc filesystem entry points ****************************************/
79
80/*
81 * Iterator
82 */
83static void *r_start(struct seq_file *m, loff_t *pos)
84{
85 struct wan_device *wandev;
86 loff_t l = *pos;
87
88 mutex_lock(&config_mutex);
89 if (!l--)
90 return SEQ_START_TOKEN;
91 for (wandev = wanrouter_router_devlist; l-- && wandev;
92 wandev = wandev->next)
93 ;
94 return wandev;
95}
96
97static void *r_next(struct seq_file *m, void *v, loff_t *pos)
98{
99 struct wan_device *wandev = v;
100 (*pos)++;
101 return (v == SEQ_START_TOKEN) ? wanrouter_router_devlist : wandev->next;
102}
103
104static void r_stop(struct seq_file *m, void *v)
105{
106 mutex_unlock(&config_mutex);
107}
108
109static int config_show(struct seq_file *m, void *v)
110{
111 struct wan_device *p = v;
112 if (v == SEQ_START_TOKEN) {
113 seq_puts(m, "Device name | port |IRQ|DMA| mem.addr |"
114 "mem.size|option1|option2|option3|option4\n");
115 return 0;
116 }
117 if (!p->state)
118 return 0;
119 seq_printf(m, "%-15s|0x%-4X|%3u|%3u| 0x%-8lX |0x%-6X|%7u|%7u|%7u|%7u\n",
120 p->name, p->ioport, p->irq, p->dma, p->maddr, p->msize,
121 p->hw_opt[0], p->hw_opt[1], p->hw_opt[2], p->hw_opt[3]);
122 return 0;
123}
124
125static int status_show(struct seq_file *m, void *v)
126{
127 struct wan_device *p = v;
128 if (v == SEQ_START_TOKEN) {
129 seq_puts(m, "Device name |protocol|station|interface|"
130 "clocking|baud rate| MTU |ndev|link state\n");
131 return 0;
132 }
133 if (!p->state)
134 return 0;
135 seq_printf(m, "%-15s|%-8s| %-7s| %-9s|%-8s|%9u|%5u|%3u |",
136 p->name,
137 PROT_DECODE(p->config_id),
138 p->config_id == WANCONFIG_FR ?
139 (p->station ? "Node" : "CPE") :
140 (p->config_id == WANCONFIG_X25 ?
141 (p->station ? "DCE" : "DTE") :
142 ("N/A")),
143 p->interface ? "V.35" : "RS-232",
144 p->clocking ? "internal" : "external",
145 p->bps,
146 p->mtu,
147 p->ndev);
148
149 switch (p->state) {
150 case WAN_UNCONFIGURED:
151 seq_printf(m, "%-12s\n", "unconfigured");
152 break;
153 case WAN_DISCONNECTED:
154 seq_printf(m, "%-12s\n", "disconnected");
155 break;
156 case WAN_CONNECTING:
157 seq_printf(m, "%-12s\n", "connecting");
158 break;
159 case WAN_CONNECTED:
160 seq_printf(m, "%-12s\n", "connected");
161 break;
162 default:
163 seq_printf(m, "%-12s\n", "invalid");
164 break;
165 }
166 return 0;
167}
168
169static const struct seq_operations config_op = {
170 .start = r_start,
171 .next = r_next,
172 .stop = r_stop,
173 .show = config_show,
174};
175
176static const struct seq_operations status_op = {
177 .start = r_start,
178 .next = r_next,
179 .stop = r_stop,
180 .show = status_show,
181};
182
183static int config_open(struct inode *inode, struct file *file)
184{
185 return seq_open(file, &config_op);
186}
187
188static int status_open(struct inode *inode, struct file *file)
189{
190 return seq_open(file, &status_op);
191}
192
193static const struct file_operations config_fops = {
194 .owner = THIS_MODULE,
195 .open = config_open,
196 .read = seq_read,
197 .llseek = seq_lseek,
198 .release = seq_release,
199};
200
201static const struct file_operations status_fops = {
202 .owner = THIS_MODULE,
203 .open = status_open,
204 .read = seq_read,
205 .llseek = seq_lseek,
206 .release = seq_release,
207};
208
209static int wandev_show(struct seq_file *m, void *v)
210{
211 struct wan_device *wandev = m->private;
212
213 if (wandev->magic != ROUTER_MAGIC)
214 return 0;
215
216 if (!wandev->state) {
217 seq_puts(m, "device is not configured!\n");
218 return 0;
219 }
220
221 /* Update device statistics */
222 if (wandev->update) {
223 int err = wandev->update(wandev);
224 if (err == -EAGAIN) {
225 seq_puts(m, "Device is busy!\n");
226 return 0;
227 }
228 if (err) {
229 seq_puts(m, "Device is not configured!\n");
230 return 0;
231 }
232 }
233
234 seq_printf(m, PROC_STATS_FORMAT,
235 "total packets received", wandev->stats.rx_packets);
236 seq_printf(m, PROC_STATS_FORMAT,
237 "total packets transmitted", wandev->stats.tx_packets);
238 seq_printf(m, PROC_STATS_FORMAT,
239 "total bytes received", wandev->stats.rx_bytes);
240 seq_printf(m, PROC_STATS_FORMAT,
241 "total bytes transmitted", wandev->stats.tx_bytes);
242 seq_printf(m, PROC_STATS_FORMAT,
243 "bad packets received", wandev->stats.rx_errors);
244 seq_printf(m, PROC_STATS_FORMAT,
245 "packet transmit problems", wandev->stats.tx_errors);
246 seq_printf(m, PROC_STATS_FORMAT,
247 "received frames dropped", wandev->stats.rx_dropped);
248 seq_printf(m, PROC_STATS_FORMAT,
249 "transmit frames dropped", wandev->stats.tx_dropped);
250 seq_printf(m, PROC_STATS_FORMAT,
251 "multicast packets received", wandev->stats.multicast);
252 seq_printf(m, PROC_STATS_FORMAT,
253 "transmit collisions", wandev->stats.collisions);
254 seq_printf(m, PROC_STATS_FORMAT,
255 "receive length errors", wandev->stats.rx_length_errors);
256 seq_printf(m, PROC_STATS_FORMAT,
257 "receiver overrun errors", wandev->stats.rx_over_errors);
258 seq_printf(m, PROC_STATS_FORMAT,
259 "CRC errors", wandev->stats.rx_crc_errors);
260 seq_printf(m, PROC_STATS_FORMAT,
261 "frame format errors (aborts)", wandev->stats.rx_frame_errors);
262 seq_printf(m, PROC_STATS_FORMAT,
263 "receiver fifo overrun", wandev->stats.rx_fifo_errors);
264 seq_printf(m, PROC_STATS_FORMAT,
265 "receiver missed packet", wandev->stats.rx_missed_errors);
266 seq_printf(m, PROC_STATS_FORMAT,
267 "aborted frames transmitted", wandev->stats.tx_aborted_errors);
268 return 0;
269}
270
271static int wandev_open(struct inode *inode, struct file *file)
272{
273 return single_open(file, wandev_show, PDE(inode)->data);
274}
275
276static const struct file_operations wandev_fops = {
277 .owner = THIS_MODULE,
278 .open = wandev_open,
279 .read = seq_read,
280 .llseek = seq_lseek,
281 .release = single_release,
282 .unlocked_ioctl = wanrouter_ioctl,
283};
284
285/*
286 * Initialize router proc interface.
287 */
288
289int __init wanrouter_proc_init(void)
290{
291 struct proc_dir_entry *p;
292 proc_router = proc_mkdir(ROUTER_NAME, init_net.proc_net);
293 if (!proc_router)
294 goto fail;
295
296 p = proc_create("config", S_IRUGO, proc_router, &config_fops);
297 if (!p)
298 goto fail_config;
299 p = proc_create("status", S_IRUGO, proc_router, &status_fops);
300 if (!p)
301 goto fail_stat;
302 return 0;
303fail_stat:
304 remove_proc_entry("config", proc_router);
305fail_config:
306 remove_proc_entry(ROUTER_NAME, init_net.proc_net);
307fail:
308 return -ENOMEM;
309}
310
311/*
312 * Clean up router proc interface.
313 */
314
315void wanrouter_proc_cleanup(void)
316{
317 remove_proc_entry("config", proc_router);
318 remove_proc_entry("status", proc_router);
319 remove_proc_entry(ROUTER_NAME, init_net.proc_net);
320}
321
322/*
323 * Add directory entry for WAN device.
324 */
325
326int wanrouter_proc_add(struct wan_device* wandev)
327{
328 if (wandev->magic != ROUTER_MAGIC)
329 return -EINVAL;
330
331 wandev->dent = proc_create(wandev->name, S_IRUGO,
332 proc_router, &wandev_fops);
333 if (!wandev->dent)
334 return -ENOMEM;
335 wandev->dent->data = wandev;
336 return 0;
337}
338
339/*
340 * Delete directory entry for WAN device.
341 */
342int wanrouter_proc_delete(struct wan_device* wandev)
343{
344 if (wandev->magic != ROUTER_MAGIC)
345 return -EINVAL;
346 remove_proc_entry(wandev->name, proc_router);
347 return 0;
348}
349
350#else
351
352/*
353 * No /proc - output stubs
354 */
355
356int __init wanrouter_proc_init(void)
357{
358 return 0;
359}
360
361void wanrouter_proc_cleanup(void)
362{
363}
364
365int wanrouter_proc_add(struct wan_device *wandev)
366{
367 return 0;
368}
369
370int wanrouter_proc_delete(struct wan_device *wandev)
371{
372 return 0;
373}
374
375#endif
376
377/*
378 * End
379 */
380
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 324e8d851dc4..a4a14e8f55cc 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -46,3 +46,65 @@ int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
46 46
47 return err; 47 return err;
48} 48}
49
50void cfg80211_ch_switch_notify(struct net_device *dev,
51 struct cfg80211_chan_def *chandef)
52{
53 struct wireless_dev *wdev = dev->ieee80211_ptr;
54 struct wiphy *wiphy = wdev->wiphy;
55 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
56
57 trace_cfg80211_ch_switch_notify(dev, chandef);
58
59 wdev_lock(wdev);
60
61 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
62 wdev->iftype != NL80211_IFTYPE_P2P_GO))
63 goto out;
64
65 wdev->channel = chandef->chan;
66 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
67out:
68 wdev_unlock(wdev);
69 return;
70}
71EXPORT_SYMBOL(cfg80211_ch_switch_notify);
72
73bool cfg80211_rx_spurious_frame(struct net_device *dev,
74 const u8 *addr, gfp_t gfp)
75{
76 struct wireless_dev *wdev = dev->ieee80211_ptr;
77 bool ret;
78
79 trace_cfg80211_rx_spurious_frame(dev, addr);
80
81 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
82 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
83 trace_cfg80211_return_bool(false);
84 return false;
85 }
86 ret = nl80211_unexpected_frame(dev, addr, gfp);
87 trace_cfg80211_return_bool(ret);
88 return ret;
89}
90EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
91
92bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
93 const u8 *addr, gfp_t gfp)
94{
95 struct wireless_dev *wdev = dev->ieee80211_ptr;
96 bool ret;
97
98 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
99
100 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
101 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
102 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
103 trace_cfg80211_return_bool(false);
104 return false;
105 }
106 ret = nl80211_unexpected_4addr_frame(dev, addr, gfp);
107 trace_cfg80211_return_bool(ret);
108 return ret;
109}
110EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a7990bb16529..fd556ac05fdb 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -76,6 +76,10 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
76 return false; 76 return false;
77 if (!chandef->center_freq2) 77 if (!chandef->center_freq2)
78 return false; 78 return false;
79 /* adjacent is not allowed -- that's a 160 MHz channel */
80 if (chandef->center_freq1 - chandef->center_freq2 == 80 ||
81 chandef->center_freq2 - chandef->center_freq1 == 80)
82 return false;
79 break; 83 break;
80 case NL80211_CHAN_WIDTH_80: 84 case NL80211_CHAN_WIDTH_80:
81 if (chandef->center_freq1 != control_freq + 30 && 85 if (chandef->center_freq1 != control_freq + 30 &&
@@ -143,6 +147,32 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
143 } 147 }
144} 148}
145 149
150static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
151{
152 int width;
153
154 switch (c->width) {
155 case NL80211_CHAN_WIDTH_20:
156 case NL80211_CHAN_WIDTH_20_NOHT:
157 width = 20;
158 break;
159 case NL80211_CHAN_WIDTH_40:
160 width = 40;
161 break;
162 case NL80211_CHAN_WIDTH_80P80:
163 case NL80211_CHAN_WIDTH_80:
164 width = 80;
165 break;
166 case NL80211_CHAN_WIDTH_160:
167 width = 160;
168 break;
169 default:
170 WARN_ON_ONCE(1);
171 return -1;
172 }
173 return width;
174}
175
146const struct cfg80211_chan_def * 176const struct cfg80211_chan_def *
147cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, 177cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
148 const struct cfg80211_chan_def *c2) 178 const struct cfg80211_chan_def *c2)
@@ -188,6 +218,93 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
188} 218}
189EXPORT_SYMBOL(cfg80211_chandef_compatible); 219EXPORT_SYMBOL(cfg80211_chandef_compatible);
190 220
221static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
222 u32 bandwidth,
223 enum nl80211_dfs_state dfs_state)
224{
225 struct ieee80211_channel *c;
226 u32 freq;
227
228 for (freq = center_freq - bandwidth/2 + 10;
229 freq <= center_freq + bandwidth/2 - 10;
230 freq += 20) {
231 c = ieee80211_get_channel(wiphy, freq);
232 if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
233 continue;
234
235 c->dfs_state = dfs_state;
236 c->dfs_state_entered = jiffies;
237 }
238}
239
240void cfg80211_set_dfs_state(struct wiphy *wiphy,
241 const struct cfg80211_chan_def *chandef,
242 enum nl80211_dfs_state dfs_state)
243{
244 int width;
245
246 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
247 return;
248
249 width = cfg80211_chandef_get_width(chandef);
250 if (width < 0)
251 return;
252
253 cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1,
254 width, dfs_state);
255
256 if (!chandef->center_freq2)
257 return;
258 cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2,
259 width, dfs_state);
260}
261
262static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
263 u32 center_freq,
264 u32 bandwidth)
265{
266 struct ieee80211_channel *c;
267 u32 freq;
268
269 for (freq = center_freq - bandwidth/2 + 10;
270 freq <= center_freq + bandwidth/2 - 10;
271 freq += 20) {
272 c = ieee80211_get_channel(wiphy, freq);
273 if (!c)
274 return -EINVAL;
275
276 if (c->flags & IEEE80211_CHAN_RADAR)
277 return 1;
278 }
279 return 0;
280}
281
282
283int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
284 const struct cfg80211_chan_def *chandef)
285{
286 int width;
287 int r;
288
289 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
290 return -EINVAL;
291
292 width = cfg80211_chandef_get_width(chandef);
293 if (width < 0)
294 return -EINVAL;
295
296 r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1,
297 width);
298 if (r)
299 return r;
300
301 if (!chandef->center_freq2)
302 return 0;
303
304 return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
305 width);
306}
307
191static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, 308static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
192 u32 center_freq, u32 bandwidth, 309 u32 center_freq, u32 bandwidth,
193 u32 prohibited_flags) 310 u32 prohibited_flags)
@@ -199,7 +316,16 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
199 freq <= center_freq + bandwidth/2 - 10; 316 freq <= center_freq + bandwidth/2 - 10;
200 freq += 20) { 317 freq += 20) {
201 c = ieee80211_get_channel(wiphy, freq); 318 c = ieee80211_get_channel(wiphy, freq);
202 if (!c || c->flags & prohibited_flags) 319 if (!c)
320 return false;
321
322 /* check for radar flags */
323 if ((prohibited_flags & c->flags & IEEE80211_CHAN_RADAR) &&
324 (c->dfs_state != NL80211_DFS_AVAILABLE))
325 return false;
326
327 /* check for the other flags */
328 if (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR)
203 return false; 329 return false;
204 } 330 }
205 331
@@ -249,6 +375,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
249 case NL80211_CHAN_WIDTH_80: 375 case NL80211_CHAN_WIDTH_80:
250 if (!vht_cap->vht_supported) 376 if (!vht_cap->vht_supported)
251 return false; 377 return false;
378 prohibited_flags |= IEEE80211_CHAN_NO_80MHZ;
252 width = 80; 379 width = 80;
253 break; 380 break;
254 case NL80211_CHAN_WIDTH_160: 381 case NL80211_CHAN_WIDTH_160:
@@ -256,6 +383,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
256 return false; 383 return false;
257 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)) 384 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
258 return false; 385 return false;
386 prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
259 width = 160; 387 width = 160;
260 break; 388 break;
261 default: 389 default:
@@ -263,7 +391,16 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
263 return false; 391 return false;
264 } 392 }
265 393
266 /* TODO: missing regulatory check on 80/160 bandwidth */ 394 /*
395 * TODO: What if there are only certain 80/160/80+80 MHz channels
396 * allowed by the driver, or only certain combinations?
397 * For 40 MHz the driver can set the NO_HT40 flags, but for
398 * 80/160 MHz and in particular 80+80 MHz this isn't really
399 * feasible and we only have NO_80MHZ/NO_160MHZ so far but
400 * no way to cover 80+80 MHz or more complex restrictions.
401 * Note that such restrictions also need to be advertised to
402 * userspace, for example for P2P channel selection.
403 */
267 404
268 if (width > 20) 405 if (width > 20)
269 prohibited_flags |= IEEE80211_CHAN_NO_OFDM; 406 prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
@@ -340,7 +477,10 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
340 break; 477 break;
341 case NL80211_IFTYPE_AP: 478 case NL80211_IFTYPE_AP:
342 case NL80211_IFTYPE_P2P_GO: 479 case NL80211_IFTYPE_P2P_GO:
343 if (wdev->beacon_interval) { 480 if (wdev->cac_started) {
481 *chan = wdev->channel;
482 *chanmode = CHAN_MODE_SHARED;
483 } else if (wdev->beacon_interval) {
344 *chan = wdev->channel; 484 *chan = wdev->channel;
345 *chanmode = CHAN_MODE_SHARED; 485 *chanmode = CHAN_MODE_SHARED;
346 } 486 }
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 14d990400354..5ffff039b017 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -57,9 +57,6 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
57{ 57{
58 struct cfg80211_registered_device *result = NULL, *rdev; 58 struct cfg80211_registered_device *result = NULL, *rdev;
59 59
60 if (!wiphy_idx_valid(wiphy_idx))
61 return NULL;
62
63 assert_cfg80211_lock(); 60 assert_cfg80211_lock();
64 61
65 list_for_each_entry(rdev, &cfg80211_rdev_list, list) { 62 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
@@ -74,10 +71,8 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
74 71
75int get_wiphy_idx(struct wiphy *wiphy) 72int get_wiphy_idx(struct wiphy *wiphy)
76{ 73{
77 struct cfg80211_registered_device *rdev; 74 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
78 if (!wiphy) 75
79 return WIPHY_IDX_STALE;
80 rdev = wiphy_to_dev(wiphy);
81 return rdev->wiphy_idx; 76 return rdev->wiphy_idx;
82} 77}
83 78
@@ -86,9 +81,6 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
86{ 81{
87 struct cfg80211_registered_device *rdev; 82 struct cfg80211_registered_device *rdev;
88 83
89 if (!wiphy_idx_valid(wiphy_idx))
90 return NULL;
91
92 assert_cfg80211_lock(); 84 assert_cfg80211_lock();
93 85
94 rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); 86 rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx);
@@ -309,7 +301,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
309 301
310 rdev->wiphy_idx = wiphy_counter++; 302 rdev->wiphy_idx = wiphy_counter++;
311 303
312 if (unlikely(!wiphy_idx_valid(rdev->wiphy_idx))) { 304 if (unlikely(rdev->wiphy_idx < 0)) {
313 wiphy_counter--; 305 wiphy_counter--;
314 mutex_unlock(&cfg80211_mutex); 306 mutex_unlock(&cfg80211_mutex);
315 /* ugh, wrapped! */ 307 /* ugh, wrapped! */
@@ -332,6 +324,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
332 INIT_LIST_HEAD(&rdev->bss_list); 324 INIT_LIST_HEAD(&rdev->bss_list);
333 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); 325 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
334 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); 326 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
327 INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
328 cfg80211_dfs_channels_update_work);
335#ifdef CONFIG_CFG80211_WEXT 329#ifdef CONFIG_CFG80211_WEXT
336 rdev->wiphy.wext = &cfg80211_wext_handler; 330 rdev->wiphy.wext = &cfg80211_wext_handler;
337#endif 331#endif
@@ -373,7 +367,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
373 rdev->wiphy.rts_threshold = (u32) -1; 367 rdev->wiphy.rts_threshold = (u32) -1;
374 rdev->wiphy.coverage_class = 0; 368 rdev->wiphy.coverage_class = 0;
375 369
376 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; 370 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
371 NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
377 372
378 return &rdev->wiphy; 373 return &rdev->wiphy;
379} 374}
@@ -390,8 +385,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
390 385
391 c = &wiphy->iface_combinations[i]; 386 c = &wiphy->iface_combinations[i];
392 387
393 /* Combinations with just one interface aren't real */ 388 /*
394 if (WARN_ON(c->max_interfaces < 2)) 389 * Combinations with just one interface aren't real,
 390 * but we make an exception for DFS.
391 */
392 if (WARN_ON((c->max_interfaces < 2) && !c->radar_detect_widths))
395 return -EINVAL; 393 return -EINVAL;
396 394
397 /* Need at least one channel */ 395 /* Need at least one channel */
@@ -406,6 +404,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
406 CFG80211_MAX_NUM_DIFFERENT_CHANNELS)) 404 CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
407 return -EINVAL; 405 return -EINVAL;
408 406
407 /* DFS only works on one channel. */
408 if (WARN_ON(c->radar_detect_widths &&
409 (c->num_different_channels > 1)))
410 return -EINVAL;
411
409 if (WARN_ON(!c->n_limits)) 412 if (WARN_ON(!c->n_limits))
410 return -EINVAL; 413 return -EINVAL;
411 414
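
wiphy_verify_combinations() now accepts single-interface combinations when (and only when) they advertise radar_detect_widths, and rejects radar detection combined with multi-channel operation. The two checks in isolation, as a plain C model with abbreviated field names:

/* Illustrative model of the two new checks -- not kernel code. */
#include <stdio.h>

struct iface_combination {
	int max_interfaces;
	int num_different_channels;
	unsigned radar_detect_widths; /* bitmap; nonzero => DFS capable */
};

static int verify_combination(const struct iface_combination *c)
{
	/* single-interface combos are only meaningful for DFS */
	if (c->max_interfaces < 2 && !c->radar_detect_widths)
		return -1;
	/* radar detection is restricted to one operating channel */
	if (c->radar_detect_widths && c->num_different_channels > 1)
		return -1;
	return 0;
}

int main(void)
{
	struct iface_combination dfs = { 1, 1, 0x1 };
	struct iface_combination bad = { 1, 1, 0 };
	printf("%d %d\n", verify_combination(&dfs), verify_combination(&bad));
	return 0;
}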
@@ -478,6 +481,11 @@ int wiphy_register(struct wiphy *wiphy)
478 ETH_ALEN))) 481 ETH_ALEN)))
479 return -EINVAL; 482 return -EINVAL;
480 483
484 if (WARN_ON(wiphy->max_acl_mac_addrs &&
485 (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) ||
486 !rdev->ops->set_mac_acl)))
487 return -EINVAL;
488
481 if (wiphy->addresses) 489 if (wiphy->addresses)
482 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); 490 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
483 491
@@ -690,6 +698,7 @@ void wiphy_unregister(struct wiphy *wiphy)
690 flush_work(&rdev->scan_done_wk); 698 flush_work(&rdev->scan_done_wk);
691 cancel_work_sync(&rdev->conn_work); 699 cancel_work_sync(&rdev->conn_work);
692 flush_work(&rdev->event_work); 700 flush_work(&rdev->event_work);
701 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
693 702
694 if (rdev->wowlan && rdev->ops->set_wakeup) 703 if (rdev->wowlan && rdev->ops->set_wakeup)
695 rdev_set_wakeup(rdev, false); 704 rdev_set_wakeup(rdev, false);
@@ -710,7 +719,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
710 kfree(reg); 719 kfree(reg);
711 } 720 }
712 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
713 cfg80211_put_bss(&scan->pub); 722 cfg80211_put_bss(&rdev->wiphy, &scan->pub);
714 kfree(rdev); 723 kfree(rdev);
715} 724}
716 725
@@ -866,8 +875,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
866 /* allow mac80211 to determine the timeout */ 875 /* allow mac80211 to determine the timeout */
867 wdev->ps_timeout = -1; 876 wdev->ps_timeout = -1;
868 877
869 if (!dev->ethtool_ops) 878 netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
870 dev->ethtool_ops = &cfg80211_ethtool_ops;
871 879
872 if ((wdev->iftype == NL80211_IFTYPE_STATION || 880 if ((wdev->iftype == NL80211_IFTYPE_STATION ||
873 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || 881 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
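
netdev_set_default_ethtool_ops() replaces the open-coded "assign only if the driver did not set its own ops" pattern above. A toy model of that pattern (not the kernel API):

/* "Default unless the driver already set its own" in model form. */
#include <stdio.h>

struct ops { const char *name; };

struct netdev_model {
	const struct ops *ethtool_ops;
};

static void set_default_ops(struct netdev_model *dev, const struct ops *def)
{
	if (!dev->ethtool_ops)
		dev->ethtool_ops = def;
}

int main(void)
{
	static const struct ops cfg80211_def = { "cfg80211" };
	static const struct ops driver_own = { "driver" };
	struct netdev_model a = { 0 }, b = { &driver_own };

	set_default_ops(&a, &cfg80211_def);
	set_default_ops(&b, &cfg80211_def);
	printf("%s %s\n", a.ethtool_ops->name, b.ethtool_ops->name);
	return 0;
}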
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3563097169cb..3aec0e429d8a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -8,7 +8,6 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h>
12#include <linux/rbtree.h> 11#include <linux/rbtree.h>
13#include <linux/debugfs.h> 12#include <linux/debugfs.h>
14#include <linux/rfkill.h> 13#include <linux/rfkill.h>
@@ -18,6 +17,9 @@
18#include <net/cfg80211.h> 17#include <net/cfg80211.h>
19#include "reg.h" 18#include "reg.h"
20 19
20
21#define WIPHY_IDX_INVALID -1
22
21struct cfg80211_registered_device { 23struct cfg80211_registered_device {
22 const struct cfg80211_ops *ops; 24 const struct cfg80211_ops *ops;
23 struct list_head list; 25 struct list_head list;
@@ -84,9 +86,11 @@ struct cfg80211_registered_device {
84 86
85 struct cfg80211_wowlan *wowlan; 87 struct cfg80211_wowlan *wowlan;
86 88
89 struct delayed_work dfs_update_channels_wk;
90
87 /* must be last because of the way we do wiphy_priv(), 91 /* must be last because of the way we do wiphy_priv(),
88 * and it should at least be aligned to NETDEV_ALIGN */ 92 * and it should at least be aligned to NETDEV_ALIGN */
89 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 93 struct wiphy wiphy __aligned(NETDEV_ALIGN);
90}; 94};
91 95
92static inline 96static inline
@@ -96,13 +100,6 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
96 return container_of(wiphy, struct cfg80211_registered_device, wiphy); 100 return container_of(wiphy, struct cfg80211_registered_device, wiphy);
97} 101}
98 102
99/* Note 0 is valid, hence phy0 */
100static inline
101bool wiphy_idx_valid(int wiphy_idx)
102{
103 return wiphy_idx >= 0;
104}
105
106static inline void 103static inline void
107cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) 104cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
108{ 105{
@@ -113,6 +110,9 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
113 for (i = 0; i < rdev->wowlan->n_patterns; i++) 110 for (i = 0; i < rdev->wowlan->n_patterns; i++)
114 kfree(rdev->wowlan->patterns[i].mask); 111 kfree(rdev->wowlan->patterns[i].mask);
115 kfree(rdev->wowlan->patterns); 112 kfree(rdev->wowlan->patterns);
113 if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock)
114 sock_release(rdev->wowlan->tcp->sock);
115 kfree(rdev->wowlan->tcp);
116 kfree(rdev->wowlan); 116 kfree(rdev->wowlan);
117} 117}
118 118
@@ -126,17 +126,12 @@ static inline void assert_cfg80211_lock(void)
126 lockdep_assert_held(&cfg80211_mutex); 126 lockdep_assert_held(&cfg80211_mutex);
127} 127}
128 128
129/*
130 * You can use this to mark a wiphy_idx as not having an associated wiphy.
131 * It guarantees cfg80211_rdev_by_wiphy_idx(wiphy_idx) will return NULL
132 */
133#define WIPHY_IDX_STALE -1
134
135struct cfg80211_internal_bss { 129struct cfg80211_internal_bss {
136 struct list_head list; 130 struct list_head list;
131 struct list_head hidden_list;
137 struct rb_node rbn; 132 struct rb_node rbn;
138 unsigned long ts; 133 unsigned long ts;
139 struct kref ref; 134 unsigned long refcount;
140 atomic_t hold; 135 atomic_t hold;
141 136
142 /* must be last because of priv member */ 137 /* must be last because of priv member */
@@ -435,7 +430,24 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
435 struct wireless_dev *wdev, 430 struct wireless_dev *wdev,
436 enum nl80211_iftype iftype, 431 enum nl80211_iftype iftype,
437 struct ieee80211_channel *chan, 432 struct ieee80211_channel *chan,
438 enum cfg80211_chan_mode chanmode); 433 enum cfg80211_chan_mode chanmode,
434 u8 radar_detect);
435
436/**
437 * cfg80211_chandef_dfs_required - checks if radar detection is required
438 * @wiphy: the wiphy to validate against
439 * @chandef: the channel definition to check
440 * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
441 */
442int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
 443 const struct cfg80211_chan_def *chandef);
444
445void cfg80211_set_dfs_state(struct wiphy *wiphy,
446 const struct cfg80211_chan_def *chandef,
447 enum nl80211_dfs_state dfs_state);
448
449void cfg80211_dfs_channels_update_work(struct work_struct *work);
450
439 451
440static inline int 452static inline int
441cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, 453cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
@@ -443,7 +455,7 @@ cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
443 enum nl80211_iftype iftype) 455 enum nl80211_iftype iftype)
444{ 456{
445 return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL, 457 return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
446 CHAN_MODE_UNDEFINED); 458 CHAN_MODE_UNDEFINED, 0);
447} 459}
448 460
449static inline int 461static inline int
@@ -460,7 +472,17 @@ cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
460 enum cfg80211_chan_mode chanmode) 472 enum cfg80211_chan_mode chanmode)
461{ 473{
462 return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, 474 return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
463 chan, chanmode); 475 chan, chanmode, 0);
476}
477
478static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
479{
480 unsigned long end = jiffies;
481
482 if (end >= start)
483 return jiffies_to_msecs(end - start);
484
485 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
464} 486}
465 487
466void 488void
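
The new elapsed_jiffies_msecs() helper computes a wrap-safe elapsed time: if jiffies wrapped past MAX_JIFFY_OFFSET since start, the two segments on either side of the wrap are summed. The same arithmetic on a 16-bit counter, as a standalone model:

/* Model of wrap-safe elapsed-time math -- not the kernel helper. */
#include <stdio.h>

#define MAX_COUNT 0xFFFFu /* stand-in for MAX_JIFFY_OFFSET */

static unsigned elapsed(unsigned start, unsigned end)
{
	if (end >= start)
		return end - start;
	/* counter wrapped between start and end */
	return end + (MAX_COUNT - start) + 1;
}

int main(void)
{
	printf("%u\n", elapsed(100, 150));        /* 50 */
	printf("%u\n", elapsed(0xFFF0, 0x0010));  /* 32: wrapped */
	return 0;
}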
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 48c48ffafa1d..e37862f1b127 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -15,10 +15,10 @@ static void cfg80211_get_drvinfo(struct net_device *dev,
15 strlcpy(info->version, init_utsname()->release, sizeof(info->version)); 15 strlcpy(info->version, init_utsname()->release, sizeof(info->version));
16 16
17 if (wdev->wiphy->fw_version[0]) 17 if (wdev->wiphy->fw_version[0])
18 strncpy(info->fw_version, wdev->wiphy->fw_version, 18 strlcpy(info->fw_version, wdev->wiphy->fw_version,
19 sizeof(info->fw_version)); 19 sizeof(info->fw_version));
20 else 20 else
21 strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); 21 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
22 22
23 strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), 23 strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
24 sizeof(info->bus_info)); 24 sizeof(info->bus_info));
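
strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, which is why the hunk above switches to strlcpy(). A small demonstration, with a local strlcpy since glibc does not ship one:

/* Why strlcpy: it always NUL-terminates. */
#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);
	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "firmware-1.0", sizeof(a));    /* a is NOT terminated */
	my_strlcpy(b, "firmware-1.0", sizeof(b)); /* b == "fir" + '\0' */
	printf("b=%s a[3]=%d\n", b, a[3]);        /* a[3] is 'm', not 0 */
	return 0;
}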
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9b9551e4a6f9..d80e47194d49 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -37,7 +37,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
37 37
38 if (wdev->current_bss) { 38 if (wdev->current_bss) {
39 cfg80211_unhold_bss(wdev->current_bss); 39 cfg80211_unhold_bss(wdev->current_bss);
40 cfg80211_put_bss(&wdev->current_bss->pub); 40 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
41 } 41 }
42 42
43 cfg80211_hold_bss(bss_from_pub(bss)); 43 cfg80211_hold_bss(bss_from_pub(bss));
@@ -182,7 +182,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
182 182
183 if (wdev->current_bss) { 183 if (wdev->current_bss) {
184 cfg80211_unhold_bss(wdev->current_bss); 184 cfg80211_unhold_bss(wdev->current_bss);
185 cfg80211_put_bss(&wdev->current_bss->pub); 185 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
186 } 186 }
187 187
188 wdev->current_bss = NULL; 188 wdev->current_bss = NULL;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index f9d6ce5cfabb..55957a284f6c 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -44,6 +44,10 @@
44 44
45#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50 45#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50
46 46
47#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units (=TUs) */
48#define MESH_DEFAULT_DTIM_PERIOD 2
49#define MESH_DEFAULT_AWAKE_WINDOW 10 /* in 1024 us units (=TUs) */
50
47const struct mesh_config default_mesh_config = { 51const struct mesh_config default_mesh_config = {
48 .dot11MeshRetryTimeout = MESH_RET_T, 52 .dot11MeshRetryTimeout = MESH_RET_T,
49 .dot11MeshConfirmTimeout = MESH_CONF_T, 53 .dot11MeshConfirmTimeout = MESH_CONF_T,
@@ -69,6 +73,8 @@ const struct mesh_config default_mesh_config = {
69 .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT, 73 .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT,
70 .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL, 74 .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL,
71 .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, 75 .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
76 .power_mode = NL80211_MESH_POWER_ACTIVE,
77 .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW,
72}; 78};
73 79
74const struct mesh_setup default_mesh_setup = { 80const struct mesh_setup default_mesh_setup = {
@@ -79,6 +85,8 @@ const struct mesh_setup default_mesh_setup = {
79 .ie = NULL, 85 .ie = NULL,
80 .ie_len = 0, 86 .ie_len = 0,
81 .is_secure = false, 87 .is_secure = false,
88 .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL,
89 .dtim_period = MESH_DEFAULT_DTIM_PERIOD,
82}; 90};
83 91
84int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 92int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
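
The new mesh defaults are expressed in TUs, where 1 TU = 1024 microseconds, so the 1000 TU beacon interval is about 1.024 s and the 10 TU awake window about 10 ms. A one-line conversion for reference:

/* TU (1024 us) to milliseconds, as used by the mesh defaults. */
#include <stdio.h>

static unsigned tu_to_ms(unsigned tu)
{
	return tu * 1024u / 1000u;
}

int main(void)
{
	printf("beacon: %u ms\n", tu_to_ms(1000));     /* 1024 ms */
	printf("awake window: %u ms\n", tu_to_ms(10)); /* 10 ms */
	return 0;
}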
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 5e8123ee63fd..caddca35d686 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -58,7 +58,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
58 */ 58 */
59 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && 59 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
60 cfg80211_sme_failed_reassoc(wdev)) { 60 cfg80211_sme_failed_reassoc(wdev)) {
61 cfg80211_put_bss(bss); 61 cfg80211_put_bss(wiphy, bss);
62 goto out; 62 goto out;
63 } 63 }
64 64
@@ -70,7 +70,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
70 * do not call connect_result() now because the 70 * do not call connect_result() now because the
71 * sme will schedule work that does it later. 71 * sme will schedule work that does it later.
72 */ 72 */
73 cfg80211_put_bss(bss); 73 cfg80211_put_bss(wiphy, bss);
74 goto out; 74 goto out;
75 } 75 }
76 76
@@ -108,7 +108,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
108 if (wdev->current_bss && 108 if (wdev->current_bss &&
109 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { 109 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
110 cfg80211_unhold_bss(wdev->current_bss); 110 cfg80211_unhold_bss(wdev->current_bss);
111 cfg80211_put_bss(&wdev->current_bss->pub); 111 cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
112 wdev->current_bss = NULL; 112 wdev->current_bss = NULL;
113 was_current = true; 113 was_current = true;
114 } 114 }
@@ -164,7 +164,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
164 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { 164 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
165 cfg80211_sme_disassoc(dev, wdev->current_bss); 165 cfg80211_sme_disassoc(dev, wdev->current_bss);
166 cfg80211_unhold_bss(wdev->current_bss); 166 cfg80211_unhold_bss(wdev->current_bss);
167 cfg80211_put_bss(&wdev->current_bss->pub); 167 cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
168 wdev->current_bss = NULL; 168 wdev->current_bss = NULL;
169 } else 169 } else
170 WARN_ON(1); 170 WARN_ON(1);
@@ -324,7 +324,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
324 err = rdev_auth(rdev, dev, &req); 324 err = rdev_auth(rdev, dev, &req);
325 325
326out: 326out:
327 cfg80211_put_bss(req.bss); 327 cfg80211_put_bss(&rdev->wiphy, req.bss);
328 return err; 328 return err;
329} 329}
330 330
@@ -432,7 +432,7 @@ out:
432 if (err) { 432 if (err) {
433 if (was_connected) 433 if (was_connected)
434 wdev->sme_state = CFG80211_SME_CONNECTED; 434 wdev->sme_state = CFG80211_SME_CONNECTED;
435 cfg80211_put_bss(req.bss); 435 cfg80211_put_bss(&rdev->wiphy, req.bss);
436 } 436 }
437 437
438 return err; 438 return err;
@@ -514,7 +514,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
514 if (wdev->sme_state != CFG80211_SME_CONNECTED) 514 if (wdev->sme_state != CFG80211_SME_CONNECTED)
515 return -ENOTCONN; 515 return -ENOTCONN;
516 516
517 if (WARN_ON(!wdev->current_bss)) 517 if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state))
518 return -ENOTCONN; 518 return -ENOTCONN;
519 519
520 memset(&req, 0, sizeof(req)); 520 memset(&req, 0, sizeof(req));
@@ -572,7 +572,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
572 572
573 if (wdev->current_bss) { 573 if (wdev->current_bss) {
574 cfg80211_unhold_bss(wdev->current_bss); 574 cfg80211_unhold_bss(wdev->current_bss);
575 cfg80211_put_bss(&wdev->current_bss->pub); 575 cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub);
576 wdev->current_bss = NULL; 576 wdev->current_bss = NULL;
577 } 577 }
578} 578}
@@ -988,64 +988,122 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
988} 988}
989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); 989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
990 990
991void cfg80211_ch_switch_notify(struct net_device *dev, 991void cfg80211_dfs_channels_update_work(struct work_struct *work)
992 struct cfg80211_chan_def *chandef)
993{ 992{
994 struct wireless_dev *wdev = dev->ieee80211_ptr; 993 struct delayed_work *delayed_work;
995 struct wiphy *wiphy = wdev->wiphy; 994 struct cfg80211_registered_device *rdev;
996 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 995 struct cfg80211_chan_def chandef;
996 struct ieee80211_supported_band *sband;
997 struct ieee80211_channel *c;
998 struct wiphy *wiphy;
999 bool check_again = false;
1000 unsigned long timeout, next_time = 0;
1001 int bandid, i;
1002
1003 delayed_work = container_of(work, struct delayed_work, work);
1004 rdev = container_of(delayed_work, struct cfg80211_registered_device,
1005 dfs_update_channels_wk);
1006 wiphy = &rdev->wiphy;
1007
1008 mutex_lock(&cfg80211_mutex);
1009 for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
1010 sband = wiphy->bands[bandid];
1011 if (!sband)
1012 continue;
997 1013
998 trace_cfg80211_ch_switch_notify(dev, chandef); 1014 for (i = 0; i < sband->n_channels; i++) {
1015 c = &sband->channels[i];
999 1016
1000 wdev_lock(wdev); 1017 if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
1018 continue;
1001 1019
1002 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 1020 timeout = c->dfs_state_entered +
1003 wdev->iftype != NL80211_IFTYPE_P2P_GO)) 1021 IEEE80211_DFS_MIN_NOP_TIME_MS;
1004 goto out;
1005 1022
1006 wdev->channel = chandef->chan; 1023 if (time_after_eq(jiffies, timeout)) {
1007 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL); 1024 c->dfs_state = NL80211_DFS_USABLE;
1008out: 1025 cfg80211_chandef_create(&chandef, c,
1009 wdev_unlock(wdev); 1026 NL80211_CHAN_NO_HT);
1010 return; 1027
1028 nl80211_radar_notify(rdev, &chandef,
1029 NL80211_RADAR_NOP_FINISHED,
1030 NULL, GFP_ATOMIC);
1031 continue;
1032 }
1033
1034 if (!check_again)
1035 next_time = timeout - jiffies;
1036 else
1037 next_time = min(next_time, timeout - jiffies);
1038 check_again = true;
1039 }
1040 }
1041 mutex_unlock(&cfg80211_mutex);
1042
 1043 /* reschedule if other channels are still waiting to be cleared */
1044 if (check_again)
1045 queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
1046 next_time);
1011} 1047}
1012EXPORT_SYMBOL(cfg80211_ch_switch_notify);
1013 1048
1014bool cfg80211_rx_spurious_frame(struct net_device *dev, 1049
1015 const u8 *addr, gfp_t gfp) 1050void cfg80211_radar_event(struct wiphy *wiphy,
1051 struct cfg80211_chan_def *chandef,
1052 gfp_t gfp)
1016{ 1053{
1017 struct wireless_dev *wdev = dev->ieee80211_ptr; 1054 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1018 bool ret; 1055 unsigned long timeout;
1019 1056
1020 trace_cfg80211_rx_spurious_frame(dev, addr); 1057 trace_cfg80211_radar_event(wiphy, chandef);
1021 1058
1022 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 1059 /* only set the chandef supplied channel to unavailable, in
1023 wdev->iftype != NL80211_IFTYPE_P2P_GO)) { 1060 * case the radar is detected on only one of multiple channels
1024 trace_cfg80211_return_bool(false); 1061 * spanned by the chandef.
1025 return false; 1062 */
1026 } 1063 cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
1027 ret = nl80211_unexpected_frame(dev, addr, gfp); 1064
1028 trace_cfg80211_return_bool(ret); 1065 timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
1029 return ret; 1066 queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
1067 timeout);
1068
1069 nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
1030} 1070}
1031EXPORT_SYMBOL(cfg80211_rx_spurious_frame); 1071EXPORT_SYMBOL(cfg80211_radar_event);
1032 1072
1033bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, 1073void cfg80211_cac_event(struct net_device *netdev,
1034 const u8 *addr, gfp_t gfp) 1074 enum nl80211_radar_event event, gfp_t gfp)
1035{ 1075{
1036 struct wireless_dev *wdev = dev->ieee80211_ptr; 1076 struct wireless_dev *wdev = netdev->ieee80211_ptr;
1037 bool ret; 1077 struct wiphy *wiphy = wdev->wiphy;
1078 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1079 struct cfg80211_chan_def chandef;
1080 unsigned long timeout;
1038 1081
1039 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr); 1082 trace_cfg80211_cac_event(netdev, event);
1040 1083
1041 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 1084 if (WARN_ON(!wdev->cac_started))
1042 wdev->iftype != NL80211_IFTYPE_P2P_GO && 1085 return;
1043 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) { 1086
1044 trace_cfg80211_return_bool(false); 1087 if (WARN_ON(!wdev->channel))
1045 return false; 1088 return;
1089
1090 cfg80211_chandef_create(&chandef, wdev->channel, NL80211_CHAN_NO_HT);
1091
1092 switch (event) {
1093 case NL80211_RADAR_CAC_FINISHED:
1094 timeout = wdev->cac_start_time +
1095 msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
1096 WARN_ON(!time_after_eq(jiffies, timeout));
1097 cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_AVAILABLE);
1098 break;
1099 case NL80211_RADAR_CAC_ABORTED:
1100 break;
1101 default:
1102 WARN_ON(1);
1103 return;
1046 } 1104 }
1047 ret = nl80211_unexpected_4addr_frame(dev, addr, gfp); 1105 wdev->cac_started = false;
1048 trace_cfg80211_return_bool(ret); 1106
1049 return ret; 1107 nl80211_radar_notify(rdev, &chandef, event, netdev, gfp);
1050} 1108}
1051EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); 1109EXPORT_SYMBOL(cfg80211_cac_event);
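
The new mlme.c code gives DFS channels a simple life cycle: a radar event marks the affected channels NL80211_DFS_UNAVAILABLE and schedules dfs_update_channels_wk; once the non-occupancy period (IEEE80211_DFS_MIN_NOP_TIME_MS, 30 minutes) has elapsed, the work item flips them back to USABLE and emits NL80211_RADAR_NOP_FINISHED; a completed CAC marks the channel AVAILABLE. A standalone model of the update loop, with plain arrays and milliseconds standing in for the channel lists and jiffies:

/* Model of cfg80211_dfs_channels_update_work's core loop: flip
 * channels out of UNAVAILABLE once the non-occupancy period has
 * elapsed; otherwise remember the earliest remaining timeout so the
 * caller can reschedule. */
#include <stdio.h>

#define NOP_MS (30u * 60u * 1000u)

enum { USABLE, UNAVAILABLE, AVAILABLE };
struct chan { unsigned state; unsigned long entered_ms; };

static long update(struct chan *c, int n, unsigned long now_ms)
{
	long next = -1; /* -1: nothing left to clear */
	for (int i = 0; i < n; i++) {
		unsigned long timeout;
		if (c[i].state != UNAVAILABLE)
			continue;
		timeout = c[i].entered_ms + NOP_MS;
		if (now_ms >= timeout) {
			c[i].state = USABLE; /* NOP finished */
			continue;
		}
		if (next < 0 || (long)(timeout - now_ms) < next)
			next = (long)(timeout - now_ms);
	}
	return next; /* ms until the next channel clears */
}

int main(void)
{
	struct chan c[2] = { { UNAVAILABLE, 0 }, { UNAVAILABLE, 1000 } };
	long next = update(c, 2, NOP_MS); /* first clears, second pending */
	printf("state0=%u state1=%u next=%ld\n", c[0].state, c[1].state, next);
	return 0;
}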
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f45706adaf34..35545ccc30fd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -19,6 +19,7 @@
19#include <net/genetlink.h> 19#include <net/genetlink.h>
20#include <net/cfg80211.h> 20#include <net/cfg80211.h>
21#include <net/sock.h> 21#include <net/sock.h>
22#include <net/inet_connection_sock.h>
22#include "core.h" 23#include "core.h"
23#include "nl80211.h" 24#include "nl80211.h"
24#include "reg.h" 25#include "reg.h"
@@ -365,6 +366,10 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
365 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, 366 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
366 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, 367 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
367 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, 368 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
 369 [NL80211_ATTR_ACL_POLICY] = { .type = NLA_U32 },
370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, },
368}; 373};
369 374
370/* policy for the key attributes */ 375/* policy for the key attributes */
@@ -397,6 +402,26 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
397 [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG }, 402 [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG },
398 [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, 403 [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG },
399 [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, 404 [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG },
405 [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED },
406};
407
408static const struct nla_policy
409nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
410 [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 },
411 [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 },
412 [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN },
413 [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
414 [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
415 [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 },
416 [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
417 .len = sizeof(struct nl80211_wowlan_tcp_data_seq)
418 },
419 [NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN] = {
420 .len = sizeof(struct nl80211_wowlan_tcp_data_token)
421 },
422 [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
423 [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 },
424 [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
400}; 425};
401 426
402/* policy for GTK rekey offload attributes */ 427/* policy for GTK rekey offload attributes */
@@ -529,8 +554,27 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
529 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
530 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
531 goto nla_put_failure; 556 goto nla_put_failure;
532 if ((chan->flags & IEEE80211_CHAN_RADAR) && 557 if (chan->flags & IEEE80211_CHAN_RADAR) {
533 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 558 u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
559 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
560 goto nla_put_failure;
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
562 chan->dfs_state))
563 goto nla_put_failure;
564 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
565 goto nla_put_failure;
566 }
567 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
568 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
569 goto nla_put_failure;
570 if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
571 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
572 goto nla_put_failure;
573 if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
574 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
575 goto nla_put_failure;
576 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
577 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
534 goto nla_put_failure; 578 goto nla_put_failure;
535 579
536 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 580 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -856,6 +900,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
856 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, 900 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
857 c->max_interfaces)) 901 c->max_interfaces))
858 goto nla_put_failure; 902 goto nla_put_failure;
903 if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
904 c->radar_detect_widths))
905 goto nla_put_failure;
859 906
860 nla_nest_end(msg, nl_combi); 907 nla_nest_end(msg, nl_combi);
861 } 908 }
@@ -867,6 +914,48 @@ nla_put_failure:
867 return -ENOBUFS; 914 return -ENOBUFS;
868} 915}
869 916
917#ifdef CONFIG_PM
918static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
919 struct sk_buff *msg)
920{
921 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
922 struct nlattr *nl_tcp;
923
924 if (!tcp)
925 return 0;
926
927 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
928 if (!nl_tcp)
929 return -ENOBUFS;
930
931 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
932 tcp->data_payload_max))
933 return -ENOBUFS;
938
939 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
940 return -ENOBUFS;
941
942 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
943 sizeof(*tcp->tok), tcp->tok))
944 return -ENOBUFS;
945
946 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
947 tcp->data_interval_max))
948 return -ENOBUFS;
949
950 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
951 tcp->wake_payload_max))
952 return -ENOBUFS;
953
954 nla_nest_end(msg, nl_tcp);
955 return 0;
956}
957#endif
958
870static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 959static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
871 struct cfg80211_registered_device *dev) 960 struct cfg80211_registered_device *dev)
872{ 961{
@@ -1233,12 +1322,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1233 dev->wiphy.wowlan.pattern_min_len, 1322 dev->wiphy.wowlan.pattern_min_len,
1234 .max_pattern_len = 1323 .max_pattern_len =
1235 dev->wiphy.wowlan.pattern_max_len, 1324 dev->wiphy.wowlan.pattern_max_len,
1325 .max_pkt_offset =
1326 dev->wiphy.wowlan.max_pkt_offset,
1236 }; 1327 };
1237 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1328 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1238 sizeof(pat), &pat)) 1329 sizeof(pat), &pat))
1239 goto nla_put_failure; 1330 goto nla_put_failure;
1240 } 1331 }
1241 1332
1333 if (nl80211_send_wowlan_tcp_caps(dev, msg))
1334 goto nla_put_failure;
1335
1242 nla_nest_end(msg, nl_wowlan); 1336 nla_nest_end(msg, nl_wowlan);
1243 } 1337 }
1244#endif 1338#endif
@@ -1265,6 +1359,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1265 dev->wiphy.ht_capa_mod_mask)) 1359 dev->wiphy.ht_capa_mod_mask))
1266 goto nla_put_failure; 1360 goto nla_put_failure;
1267 1361
1362 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
1363 dev->wiphy.max_acl_mac_addrs &&
1364 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
1365 dev->wiphy.max_acl_mac_addrs))
1366 goto nla_put_failure;
1367
1368 if (dev->wiphy.extended_capabilities &&
1369 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1370 dev->wiphy.extended_capabilities_len,
1371 dev->wiphy.extended_capabilities) ||
1372 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1373 dev->wiphy.extended_capabilities_len,
1374 dev->wiphy.extended_capabilities_mask)))
1375 goto nla_put_failure;
1376
1268 return genlmsg_end(msg, hdr); 1377 return genlmsg_end(msg, hdr);
1269 1378
1270 nla_put_failure: 1379 nla_put_failure:
@@ -2079,6 +2188,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2079 !(rdev->wiphy.interface_modes & (1 << type))) 2188 !(rdev->wiphy.interface_modes & (1 << type)))
2080 return -EOPNOTSUPP; 2189 return -EOPNOTSUPP;
2081 2190
2191 if (type == NL80211_IFTYPE_P2P_DEVICE && info->attrs[NL80211_ATTR_MAC]) {
2192 nla_memcpy(params.macaddr, info->attrs[NL80211_ATTR_MAC],
2193 ETH_ALEN);
2194 if (!is_valid_ether_addr(params.macaddr))
2195 return -EADDRNOTAVAIL;
2196 }
2197
2082 if (info->attrs[NL80211_ATTR_4ADDR]) { 2198 if (info->attrs[NL80211_ATTR_4ADDR]) {
2083 params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); 2199 params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
2084 err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type); 2200 err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type);
@@ -2481,6 +2597,97 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2481 return err; 2597 return err;
2482} 2598}
2483 2599
2600/* This function returns an error or the number of nested attributes */
2601static int validate_acl_mac_addrs(struct nlattr *nl_attr)
2602{
2603 struct nlattr *attr;
2604 int n_entries = 0, tmp;
2605
2606 nla_for_each_nested(attr, nl_attr, tmp) {
2607 if (nla_len(attr) != ETH_ALEN)
2608 return -EINVAL;
2609
2610 n_entries++;
2611 }
2612
2613 return n_entries;
2614}
2615
2616/*
2617 * This function parses ACL information and allocates memory for ACL data.
 2618 * On successful return, the calling function is responsible for freeing
 2619 * the ACL buffer returned by this function.
2620 */
2621static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy,
2622 struct genl_info *info)
2623{
2624 enum nl80211_acl_policy acl_policy;
2625 struct nlattr *attr;
2626 struct cfg80211_acl_data *acl;
2627 int i = 0, n_entries, tmp;
2628
2629 if (!wiphy->max_acl_mac_addrs)
2630 return ERR_PTR(-EOPNOTSUPP);
2631
2632 if (!info->attrs[NL80211_ATTR_ACL_POLICY])
2633 return ERR_PTR(-EINVAL);
2634
2635 acl_policy = nla_get_u32(info->attrs[NL80211_ATTR_ACL_POLICY]);
2636 if (acl_policy != NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
2637 acl_policy != NL80211_ACL_POLICY_DENY_UNLESS_LISTED)
2638 return ERR_PTR(-EINVAL);
2639
2640 if (!info->attrs[NL80211_ATTR_MAC_ADDRS])
2641 return ERR_PTR(-EINVAL);
2642
2643 n_entries = validate_acl_mac_addrs(info->attrs[NL80211_ATTR_MAC_ADDRS]);
2644 if (n_entries < 0)
2645 return ERR_PTR(n_entries);
2646
2647 if (n_entries > wiphy->max_acl_mac_addrs)
2648 return ERR_PTR(-ENOTSUPP);
2649
2650 acl = kzalloc(sizeof(*acl) + (sizeof(struct mac_address) * n_entries),
2651 GFP_KERNEL);
2652 if (!acl)
2653 return ERR_PTR(-ENOMEM);
2654
2655 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) {
2656 memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN);
2657 i++;
2658 }
2659
2660 acl->n_acl_entries = n_entries;
2661 acl->acl_policy = acl_policy;
2662
2663 return acl;
2664}
2665
2666static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
2667{
2668 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2669 struct net_device *dev = info->user_ptr[1];
2670 struct cfg80211_acl_data *acl;
2671 int err;
2672
2673 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2674 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2675 return -EOPNOTSUPP;
2676
2677 if (!dev->ieee80211_ptr->beacon_interval)
2678 return -EINVAL;
2679
2680 acl = parse_acl_data(&rdev->wiphy, info);
2681 if (IS_ERR(acl))
2682 return PTR_ERR(acl);
2683
2684 err = rdev_set_mac_acl(rdev, dev, acl);
2685
2686 kfree(acl);
2687
2688 return err;
2689}
2690
2484static int nl80211_parse_beacon(struct genl_info *info, 2691static int nl80211_parse_beacon(struct genl_info *info,
2485 struct cfg80211_beacon_data *bcn) 2692 struct cfg80211_beacon_data *bcn)
2486{ 2693{
@@ -2598,6 +2805,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2598 struct wireless_dev *wdev = dev->ieee80211_ptr; 2805 struct wireless_dev *wdev = dev->ieee80211_ptr;
2599 struct cfg80211_ap_settings params; 2806 struct cfg80211_ap_settings params;
2600 int err; 2807 int err;
2808 u8 radar_detect_width = 0;
2601 2809
2602 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2810 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2603 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2811 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -2716,14 +2924,30 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2716 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef)) 2924 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
2717 return -EINVAL; 2925 return -EINVAL;
2718 2926
2927 err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
2928 if (err < 0)
2929 return err;
2930 if (err) {
2931 radar_detect_width = BIT(params.chandef.width);
2932 params.radar_required = true;
2933 }
2934
2719 mutex_lock(&rdev->devlist_mtx); 2935 mutex_lock(&rdev->devlist_mtx);
2720 err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan, 2936 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
2721 CHAN_MODE_SHARED); 2937 params.chandef.chan,
2938 CHAN_MODE_SHARED,
2939 radar_detect_width);
2722 mutex_unlock(&rdev->devlist_mtx); 2940 mutex_unlock(&rdev->devlist_mtx);
2723 2941
2724 if (err) 2942 if (err)
2725 return err; 2943 return err;
2726 2944
2945 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
2946 params.acl = parse_acl_data(&rdev->wiphy, info);
2947 if (IS_ERR(params.acl))
2948 return PTR_ERR(params.acl);
2949 }
2950
2727 err = rdev_start_ap(rdev, dev, &params); 2951 err = rdev_start_ap(rdev, dev, &params);
2728 if (!err) { 2952 if (!err) {
2729 wdev->preset_chandef = params.chandef; 2953 wdev->preset_chandef = params.chandef;
@@ -2732,6 +2956,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2732 wdev->ssid_len = params.ssid_len; 2956 wdev->ssid_len = params.ssid_len;
2733 memcpy(wdev->ssid, params.ssid, wdev->ssid_len); 2957 memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
2734 } 2958 }
2959
2960 kfree(params.acl);
2961
2735 return err; 2962 return err;
2736} 2963}
2737 2964
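
When the AP's chandef requires radar detection, nl80211_start_ap() converts the channel width into a one-bit mask with BIT(width), and the interface-combination check verifies that mask against the advertised radar_detect_widths. The check in isolation (the enum order here is illustrative only; the real nl80211_chan_width values differ):

/* Model of the width -> bitmap check for starting a DFS AP. */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum chan_width { W20_NOHT, W20, W40, W80 }; /* order illustrative */

static int can_start_dfs_ap(enum chan_width w, unsigned radar_detect_widths)
{
	return (BIT(w) & radar_detect_widths) == BIT(w);
}

int main(void)
{
	unsigned supported = BIT(W20) | BIT(W40);
	printf("%d %d\n",
	       can_start_dfs_ap(W40, supported),  /* 1 */
	       can_start_dfs_ap(W80, supported)); /* 0 */
	return 0;
}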
@@ -2939,12 +3166,22 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
2939 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME, 3166 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2940 sinfo->inactive_time)) 3167 sinfo->inactive_time))
2941 goto nla_put_failure; 3168 goto nla_put_failure;
2942 if ((sinfo->filled & STATION_INFO_RX_BYTES) && 3169 if ((sinfo->filled & (STATION_INFO_RX_BYTES |
3170 STATION_INFO_RX_BYTES64)) &&
2943 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, 3171 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
2944 sinfo->rx_bytes)) 3172 (u32)sinfo->rx_bytes))
2945 goto nla_put_failure; 3173 goto nla_put_failure;
2946 if ((sinfo->filled & STATION_INFO_TX_BYTES) && 3174 if ((sinfo->filled & (STATION_INFO_TX_BYTES |
 3175 STATION_INFO_TX_BYTES64)) &&
2947 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, 3176 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
3177 (u32)sinfo->tx_bytes))
3178 goto nla_put_failure;
3179 if ((sinfo->filled & STATION_INFO_RX_BYTES64) &&
3180 nla_put_u64(msg, NL80211_STA_INFO_RX_BYTES64,
3181 sinfo->rx_bytes))
3182 goto nla_put_failure;
3183 if ((sinfo->filled & STATION_INFO_TX_BYTES64) &&
3184 nla_put_u64(msg, NL80211_STA_INFO_TX_BYTES64,
2948 sinfo->tx_bytes)) 3185 sinfo->tx_bytes))
2949 goto nla_put_failure; 3186 goto nla_put_failure;
2950 if ((sinfo->filled & STATION_INFO_LLID) && 3187 if ((sinfo->filled & STATION_INFO_LLID) &&
@@ -3001,6 +3238,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
3001 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS, 3238 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
3002 sinfo->beacon_loss_count)) 3239 sinfo->beacon_loss_count))
3003 goto nla_put_failure; 3240 goto nla_put_failure;
3241 if ((sinfo->filled & STATION_INFO_LOCAL_PM) &&
3242 nla_put_u32(msg, NL80211_STA_INFO_LOCAL_PM,
3243 sinfo->local_pm))
3244 goto nla_put_failure;
3245 if ((sinfo->filled & STATION_INFO_PEER_PM) &&
3246 nla_put_u32(msg, NL80211_STA_INFO_PEER_PM,
3247 sinfo->peer_pm))
3248 goto nla_put_failure;
3249 if ((sinfo->filled & STATION_INFO_NONPEER_PM) &&
3250 nla_put_u32(msg, NL80211_STA_INFO_NONPEER_PM,
3251 sinfo->nonpeer_pm))
3252 goto nla_put_failure;
3004 if (sinfo->filled & STATION_INFO_BSS_PARAM) { 3253 if (sinfo->filled & STATION_INFO_BSS_PARAM) {
3005 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); 3254 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
3006 if (!bss_param) 3255 if (!bss_param)
@@ -3160,6 +3409,54 @@ static struct net_device *get_vlan(struct genl_info *info,
3160 return ERR_PTR(ret); 3409 return ERR_PTR(ret);
3161} 3410}
3162 3411
3412static struct nla_policy
3413nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3414 [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
3415 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3416};
3417
3418static int nl80211_set_station_tdls(struct genl_info *info,
3419 struct station_parameters *params)
3420{
3421 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3422 struct nlattr *nla;
3423 int err;
3424
3425 /* Dummy STA entry gets updated once the peer capabilities are known */
3426 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3427 params->ht_capa =
3428 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3429 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3430 params->vht_capa =
3431 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3432
3433 /* parse WME attributes if present */
3434 if (!info->attrs[NL80211_ATTR_STA_WME])
3435 return 0;
3436
3437 nla = info->attrs[NL80211_ATTR_STA_WME];
3438 err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
3439 nl80211_sta_wme_policy);
3440 if (err)
3441 return err;
3442
3443 if (tb[NL80211_STA_WME_UAPSD_QUEUES])
3444 params->uapsd_queues = nla_get_u8(
3445 tb[NL80211_STA_WME_UAPSD_QUEUES]);
3446 if (params->uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
3447 return -EINVAL;
3448
3449 if (tb[NL80211_STA_WME_MAX_SP])
3450 params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
3451
3452 if (params->max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
3453 return -EINVAL;
3454
3455 params->sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
3456
3457 return 0;
3458}
3459
3163static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) 3460static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3164{ 3461{
3165 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3462 struct cfg80211_registered_device *rdev = info->user_ptr[0];
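
nl80211_set_station_tdls() bounds the WME values against the WMM information-element fields: the U-APSD queue set is a 4-bit access-category bitmap and max-SP a 2-bit field. The checks in isolation (mask values as defined in ieee80211.h):

/* Model of the WME sanity checks in the TDLS path. */
#include <stdio.h>

#define QOSINFO_AC_MASK 0x0f /* four access categories */
#define QOSINFO_SP_MASK 0x03 /* service period length */

static int check_wme(unsigned uapsd_queues, unsigned max_sp)
{
	if (uapsd_queues & ~QOSINFO_AC_MASK)
		return -1;
	if (max_sp & ~QOSINFO_SP_MASK)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_wme(0x0f, 2), check_wme(0x10, 2)); /* 0 -1 */
	return 0;
}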
@@ -3188,13 +3485,21 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3188 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 3485 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
3189 } 3486 }
3190 3487
3191 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 3488 if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
3192 params.listen_interval = 3489 params.capability =
3193 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 3490 nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
3491 params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
3492 }
3194 3493
3195 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 3494 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
3196 params.ht_capa = 3495 params.ext_capab =
3197 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 3496 nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3497 params.ext_capab_len =
3498 nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3499 }
3500
3501 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
3502 return -EINVAL;
3198 3503
3199 if (!rdev->ops->change_station) 3504 if (!rdev->ops->change_station)
3200 return -EOPNOTSUPP; 3505 return -EOPNOTSUPP;
@@ -3210,6 +3515,17 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3210 params.plink_state = 3515 params.plink_state =
3211 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); 3516 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
3212 3517
3518 if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) {
3519 enum nl80211_mesh_power_mode pm = nla_get_u32(
3520 info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]);
3521
3522 if (pm <= NL80211_MESH_POWER_UNKNOWN ||
3523 pm > NL80211_MESH_POWER_MAX)
3524 return -EINVAL;
3525
3526 params.local_pm = pm;
3527 }
3528
3213 switch (dev->ieee80211_ptr->iftype) { 3529 switch (dev->ieee80211_ptr->iftype) {
3214 case NL80211_IFTYPE_AP: 3530 case NL80211_IFTYPE_AP:
3215 case NL80211_IFTYPE_AP_VLAN: 3531 case NL80211_IFTYPE_AP_VLAN:
@@ -3217,6 +3533,8 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3217 /* disallow mesh-specific things */ 3533 /* disallow mesh-specific things */
3218 if (params.plink_action) 3534 if (params.plink_action)
3219 return -EINVAL; 3535 return -EINVAL;
3536 if (params.local_pm)
3537 return -EINVAL;
3220 3538
3221 /* TDLS can't be set, ... */ 3539 /* TDLS can't be set, ... */
3222 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 3540 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
@@ -3231,11 +3549,32 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3231 /* accept only the listed bits */ 3549 /* accept only the listed bits */
3232 if (params.sta_flags_mask & 3550 if (params.sta_flags_mask &
3233 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | 3551 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3552 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3553 BIT(NL80211_STA_FLAG_ASSOCIATED) |
3234 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 3554 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
3235 BIT(NL80211_STA_FLAG_WME) | 3555 BIT(NL80211_STA_FLAG_WME) |
3236 BIT(NL80211_STA_FLAG_MFP))) 3556 BIT(NL80211_STA_FLAG_MFP)))
3237 return -EINVAL; 3557 return -EINVAL;
3238 3558
3559 /* but authenticated/associated only if driver handles it */
3560 if (!(rdev->wiphy.features &
3561 NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3562 params.sta_flags_mask &
3563 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3564 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3565 return -EINVAL;
3566
3567 /* reject other things that can't change */
3568 if (params.supported_rates)
3569 return -EINVAL;
3570 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3571 return -EINVAL;
3572 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3573 return -EINVAL;
3574 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3575 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3576 return -EINVAL;
3577
3239 /* must be last in here for error handling */ 3578 /* must be last in here for error handling */
3240 params.vlan = get_vlan(info, rdev); 3579 params.vlan = get_vlan(info, rdev);
3241 if (IS_ERR(params.vlan)) 3580 if (IS_ERR(params.vlan))
@@ -3250,14 +3589,28 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3250 * to change the flag. 3589 * to change the flag.
3251 */ 3590 */
3252 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); 3591 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3253 /* fall through */ 3592 /* Include parameters for TDLS peer (driver will check) */
3593 err = nl80211_set_station_tdls(info, &params);
3594 if (err)
3595 return err;
3596 /* disallow things sta doesn't support */
3597 if (params.plink_action)
3598 return -EINVAL;
3599 if (params.local_pm)
3600 return -EINVAL;
3601 /* reject any changes other than AUTHORIZED or WME (for TDLS) */
3602 if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3603 BIT(NL80211_STA_FLAG_WME)))
3604 return -EINVAL;
3605 break;
3254 case NL80211_IFTYPE_ADHOC: 3606 case NL80211_IFTYPE_ADHOC:
3255 /* disallow things sta doesn't support */ 3607 /* disallow things sta doesn't support */
3256 if (params.plink_action) 3608 if (params.plink_action)
3257 return -EINVAL; 3609 return -EINVAL;
3258 if (params.ht_capa) 3610 if (params.local_pm)
3259 return -EINVAL; 3611 return -EINVAL;
3260 if (params.listen_interval >= 0) 3612 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3613 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3261 return -EINVAL; 3614 return -EINVAL;
3262 /* reject any changes other than AUTHORIZED */ 3615 /* reject any changes other than AUTHORIZED */
3263 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) 3616 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
@@ -3267,9 +3620,14 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3267 /* disallow things mesh doesn't support */ 3620 /* disallow things mesh doesn't support */
3268 if (params.vlan) 3621 if (params.vlan)
3269 return -EINVAL; 3622 return -EINVAL;
3270 if (params.ht_capa) 3623 if (params.supported_rates)
3624 return -EINVAL;
3625 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3626 return -EINVAL;
3627 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3271 return -EINVAL; 3628 return -EINVAL;
3272 if (params.listen_interval >= 0) 3629 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3630 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3273 return -EINVAL; 3631 return -EINVAL;
3274 /* 3632 /*
3275 * No special handling for TDLS here -- the userspace 3633 * No special handling for TDLS here -- the userspace
@@ -3295,12 +3653,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3295 return err; 3653 return err;
3296} 3654}
3297 3655
3298static struct nla_policy
3299nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3300 [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
3301 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3302};
3303
3304static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) 3656static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3305{ 3657{
3306 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3658 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3335,6 +3687,19 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3335 if (!params.aid || params.aid > IEEE80211_MAX_AID) 3687 if (!params.aid || params.aid > IEEE80211_MAX_AID)
3336 return -EINVAL; 3688 return -EINVAL;
3337 3689
3690 if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
3691 params.capability =
3692 nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
3693 params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
3694 }
3695
3696 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
3697 params.ext_capab =
3698 nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3699 params.ext_capab_len =
3700 nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3701 }
3702
3338 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 3703 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3339 params.ht_capa = 3704 params.ht_capa =
3340 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 3705 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
@@ -3393,17 +3758,31 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3393 /* but don't bother the driver with it */ 3758 /* but don't bother the driver with it */
3394 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); 3759 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3395 3760
3761 /* allow authenticated/associated only if driver handles it */
3762 if (!(rdev->wiphy.features &
3763 NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3764 params.sta_flags_mask &
3765 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3766 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3767 return -EINVAL;
3768
3396 /* must be last in here for error handling */ 3769 /* must be last in here for error handling */
3397 params.vlan = get_vlan(info, rdev); 3770 params.vlan = get_vlan(info, rdev);
3398 if (IS_ERR(params.vlan)) 3771 if (IS_ERR(params.vlan))
3399 return PTR_ERR(params.vlan); 3772 return PTR_ERR(params.vlan);
3400 break; 3773 break;
3401 case NL80211_IFTYPE_MESH_POINT: 3774 case NL80211_IFTYPE_MESH_POINT:
3775 /* associated is disallowed */
3776 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED))
3777 return -EINVAL;
3402 /* TDLS peers cannot be added */ 3778 /* TDLS peers cannot be added */
3403 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 3779 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3404 return -EINVAL; 3780 return -EINVAL;
3405 break; 3781 break;
3406 case NL80211_IFTYPE_STATION: 3782 case NL80211_IFTYPE_STATION:
3783 /* associated is disallowed */
3784 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED))
3785 return -EINVAL;
3407 /* Only TDLS peers can be added */ 3786 /* Only TDLS peers can be added */
3408 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) 3787 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
3409 return -EINVAL; 3788 return -EINVAL;
@@ -3787,12 +4166,8 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
3787 * window between nl80211_init() and regulatory_init(), if that is 4166 * window between nl80211_init() and regulatory_init(), if that is
3788 * even possible. 4167 * even possible.
3789 */ 4168 */
3790 mutex_lock(&cfg80211_mutex); 4169 if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
3791 if (unlikely(!cfg80211_regdomain)) {
3792 mutex_unlock(&cfg80211_mutex);
3793 return -EINPROGRESS; 4170 return -EINPROGRESS;
3794 }
3795 mutex_unlock(&cfg80211_mutex);
3796 4171
3797 if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) 4172 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
3798 return -EINVAL; 4173 return -EINVAL;
@@ -3908,7 +4283,11 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3908 nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, 4283 nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
3909 cur_params.dot11MeshHWMProotInterval) || 4284 cur_params.dot11MeshHWMProotInterval) ||
3910 nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, 4285 nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
3911 cur_params.dot11MeshHWMPconfirmationInterval)) 4286 cur_params.dot11MeshHWMPconfirmationInterval) ||
4287 nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE,
4288 cur_params.power_mode) ||
4289 nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW,
4290 cur_params.dot11MeshAwakeWindowDuration))
3912 goto nla_put_failure; 4291 goto nla_put_failure;
3913 nla_nest_end(msg, pinfoattr); 4292 nla_nest_end(msg, pinfoattr);
3914 genlmsg_end(msg, hdr); 4293 genlmsg_end(msg, hdr);
@@ -3947,6 +4326,8 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3947 [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 }, 4326 [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
3948 [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 }, 4327 [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
3949 [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, 4328 [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
4329 [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 },
4330 [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 },
3950}; 4331};
3951 4332
3952static const struct nla_policy 4333static const struct nla_policy
@@ -3967,13 +4348,15 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
3967 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; 4348 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
3968 u32 mask = 0; 4349 u32 mask = 0;
3969 4350
3970#define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \ 4351#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
3971do {\ 4352do { \
3972 if (table[attr_num]) {\ 4353 if (tb[attr]) { \
3973 cfg->param = nla_fn(table[attr_num]); \ 4354 if (fn(tb[attr]) < min || fn(tb[attr]) > max) \
3974 mask |= (1 << (attr_num - 1)); \ 4355 return -EINVAL; \
3975 } \ 4356 cfg->param = fn(tb[attr]); \
3976} while (0);\ 4357 mask |= (1 << (attr - 1)); \
4358 } \
4359} while (0)
3977 4360
3978 4361
3979 if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) 4362 if (!info->attrs[NL80211_ATTR_MESH_CONFIG])
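
The reworked FILL_IN_MESH_PARAM_IF_SET macro folds a range check into the existing present-then-assign-then-mask pattern, rejecting out-of-range values before anything is written. A standalone expansion of the idea, with plain ints standing in for netlink attributes:

/* Model of the range-checked fill macro -- not kernel code. */
#include <stdio.h>

#define FILL_IF_SET(present, val, field, min, max, mask, bit)	\
do {								\
	if (present) {						\
		if ((val) < (min) || (val) > (max))		\
			return -1;				\
		(field) = (val);				\
		(mask) |= 1u << (bit);				\
	}							\
} while (0)

struct mesh_cfg { int retry_timeout; int max_retries; };

static int parse(struct mesh_cfg *cfg, unsigned *mask)
{
	FILL_IF_SET(1, 100, cfg->retry_timeout, 1, 255, *mask, 0);
	FILL_IF_SET(1, 99,  cfg->max_retries,   0, 16,  *mask, 1); /* -1 */
	return 0;
}

int main(void)
{
	struct mesh_cfg cfg = { 0 };
	unsigned mask = 0;
	printf("ret=%d mask=%#x retry=%d\n", parse(&cfg, &mask), mask,
	       cfg.retry_timeout);
	return 0;
}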
@@ -3988,83 +4371,98 @@ do {\
 	BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);
 
 	/* Fill in the params struct */
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
-				  mask, NL80211_MESHCONF_RETRY_TIMEOUT,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
-				  mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
-				  mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
-				  mask, NL80211_MESHCONF_MAX_PEER_LINKS,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
-				  mask, NL80211_MESHCONF_MAX_RETRIES,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
-				  mask, NL80211_MESHCONF_TTL, nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
-				  mask, NL80211_MESHCONF_ELEMENT_TTL,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
-				  mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask,
-				  NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
-				  nla_get_u32);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
-				  mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
-				  mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
-				  nla_get_u32);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
-				  mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask,
-				  NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
-				  nla_get_u32);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
-				  mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
-				  mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				  dot11MeshHWMPnetDiameterTraversalTime, mask,
-				  NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
-				  NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
-				  NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				  dot11MeshGateAnnouncementProtocol, mask,
-				  NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
-				  mask, NL80211_MESHCONF_FORWARDING,
-				  nla_get_u8);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
-				  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
-				  nla_get_u32);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
-				  mask, NL80211_MESHCONF_HT_OPMODE,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
-				  mask,
-				  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
-				  nla_get_u32);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval,
-				  mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
-				  nla_get_u16);
-	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
-				  dot11MeshHWMPconfirmationInterval, mask,
-				  NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
-				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255,
+				  mask, NL80211_MESHCONF_RETRY_TIMEOUT,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255,
+				  mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255,
+				  mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255,
+				  mask, NL80211_MESHCONF_MAX_PEER_LINKS,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16,
+				  mask, NL80211_MESHCONF_MAX_RETRIES,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255,
+				  mask, NL80211_MESHCONF_TTL, nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255,
+				  mask, NL80211_MESHCONF_ELEMENT_TTL,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1,
+				  mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
+				  1, 255, mask,
+				  NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255,
+				  mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535,
+				  mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535,
+				  mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshHWMPnetDiameterTraversalTime,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4,
+				  mask, NL80211_MESHCONF_HWMP_ROOTMODE,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535,
+				  mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshGateAnnouncementProtocol, 0, 1,
+				  mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
+				  mask, NL80211_MESHCONF_FORWARDING,
+				  nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, 1, 255,
+				  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
+				  mask, NL80211_MESHCONF_HT_OPMODE,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535,
+				  mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+				  dot11MeshHWMPconfirmationInterval,
+				  1, 65535, mask,
+				  NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+				  nla_get_u16);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode,
+				  NL80211_MESH_POWER_ACTIVE,
+				  NL80211_MESH_POWER_MAX,
+				  mask, NL80211_MESHCONF_POWER_MODE,
+				  nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
+				  0, 65535, mask,
+				  NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
 	if (mask_out)
 		*mask_out = mask;
 
@@ -4152,6 +4550,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
 
 static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 {
+	const struct ieee80211_regdomain *regdom;
 	struct sk_buff *msg;
 	void *hdr = NULL;
 	struct nlattr *nl_reg_rules;
@@ -4174,35 +4573,36 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 	if (!hdr)
 		goto put_failure;
 
-	if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
-			   cfg80211_regdomain->alpha2) ||
-	    (cfg80211_regdomain->dfs_region &&
-	     nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
-			cfg80211_regdomain->dfs_region)))
-		goto nla_put_failure;
-
 	if (reg_last_request_cell_base() &&
 	    nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
 			NL80211_USER_REG_HINT_CELL_BASE))
 		goto nla_put_failure;
 
+	rcu_read_lock();
+	regdom = rcu_dereference(cfg80211_regdomain);
+
+	if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, regdom->alpha2) ||
+	    (regdom->dfs_region &&
+	     nla_put_u8(msg, NL80211_ATTR_DFS_REGION, regdom->dfs_region)))
+		goto nla_put_failure_rcu;
+
 	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
 	if (!nl_reg_rules)
-		goto nla_put_failure;
+		goto nla_put_failure_rcu;
 
-	for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
+	for (i = 0; i < regdom->n_reg_rules; i++) {
 		struct nlattr *nl_reg_rule;
 		const struct ieee80211_reg_rule *reg_rule;
 		const struct ieee80211_freq_range *freq_range;
 		const struct ieee80211_power_rule *power_rule;
 
-		reg_rule = &cfg80211_regdomain->reg_rules[i];
+		reg_rule = &regdom->reg_rules[i];
 		freq_range = &reg_rule->freq_range;
 		power_rule = &reg_rule->power_rule;
 
 		nl_reg_rule = nla_nest_start(msg, i);
 		if (!nl_reg_rule)
-			goto nla_put_failure;
+			goto nla_put_failure_rcu;
 
 		if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
 				reg_rule->flags) ||
@@ -4216,10 +4616,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 				power_rule->max_antenna_gain) ||
 		    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
 				power_rule->max_eirp))
-			goto nla_put_failure;
+			goto nla_put_failure_rcu;
 
 		nla_nest_end(msg, nl_reg_rule);
 	}
+	rcu_read_unlock();
 
 	nla_nest_end(msg, nl_reg_rules);
 
@@ -4227,6 +4628,8 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 	err = genlmsg_reply(msg, info);
 	goto out;
 
+nla_put_failure_rcu:
+	rcu_read_unlock();
 nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 put_failure:
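The dump path above now dereferences the regdomain under rcu_read_lock() and unwinds through an RCU-aware error label so every exit drops the read lock. A compact sketch of that shape (kernel context, names hypothetical):

/* Dump an RCU-protected structure into a netlink message; any failure
 * after rcu_read_lock() must funnel through a label that unlocks. */
static int dump_regdom(struct sk_buff *msg)
{
	const struct my_regdom *rd;

	rcu_read_lock();
	rd = rcu_dereference(current_regdom);	/* stable until unlock */

	if (nla_put_string(msg, MY_ATTR_ALPHA2, rd->alpha2))
		goto fail_rcu;

	rcu_read_unlock();
	return 0;

fail_rcu:
	rcu_read_unlock();	/* never leave the read-side lock held */
	return -EMSGSIZE;
}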
@@ -4259,27 +4662,18 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 	dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
 
 	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
 			    rem_reg_rules) {
 		num_rules++;
 		if (num_rules > NL80211_MAX_SUPP_REG_RULES)
 			return -EINVAL;
 	}
 
-	mutex_lock(&cfg80211_mutex);
-
-	if (!reg_is_valid_request(alpha2)) {
-		r = -EINVAL;
-		goto bad_reg;
-	}
-
 	size_of_regd = sizeof(struct ieee80211_regdomain) +
-		(num_rules * sizeof(struct ieee80211_reg_rule));
+		       num_rules * sizeof(struct ieee80211_reg_rule);
 
 	rd = kzalloc(size_of_regd, GFP_KERNEL);
-	if (!rd) {
-		r = -ENOMEM;
-		goto bad_reg;
-	}
+	if (!rd)
+		return -ENOMEM;
 
 	rd->n_reg_rules = num_rules;
 	rd->alpha2[0] = alpha2[0];
@@ -4293,10 +4687,10 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 	rd->dfs_region = dfs_region;
 
 	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
 			    rem_reg_rules) {
 		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
 			  nla_data(nl_reg_rule), nla_len(nl_reg_rule),
 			  reg_rule_policy);
 		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
 		if (r)
 			goto bad_reg;
@@ -4309,16 +4703,14 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
-	BUG_ON(rule_idx != num_rules);
+	mutex_lock(&cfg80211_mutex);
 
 	r = set_regdom(rd);
-
+	/* set_regdom took ownership */
+	rd = NULL;
 	mutex_unlock(&cfg80211_mutex);
 
-	return r;
-
  bad_reg:
-	mutex_unlock(&cfg80211_mutex);
 	kfree(rd);
 	return r;
 }
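Because set_regdom() now takes ownership of the buffer, the caller clears its pointer and the shared exit path's kfree() becomes a harmless no-op. A sketch of that handoff idiom (consume_regdom() and validate() are hypothetical stand-ins):

/* Ownership handoff: once the callee owns the buffer, the caller NULLs
 * its pointer so the shared exit path can kfree() unconditionally. */
static int install(struct my_regdom *rd)
{
	int r;

	r = validate(rd);
	if (r)
		goto out;

	r = consume_regdom(rd);	/* callee frees or keeps rd, even on error */
	rd = NULL;		/* prevents a double free below */
 out:
	kfree(rd);		/* kfree(NULL) is a safe no-op */
	return r;
}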
@@ -4801,6 +5193,54 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
 	return err;
 }
 
+static int nl80211_start_radar_detection(struct sk_buff *skb,
+					 struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_chan_def chandef;
+	int err;
+
+	err = nl80211_parse_chandef(rdev, info, &chandef);
+	if (err)
+		return err;
+
+	if (wdev->cac_started)
+		return -EBUSY;
+
+	err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
+	if (err < 0)
+		return err;
+
+	if (err == 0)
+		return -EINVAL;
+
+	if (chandef.chan->dfs_state != NL80211_DFS_USABLE)
+		return -EINVAL;
+
+	if (!rdev->ops->start_radar_detection)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&rdev->devlist_mtx);
+	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+					   chandef.chan, CHAN_MODE_SHARED,
+					   BIT(chandef.width));
+	if (err)
+		goto err_locked;
+
+	err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
+	if (!err) {
+		wdev->channel = chandef.chan;
+		wdev->cac_started = true;
+		wdev->cac_start_time = jiffies;
+	}
+err_locked:
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return err;
+}
+
 static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 			    u32 seq, int flags,
 			    struct cfg80211_registered_device *rdev,
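From userspace, the new NL80211_CMD_RADAR_DETECT command can be issued over generic netlink to kick off the channel availability check. A hedged libnl-3 sketch follows; the exact attribute set nl80211_parse_chandef() expects (here just ifindex plus NL80211_ATTR_WIPHY_FREQ) is an assumption for illustration:

/* Userspace sketch (libnl-3/genl): request CAC on a DFS channel.
 * Attribute choices here are illustrative, not authoritative. */
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int start_cac(int ifindex, unsigned int freq_mhz)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk)
		return -ENOMEM;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_RADAR_DETECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq_mhz);

	err = nl_send_auto(sk, msg);	/* kernel replies -EBUSY if CAC runs */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}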
@@ -4811,6 +5251,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 	const struct cfg80211_bss_ies *ies;
 	void *hdr;
 	struct nlattr *bss;
+	bool tsf = false;
 
 	ASSERT_WDEV_LOCK(wdev);
 
@@ -4834,22 +5275,24 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
 	rcu_read_lock();
 	ies = rcu_dereference(res->ies);
-	if (ies && ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
-				       ies->len, ies->data)) {
-		rcu_read_unlock();
-		goto nla_put_failure;
+	if (ies) {
+		if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+			goto fail_unlock_rcu;
+		tsf = true;
+		if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+					ies->len, ies->data))
+			goto fail_unlock_rcu;
 	}
 	ies = rcu_dereference(res->beacon_ies);
-	if (ies && ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
-				       ies->len, ies->data)) {
-		rcu_read_unlock();
-		goto nla_put_failure;
+	if (ies) {
+		if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+			goto fail_unlock_rcu;
+		if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
+					ies->len, ies->data))
+			goto fail_unlock_rcu;
 	}
 	rcu_read_unlock();
 
-	if (res->tsf &&
-	    nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
-		goto nla_put_failure;
 	if (res->beacon_interval &&
 	    nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
 		goto nla_put_failure;
@@ -4894,6 +5337,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
 	return genlmsg_end(msg, hdr);
 
+ fail_unlock_rcu:
+	rcu_read_unlock();
  nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	return -EMSGSIZE;
@@ -5867,6 +6312,15 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 		connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
+	if (info->attrs[NL80211_ATTR_USE_MFP]) {
+		connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
+		if (connect.mfp != NL80211_MFP_REQUIRED &&
+		    connect.mfp != NL80211_MFP_NO)
+			return -EINVAL;
+	} else {
+		connect.mfp = NL80211_MFP_NO;
+	}
+
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
 		connect.channel =
 			ieee80211_get_channel(wiphy,
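The MFP hunk is a small instance of a common netlink pattern: an optional attribute with an explicit default and a whitelist of accepted values. A sketch of the same shape (parse_mfp() is a hypothetical helper, not part of the patch):

/* Optional attribute with a default: absent means "no MFP", present
 * values are checked against the currently defined enum members. */
static int parse_mfp(struct genl_info *info, u32 *mfp)
{
	if (!info->attrs[NL80211_ATTR_USE_MFP]) {
		*mfp = NL80211_MFP_NO;	/* default when userspace is silent */
		return 0;
	}

	*mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
	switch (*mfp) {
	case NL80211_MFP_NO:
	case NL80211_MFP_REQUIRED:
		return 0;	/* only known values accepted */
	default:
		return -EINVAL;	/* future values must be opted into */
	}
}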
@@ -6652,6 +7106,21 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
 			nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
 		return -EINVAL;
 
+	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
+		setup.beacon_interval =
+			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
+		if (setup.beacon_interval < 10 ||
+		    setup.beacon_interval > 10000)
+			return -EINVAL;
+	}
+
+	if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
+		setup.dtim_period =
+			nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
+		if (setup.dtim_period < 1 || setup.dtim_period > 100)
+			return -EINVAL;
+	}
+
 	if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
 		/* parse additional setup parameters if given */
 		err = nl80211_parse_mesh_setup(info, &setup);
@@ -6680,16 +7149,100 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
 }
 
 #ifdef CONFIG_PM
+static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
+					struct cfg80211_registered_device *rdev)
+{
+	struct nlattr *nl_pats, *nl_pat;
+	int i, pat_len;
+
+	if (!rdev->wowlan->n_patterns)
+		return 0;
+
+	nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN);
+	if (!nl_pats)
+		return -ENOBUFS;
+
+	for (i = 0; i < rdev->wowlan->n_patterns; i++) {
+		nl_pat = nla_nest_start(msg, i + 1);
+		if (!nl_pat)
+			return -ENOBUFS;
+		pat_len = rdev->wowlan->patterns[i].pattern_len;
+		if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+			    DIV_ROUND_UP(pat_len, 8),
+			    rdev->wowlan->patterns[i].mask) ||
+		    nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+			    pat_len, rdev->wowlan->patterns[i].pattern) ||
+		    nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+				rdev->wowlan->patterns[i].pkt_offset))
+			return -ENOBUFS;
+		nla_nest_end(msg, nl_pat);
+	}
+	nla_nest_end(msg, nl_pats);
+
+	return 0;
+}
+
+static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
+				   struct cfg80211_wowlan_tcp *tcp)
+{
+	struct nlattr *nl_tcp;
+
+	if (!tcp)
+		return 0;
+
+	nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
+	if (!nl_tcp)
+		return -ENOBUFS;
+
+	if (nla_put_be32(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
+	    nla_put_be32(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
+	    nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) ||
+	    nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) ||
+	    nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) ||
+	    nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
+		    tcp->payload_len, tcp->payload) ||
+	    nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
+			tcp->data_interval) ||
+	    nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
+		    tcp->wake_len, tcp->wake_data) ||
+	    nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK,
+		    DIV_ROUND_UP(tcp->wake_len, 8), tcp->wake_mask))
+		return -ENOBUFS;
+
+	if (tcp->payload_seq.len &&
+	    nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ,
+		    sizeof(tcp->payload_seq), &tcp->payload_seq))
+		return -ENOBUFS;
+
+	if (tcp->payload_tok.len &&
+	    nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
+		    sizeof(tcp->payload_tok) + tcp->tokens_size,
+		    &tcp->payload_tok))
+		return -ENOBUFS;
+
+	return 0;
+}
+
 static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct sk_buff *msg;
 	void *hdr;
+	u32 size = NLMSG_DEFAULT_SIZE;
 
-	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
+	    !rdev->wiphy.wowlan.tcp)
 		return -EOPNOTSUPP;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (rdev->wowlan && rdev->wowlan->tcp) {
+		/* adjust size to have room for all the data */
+		size += rdev->wowlan->tcp->tokens_size +
+			rdev->wowlan->tcp->payload_len +
+			rdev->wowlan->tcp->wake_len +
+			rdev->wowlan->tcp->wake_len / 8;
+	}
+
+	msg = nlmsg_new(size, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
 
@@ -6720,31 +7273,12 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
 	    (rdev->wowlan->rfkill_release &&
 	     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
 		goto nla_put_failure;
-	if (rdev->wowlan->n_patterns) {
-		struct nlattr *nl_pats, *nl_pat;
-		int i, pat_len;
 
-		nl_pats = nla_nest_start(msg,
-				NL80211_WOWLAN_TRIG_PKT_PATTERN);
-		if (!nl_pats)
-			goto nla_put_failure;
+	if (nl80211_send_wowlan_patterns(msg, rdev))
+		goto nla_put_failure;
 
-		for (i = 0; i < rdev->wowlan->n_patterns; i++) {
-			nl_pat = nla_nest_start(msg, i + 1);
-			if (!nl_pat)
-				goto nla_put_failure;
-			pat_len = rdev->wowlan->patterns[i].pattern_len;
-			if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
-				    DIV_ROUND_UP(pat_len, 8),
-				    rdev->wowlan->patterns[i].mask) ||
-			    nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-				    pat_len,
-				    rdev->wowlan->patterns[i].pattern))
-				goto nla_put_failure;
-			nla_nest_end(msg, nl_pat);
-		}
-		nla_nest_end(msg, nl_pats);
-	}
+	if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, nl_wowlan);
 	}
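Factoring the pattern dump into helpers keeps the nesting discipline in one place: start a nest, emit attributes, close the nest, and simply return -ENOBUFS on failure since the caller cancels the whole message. A generic sketch of one such helper:

/* One nested entry per pattern; on any put failure the caller
 * genlmsg_cancel()s the message, so the helper just reports -ENOBUFS. */
static int put_one_pattern(struct sk_buff *msg, int idx,
			   const u8 *pat, int pat_len)
{
	struct nlattr *nest = nla_nest_start(msg, idx + 1);

	if (!nest)
		return -ENOBUFS;
	if (nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, pat_len, pat))
		return -ENOBUFS;
	nla_nest_end(msg, nest);	/* fixes up the nest's length */
	return 0;
}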
@@ -6757,6 +7291,150 @@ nla_put_failure:
 	return -ENOBUFS;
 }
 
+static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
+				    struct nlattr *attr,
+				    struct cfg80211_wowlan *trig)
+{
+	struct nlattr *tb[NUM_NL80211_WOWLAN_TCP];
+	struct cfg80211_wowlan_tcp *cfg;
+	struct nl80211_wowlan_tcp_data_token *tok = NULL;
+	struct nl80211_wowlan_tcp_data_seq *seq = NULL;
+	u32 size;
+	u32 data_size, wake_size, tokens_size = 0, wake_mask_size;
+	int err, port;
+
+	if (!rdev->wiphy.wowlan.tcp)
+		return -EINVAL;
+
+	err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP,
+			nla_data(attr), nla_len(attr),
+			nl80211_wowlan_tcp_policy);
+	if (err)
+		return err;
+
+	if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] ||
+	    !tb[NL80211_WOWLAN_TCP_DST_IPV4] ||
+	    !tb[NL80211_WOWLAN_TCP_DST_MAC] ||
+	    !tb[NL80211_WOWLAN_TCP_DST_PORT] ||
+	    !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] ||
+	    !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] ||
+	    !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] ||
+	    !tb[NL80211_WOWLAN_TCP_WAKE_MASK])
+		return -EINVAL;
+
+	data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]);
+	if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max)
+		return -EINVAL;
+
+	if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
+			rdev->wiphy.wowlan.tcp->data_interval_max)
+		return -EINVAL;
+
+	wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
+	if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max)
+		return -EINVAL;
+
+	wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]);
+	if (wake_mask_size != DIV_ROUND_UP(wake_size, 8))
+		return -EINVAL;
+
+	if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) {
+		u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
+
+		tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
+		tokens_size = tokln - sizeof(*tok);
+
+		if (!tok->len || tokens_size % tok->len)
+			return -EINVAL;
+		if (!rdev->wiphy.wowlan.tcp->tok)
+			return -EINVAL;
+		if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len)
+			return -EINVAL;
+		if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len)
+			return -EINVAL;
+		if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize)
+			return -EINVAL;
+		if (tok->offset + tok->len > data_size)
+			return -EINVAL;
+	}
+
+	if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) {
+		seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]);
+		if (!rdev->wiphy.wowlan.tcp->seq)
+			return -EINVAL;
+		if (seq->len == 0 || seq->len > 4)
+			return -EINVAL;
+		if (seq->len + seq->offset > data_size)
+			return -EINVAL;
+	}
+
+	size = sizeof(*cfg);
+	size += data_size;
+	size += wake_size + wake_mask_size;
+	size += tokens_size;
+
+	cfg = kzalloc(size, GFP_KERNEL);
+	if (!cfg)
+		return -ENOMEM;
+	cfg->src = nla_get_be32(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
+	cfg->dst = nla_get_be32(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
+	memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
+	       ETH_ALEN);
+	if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
+		port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]);
+	else
+		port = 0;
+#ifdef CONFIG_INET
+	/* allocate a socket and port for it and use it */
+	err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM,
+			    IPPROTO_TCP, &cfg->sock, 1);
+	if (err) {
+		kfree(cfg);
+		return err;
+	}
+	if (inet_csk_get_port(cfg->sock->sk, port)) {
+		sock_release(cfg->sock);
+		kfree(cfg);
+		return -EADDRINUSE;
+	}
+	cfg->src_port = inet_sk(cfg->sock->sk)->inet_num;
+#else
+	if (!port) {
+		kfree(cfg);
+		return -EINVAL;
+	}
+	cfg->src_port = port;
+#endif
+
+	cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]);
+	cfg->payload_len = data_size;
+	cfg->payload = (u8 *)cfg + sizeof(*cfg) + tokens_size;
+	memcpy((void *)cfg->payload,
+	       nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]),
+	       data_size);
+	if (seq)
+		cfg->payload_seq = *seq;
+	cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]);
+	cfg->wake_len = wake_size;
+	cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size;
+	memcpy((void *)cfg->wake_data,
+	       nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]),
+	       wake_size);
+	cfg->wake_mask = (u8 *)cfg + sizeof(*cfg) + tokens_size +
+			 data_size + wake_size;
+	memcpy((void *)cfg->wake_mask,
+	       nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]),
+	       wake_mask_size);
+	if (tok) {
+		cfg->tokens_size = tokens_size;
+		memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size);
+	}
+
+	trig->tcp = cfg;
+
+	return 0;
+}
+
 static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
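The parse function sizes a single kzalloc() to hold the config struct plus all its variable-length blobs, then fixes up pointers into the trailing storage so one kfree() releases everything. A self-contained sketch of that layout trick (field names illustrative):

/* Single-allocation layout: struct followed by its blobs in one buffer. */
struct blob_cfg {
	const u8 *payload, *wake_data;
	u32 payload_len, wake_len;
	/* trailing storage follows the struct in the same allocation */
};

static struct blob_cfg *blob_cfg_alloc(const u8 *payload, u32 plen,
					const u8 *wake, u32 wlen)
{
	struct blob_cfg *cfg = kzalloc(sizeof(*cfg) + plen + wlen, GFP_KERNEL);

	if (!cfg)
		return NULL;
	cfg->payload = (u8 *)cfg + sizeof(*cfg);	/* first blob */
	cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + plen;	/* second */
	memcpy((void *)cfg->payload, payload, plen);
	memcpy((void *)cfg->wake_data, wake, wlen);
	cfg->payload_len = plen;
	cfg->wake_len = wlen;
	return cfg;	/* a single kfree(cfg) releases everything */
}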
@@ -6767,7 +7445,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 	int err, i;
 	bool prev_enabled = rdev->wowlan;
 
-	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
+	    !rdev->wiphy.wowlan.tcp)
 		return -EOPNOTSUPP;
 
 	if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
@@ -6831,7 +7510,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 	if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
 		struct nlattr *pat;
 		int n_patterns = 0;
-		int rem, pat_len, mask_len;
+		int rem, pat_len, mask_len, pkt_offset;
 		struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
 
 		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
@@ -6866,6 +7545,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 			    pat_len < wowlan->pattern_min_len)
 				goto error;
 
+			if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+				pkt_offset = 0;
+			else
+				pkt_offset = nla_get_u32(
+					pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+			if (pkt_offset > wowlan->max_pkt_offset)
+				goto error;
+			new_triggers.patterns[i].pkt_offset = pkt_offset;
+
 			new_triggers.patterns[i].mask =
 				kmalloc(mask_len + pat_len, GFP_KERNEL);
 			if (!new_triggers.patterns[i].mask) {
@@ -6885,6 +7573,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
+	if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
+		err = nl80211_parse_wowlan_tcp(
+			rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
+			&new_triggers);
+		if (err)
+			goto error;
+	}
+
 	ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
 	if (!ntrig) {
 		err = -ENOMEM;
@@ -6902,6 +7598,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 	for (i = 0; i < new_triggers.n_patterns; i++)
 		kfree(new_triggers.patterns[i].mask);
 	kfree(new_triggers.patterns);
+	if (new_triggers.tcp && new_triggers.tcp->sock)
+		sock_release(new_triggers.tcp->sock);
+	kfree(new_triggers.tcp);
 	return err;
 }
 #endif
@@ -7784,6 +8483,22 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_SET_MAC_ACL,
+		.doit = nl80211_set_mac_acl,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_RADAR_DETECT,
+		.doit = nl80211_start_radar_detection,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -8051,7 +8766,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
 		goto nla_put_failure;
 	}
 
-	if (wiphy_idx_valid(request->wiphy_idx) &&
+	if (request->wiphy_idx != WIPHY_IDX_INVALID &&
 	    nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
 		goto nla_put_failure;
 
@@ -8981,6 +9696,57 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
 }
 
 void
+nl80211_radar_notify(struct cfg80211_registered_device *rdev,
+		     struct cfg80211_chan_def *chandef,
+		     enum nl80211_radar_event event,
+		     struct net_device *netdev, gfp_t gfp)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+		goto nla_put_failure;
+
+	/* NOP and radar events don't need a netdev parameter */
+	if (netdev) {
+		struct wireless_dev *wdev = netdev->ieee80211_ptr;
+
+		if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+		    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+			goto nla_put_failure;
+	}
+
+	if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event))
+		goto nla_put_failure;
+
+	if (nl80211_send_chandef(msg, chandef))
+		goto nla_put_failure;
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
+void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
 				u32 num_packets, gfp_t gfp)
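nl80211_radar_notify() follows the standard best-effort multicast event shape: build the message, cancel and free on any put failure, and multicast on success with no error reported to anyone. A compact sketch of that pattern (kernel context; the command and attribute are reused here purely as placeholders):

/* Best-effort event multicast: allocation or put failures just drop
 * the event, since there is no requester to report an error to. */
static void notify_listeners(u32 wiphy_idx, u32 mcgrp_id, gfp_t gfp)
{
	struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	void *hdr;

	if (!msg)
		return;

	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT);
	if (!hdr || nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_idx)) {
		nlmsg_free(msg);	/* nothing sent, nothing leaked */
		return;
	}

	genlmsg_end(msg, hdr);
	genlmsg_multicast(msg, 0, mcgrp_id, gfp);
}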
@@ -9115,6 +9881,114 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_report_obss_beacon);
 
+#ifdef CONFIG_PM
+void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
+				   struct cfg80211_wowlan_wakeup *wakeup,
+				   gfp_t gfp)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	int err, size = 200;
+
+	trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup);
+
+	if (wakeup)
+		size += wakeup->packet_present_len;
+
+	msg = nlmsg_new(size, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN);
+	if (!hdr)
+		goto free_msg;
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+		goto free_msg;
+
+	if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+					wdev->netdev->ifindex))
+		goto free_msg;
+
+	if (wakeup) {
+		struct nlattr *reasons;
+
+		reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
+
+		if (wakeup->disconnect &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
+			goto free_msg;
+		if (wakeup->magic_pkt &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT))
+			goto free_msg;
+		if (wakeup->gtk_rekey_failure &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE))
+			goto free_msg;
+		if (wakeup->eap_identity_req &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST))
+			goto free_msg;
+		if (wakeup->four_way_handshake &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE))
+			goto free_msg;
+		if (wakeup->rfkill_release &&
+		    nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))
+			goto free_msg;
+
+		if (wakeup->pattern_idx >= 0 &&
+		    nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+				wakeup->pattern_idx))
+			goto free_msg;
+
+		if (wakeup->tcp_match)
+			nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH);
+
+		if (wakeup->tcp_connlost)
+			nla_put_flag(msg,
+				     NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST);
+
+		if (wakeup->tcp_nomoretokens)
+			nla_put_flag(msg,
+				     NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS);
+
+		if (wakeup->packet) {
+			u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
+			u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
+
+			if (!wakeup->packet_80211) {
+				pkt_attr =
+					NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023;
+				len_attr =
+					NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN;
+			}
+
+			if (wakeup->packet_len &&
+			    nla_put_u32(msg, len_attr, wakeup->packet_len))
+				goto free_msg;
+
+			if (nla_put(msg, pkt_attr, wakeup->packet_present_len,
+				    wakeup->packet))
+				goto free_msg;
+		}
+
+		nla_nest_end(msg, reasons);
+	}
+
+	err = genlmsg_end(msg, hdr);
+	if (err < 0)
+		goto free_msg;
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ free_msg:
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_wowlan_wakeup);
+#endif
+
 void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
 				enum nl80211_tdls_operation oper,
 				u16 reason_code, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2acba8477e9d..b061da4919e1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -108,6 +108,13 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
 			     struct net_device *netdev,
 			     enum nl80211_cqm_rssi_threshold_event rssi_event,
 			     gfp_t gfp);
+
+void
+nl80211_radar_notify(struct cfg80211_registered_device *rdev,
+		     struct cfg80211_chan_def *chandef,
+		     enum nl80211_radar_event event,
+		     struct net_device *netdev, gfp_t gfp);
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 6c0c8191f837..422d38291d66 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -875,4 +875,16 @@ static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
 	rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
 	trace_rdev_return_void(&rdev->wiphy);
 }
+
+static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
+				   struct net_device *dev,
+				   struct cfg80211_acl_data *params)
+{
+	int ret;
+
+	trace_rdev_set_mac_acl(&rdev->wiphy, dev, params);
+	ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
 #endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6e5308998e30..98532c00242d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -48,7 +48,6 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/list.h>
-#include <linux/random.h>
 #include <linux/ctype.h>
 #include <linux/nl80211.h>
 #include <linux/platform_device.h>
@@ -66,6 +65,13 @@
 #define REG_DBG_PRINT(args...)
 #endif
 
+enum reg_request_treatment {
+	REG_REQ_OK,
+	REG_REQ_IGNORE,
+	REG_REQ_INTERSECT,
+	REG_REQ_ALREADY_SET,
+};
+
 static struct regulatory_request core_request_world = {
 	.initiator = NL80211_REGDOM_SET_BY_CORE,
 	.alpha2[0] = '0',
@@ -76,7 +82,8 @@ static struct regulatory_request core_request_world = {
 };
 
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request = &core_request_world;
+static struct regulatory_request __rcu *last_request =
+	(void __rcu *)&core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -88,16 +95,16 @@ static struct device_type reg_device_type = {
 /*
  * Central wireless core regulatory domains, we only need two,
  * the current one and a world regulatory domain in case we have no
- * information to give us an alpha2
+ * information to give us an alpha2.
  */
-const struct ieee80211_regdomain *cfg80211_regdomain;
+const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
 
 /*
  * Protects static reg.c components:
- * - cfg80211_world_regdom
- * - cfg80211_regdom
- * - last_request
+ * - cfg80211_regdomain (if not used with RCU)
+ * - cfg80211_world_regdom
+ * - last_request (if not used with RCU)
  * - reg_num_devs_support_basehint
  */
 static DEFINE_MUTEX(reg_mutex);
 
@@ -112,6 +119,31 @@ static inline void assert_reg_lock(void)
 	lockdep_assert_held(&reg_mutex);
 }
 
+static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
+{
+	return rcu_dereference_protected(cfg80211_regdomain,
+					 lockdep_is_held(&reg_mutex));
+}
+
+static const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
+{
+	return rcu_dereference_protected(wiphy->regd,
+					 lockdep_is_held(&reg_mutex));
+}
+
+static void rcu_free_regdom(const struct ieee80211_regdomain *r)
+{
+	if (!r)
+		return;
+	kfree_rcu((struct ieee80211_regdomain *)r, rcu_head);
+}
+
+static struct regulatory_request *get_last_request(void)
+{
+	return rcu_dereference_check(last_request,
+				     lockdep_is_held(&reg_mutex));
+}
+
 /* Used to queue up regulatory hints */
 static LIST_HEAD(reg_requests_list);
 static spinlock_t reg_requests_lock;
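The accessors above capture the update-side RCU discipline: holders of the mutex dereference the __rcu pointer via rcu_dereference_protected(), which documents and lockdep-verifies the locking assumption instead of taking rcu_read_lock(). A self-contained sketch of the full swap cycle (names hypothetical):

/* Mutex-protected updater plus RCU-deferred free of the old object. */
struct my_cfg { int v; struct rcu_head rcu_head; };

static DEFINE_MUTEX(cfg_mutex);
static struct my_cfg __rcu *cur_cfg;

static struct my_cfg *get_cur_cfg(void)
{
	return rcu_dereference_protected(cur_cfg,
					 lockdep_is_held(&cfg_mutex));
}

static void replace_cfg(struct my_cfg *new_cfg)
{
	struct my_cfg *old;

	mutex_lock(&cfg_mutex);
	old = get_cur_cfg();
	rcu_assign_pointer(cur_cfg, new_cfg);	/* publish to readers */
	mutex_unlock(&cfg_mutex);
	if (old)
		kfree_rcu(old, rcu_head);	/* free after grace period */
}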
@@ -177,28 +209,37 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(bool full_reset)
+static void reset_regdomains(bool full_reset,
+			     const struct ieee80211_regdomain *new_regdom)
 {
+	const struct ieee80211_regdomain *r;
+	struct regulatory_request *lr;
+
+	assert_reg_lock();
+
+	r = get_cfg80211_regdom();
+
 	/* avoid freeing static information or freeing something twice */
-	if (cfg80211_regdomain == cfg80211_world_regdom)
-		cfg80211_regdomain = NULL;
+	if (r == cfg80211_world_regdom)
+		r = NULL;
 	if (cfg80211_world_regdom == &world_regdom)
 		cfg80211_world_regdom = NULL;
-	if (cfg80211_regdomain == &world_regdom)
-		cfg80211_regdomain = NULL;
+	if (r == &world_regdom)
+		r = NULL;
 
-	kfree(cfg80211_regdomain);
-	kfree(cfg80211_world_regdom);
+	rcu_free_regdom(r);
+	rcu_free_regdom(cfg80211_world_regdom);
 
 	cfg80211_world_regdom = &world_regdom;
-	cfg80211_regdomain = NULL;
+	rcu_assign_pointer(cfg80211_regdomain, new_regdom);
 
 	if (!full_reset)
 		return;
 
-	if (last_request != &core_request_world)
-		kfree(last_request);
-	last_request = &core_request_world;
+	lr = get_last_request();
+	if (lr != &core_request_world && lr)
+		kfree_rcu(lr, rcu_head);
+	rcu_assign_pointer(last_request, &core_request_world);
 }
 
 /*
@@ -207,30 +248,29 @@ static void reset_regdomains(bool full_reset)
  */
 static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
-	BUG_ON(!last_request);
+	struct regulatory_request *lr;
 
-	reset_regdomains(false);
+	lr = get_last_request();
+
+	WARN_ON(!lr);
+
+	reset_regdomains(false, rd);
 
 	cfg80211_world_regdom = rd;
-	cfg80211_regdomain = rd;
 }
 
 bool is_world_regdom(const char *alpha2)
 {
 	if (!alpha2)
 		return false;
-	if (alpha2[0] == '0' && alpha2[1] == '0')
-		return true;
-	return false;
+	return alpha2[0] == '0' && alpha2[1] == '0';
 }
 
 static bool is_alpha2_set(const char *alpha2)
 {
 	if (!alpha2)
 		return false;
-	if (alpha2[0] != 0 && alpha2[1] != 0)
-		return true;
-	return false;
+	return alpha2[0] && alpha2[1];
 }
 
 static bool is_unknown_alpha2(const char *alpha2)
@@ -241,9 +281,7 @@ static bool is_unknown_alpha2(const char *alpha2)
 	 * Special case where regulatory domain was built by driver
 	 * but a specific alpha2 cannot be determined
 	 */
-	if (alpha2[0] == '9' && alpha2[1] == '9')
-		return true;
-	return false;
+	return alpha2[0] == '9' && alpha2[1] == '9';
 }
 
 static bool is_intersected_alpha2(const char *alpha2)
@@ -255,39 +293,30 @@ static bool is_intersected_alpha2(const char *alpha2)
 	 * result of an intersection between two regulatory domain
 	 * structures
 	 */
-	if (alpha2[0] == '9' && alpha2[1] == '8')
-		return true;
-	return false;
+	return alpha2[0] == '9' && alpha2[1] == '8';
 }
 
 static bool is_an_alpha2(const char *alpha2)
 {
 	if (!alpha2)
 		return false;
-	if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
-		return true;
-	return false;
+	return isalpha(alpha2[0]) && isalpha(alpha2[1]);
 }
 
 static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
 {
 	if (!alpha2_x || !alpha2_y)
 		return false;
-	if (alpha2_x[0] == alpha2_y[0] &&
-	    alpha2_x[1] == alpha2_y[1])
-		return true;
-	return false;
+	return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1];
 }
 
 static bool regdom_changes(const char *alpha2)
 {
-	assert_cfg80211_lock();
+	const struct ieee80211_regdomain *r = get_cfg80211_regdom();
 
-	if (!cfg80211_regdomain)
+	if (!r)
 		return true;
-	if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
-		return false;
-	return true;
+	return !alpha2_equal(r->alpha2, alpha2);
 }
 
 /*
@@ -301,38 +330,36 @@ static bool is_user_regdom_saved(void)
 		return false;
 
 	/* This would indicate a mistake on the design */
-	if (WARN((!is_world_regdom(user_alpha2) &&
-	    !is_an_alpha2(user_alpha2)),
+	if (WARN(!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2),
 		 "Unexpected user alpha2: %c%c\n",
-		 user_alpha2[0],
-		 user_alpha2[1]))
+		 user_alpha2[0], user_alpha2[1]))
 		return false;
 
 	return true;
 }
 
-static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
-			 const struct ieee80211_regdomain *src_regd)
+static const struct ieee80211_regdomain *
+reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
 	struct ieee80211_regdomain *regd;
-	int size_of_regd = 0;
+	int size_of_regd;
 	unsigned int i;
 
-	size_of_regd = sizeof(struct ieee80211_regdomain) +
-	  ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
+	size_of_regd =
+		sizeof(struct ieee80211_regdomain) +
+		src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
 
 	regd = kzalloc(size_of_regd, GFP_KERNEL);
 	if (!regd)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
 
 	for (i = 0; i < src_regd->n_reg_rules; i++)
 		memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
 		       sizeof(struct ieee80211_reg_rule));
 
-	*dst_regd = regd;
-	return 0;
+	return regd;
 }
 
 #ifdef CONFIG_CFG80211_INTERNAL_REGDB
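reg_copy_regd() now returns the result directly and encodes errors with ERR_PTR(), so callers no longer need an out-parameter. A minimal sketch of the convention (my_copy() is a hypothetical stand-in):

/* ERR_PTR convention: the errno travels inside the returned pointer. */
static struct my_regdom *my_copy(const struct my_regdom *src)
{
	struct my_regdom *dst = kmemdup(src, sizeof(*src), GFP_KERNEL);

	if (!dst)
		return ERR_PTR(-ENOMEM);
	return dst;
}

static int use_copy(const struct my_regdom *src)
{
	struct my_regdom *d = my_copy(src);

	if (IS_ERR(d))
		return PTR_ERR(d);	/* recover the errno */
	kfree(d);
	return 0;
}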
@@ -347,9 +374,8 @@ static DEFINE_MUTEX(reg_regdb_search_mutex);
 static void reg_regdb_search(struct work_struct *work)
 {
 	struct reg_regdb_search_request *request;
-	const struct ieee80211_regdomain *curdom, *regdom;
-	int i, r;
-	bool set_reg = false;
+	const struct ieee80211_regdomain *curdom, *regdom = NULL;
+	int i;
 
 	mutex_lock(&cfg80211_mutex);
 
@@ -360,14 +386,11 @@ static void reg_regdb_search(struct work_struct *work)
 				       list);
 		list_del(&request->list);
 
-		for (i=0; i<reg_regdb_size; i++) {
+		for (i = 0; i < reg_regdb_size; i++) {
 			curdom = reg_regdb[i];
 
-			if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
-				r = reg_copy_regd(&regdom, curdom);
-				if (r)
-					break;
-				set_reg = true;
+			if (alpha2_equal(request->alpha2, curdom->alpha2)) {
+				regdom = reg_copy_regd(curdom);
 				break;
 			}
 		}
@@ -376,7 +399,7 @@ static void reg_regdb_search(struct work_struct *work)
 	}
 	mutex_unlock(&reg_regdb_search_mutex);
 
-	if (set_reg)
+	if (!IS_ERR_OR_NULL(regdom))
 		set_regdom(regdom);
 
 	mutex_unlock(&cfg80211_mutex);
@@ -434,15 +457,14 @@ static int call_crda(const char *alpha2)
 	return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE);
 }
 
-/* Used by nl80211 before kmalloc'ing our regulatory domain */
-bool reg_is_valid_request(const char *alpha2)
+static bool reg_is_valid_request(const char *alpha2)
 {
-	assert_cfg80211_lock();
+	struct regulatory_request *lr = get_last_request();
 
-	if (!last_request)
+	if (!lr || lr->processed)
 		return false;
 
-	return alpha2_equal(last_request->alpha2, alpha2);
+	return alpha2_equal(lr->alpha2, alpha2);
 }
 
 /* Sanity check on a regulatory rule */
@@ -460,7 +482,7 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
 	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
 
 	if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
-		freq_range->max_bandwidth_khz > freq_diff)
+	    freq_range->max_bandwidth_khz > freq_diff)
 		return false;
 
 	return true;
@@ -487,8 +509,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
 }
 
 static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
-			    u32 center_freq_khz,
-			    u32 bw_khz)
+			    u32 center_freq_khz, u32 bw_khz)
 {
 	u32 start_freq_khz, end_freq_khz;
 
@@ -518,7 +539,7 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
  * regulatory rule support for other "bands".
  **/
 static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
-			       u32 freq_khz)
+			      u32 freq_khz)
 {
 #define ONE_GHZ_IN_KHZ	1000000
 	/*
@@ -540,10 +561,9 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
  */
-static int reg_rules_intersect(
-	const struct ieee80211_reg_rule *rule1,
-	const struct ieee80211_reg_rule *rule2,
-	struct ieee80211_reg_rule *intersected_rule)
+static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
+			       const struct ieee80211_reg_rule *rule2,
+			       struct ieee80211_reg_rule *intersected_rule)
 {
 	const struct ieee80211_freq_range *freq_range1, *freq_range2;
 	struct ieee80211_freq_range *freq_range;
@@ -560,11 +580,11 @@ static int reg_rules_intersect(
 	power_rule = &intersected_rule->power_rule;
 
 	freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
-					   freq_range2->start_freq_khz);
+					 freq_range2->start_freq_khz);
 	freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
-					 freq_range2->end_freq_khz);
+				       freq_range2->end_freq_khz);
 	freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
-					      freq_range2->max_bandwidth_khz);
+					    freq_range2->max_bandwidth_khz);
 
 	freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
 	if (freq_range->max_bandwidth_khz > freq_diff)
@@ -575,7 +595,7 @@ static int reg_rules_intersect(
 	power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
 					   power_rule2->max_antenna_gain);
 
-	intersected_rule->flags = (rule1->flags | rule2->flags);
+	intersected_rule->flags = rule1->flags | rule2->flags;
 
 	if (!is_valid_reg_rule(intersected_rule))
 		return -EINVAL;
@@ -596,9 +616,9 @@ static int reg_rules_intersect(
  * resulting intersection of rules between rd1 and rd2. We will
  * kzalloc() this structure for you.
  */
-static struct ieee80211_regdomain *regdom_intersect(
-	const struct ieee80211_regdomain *rd1,
-	const struct ieee80211_regdomain *rd2)
+static struct ieee80211_regdomain *
+regdom_intersect(const struct ieee80211_regdomain *rd1,
+		 const struct ieee80211_regdomain *rd2)
 {
 	int r, size_of_regd;
 	unsigned int x, y;
@@ -607,12 +627,7 @@ static struct ieee80211_regdomain *regdom_intersect(
 	struct ieee80211_reg_rule *intersected_rule;
 	struct ieee80211_regdomain *rd;
 	/* This is just a dummy holder to help us count */
-	struct ieee80211_reg_rule irule;
-
-	/* Uses the stack temporarily for counter arithmetic */
-	intersected_rule = &irule;
-
-	memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule));
+	struct ieee80211_reg_rule dummy_rule;
 
 	if (!rd1 || !rd2)
 		return NULL;
@@ -629,11 +644,8 @@ static struct ieee80211_regdomain *regdom_intersect(
629 rule1 = &rd1->reg_rules[x]; 644 rule1 = &rd1->reg_rules[x];
630 for (y = 0; y < rd2->n_reg_rules; y++) { 645 for (y = 0; y < rd2->n_reg_rules; y++) {
631 rule2 = &rd2->reg_rules[y]; 646 rule2 = &rd2->reg_rules[y];
632 if (!reg_rules_intersect(rule1, rule2, 647 if (!reg_rules_intersect(rule1, rule2, &dummy_rule))
633 intersected_rule))
634 num_rules++; 648 num_rules++;
635 memset(intersected_rule, 0,
636 sizeof(struct ieee80211_reg_rule));
637 } 649 }
638 } 650 }
639 651
@@ -641,15 +653,15 @@ static struct ieee80211_regdomain *regdom_intersect(
641 return NULL; 653 return NULL;
642 654
643 size_of_regd = sizeof(struct ieee80211_regdomain) + 655 size_of_regd = sizeof(struct ieee80211_regdomain) +
644 ((num_rules + 1) * sizeof(struct ieee80211_reg_rule)); 656 num_rules * sizeof(struct ieee80211_reg_rule);
645 657
646 rd = kzalloc(size_of_regd, GFP_KERNEL); 658 rd = kzalloc(size_of_regd, GFP_KERNEL);
647 if (!rd) 659 if (!rd)
648 return NULL; 660 return NULL;
649 661
650 for (x = 0; x < rd1->n_reg_rules; x++) { 662 for (x = 0; x < rd1->n_reg_rules && rule_idx < num_rules; x++) {
651 rule1 = &rd1->reg_rules[x]; 663 rule1 = &rd1->reg_rules[x];
652 for (y = 0; y < rd2->n_reg_rules; y++) { 664 for (y = 0; y < rd2->n_reg_rules && rule_idx < num_rules; y++) {
653 rule2 = &rd2->reg_rules[y]; 665 rule2 = &rd2->reg_rules[y];
654 /* 666 /*
655 * This time around instead of using the stack lets 667 * This time around instead of using the stack let's
@@ -657,8 +669,7 @@ static struct ieee80211_regdomain *regdom_intersect(
657 * a memcpy() 669 * a memcpy()
658 */ 670 */
659 intersected_rule = &rd->reg_rules[rule_idx]; 671 intersected_rule = &rd->reg_rules[rule_idx];
660 r = reg_rules_intersect(rule1, rule2, 672 r = reg_rules_intersect(rule1, rule2, intersected_rule);
661 intersected_rule);
662 /* 673 /*
663 * No need to memset here the intersected rule here as 674 * No need to memset the intersected rule here as
664 * we're not using the stack anymore 675 * we're not using the stack anymore
@@ -699,34 +710,16 @@ static u32 map_regdom_flags(u32 rd_flags)
699 return channel_flags; 710 return channel_flags;
700} 711}
701 712
702static int freq_reg_info_regd(struct wiphy *wiphy, 713static const struct ieee80211_reg_rule *
703 u32 center_freq, 714freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
704 u32 desired_bw_khz, 715 const struct ieee80211_regdomain *regd)
705 const struct ieee80211_reg_rule **reg_rule,
706 const struct ieee80211_regdomain *custom_regd)
707{ 716{
708 int i; 717 int i;
709 bool band_rule_found = false; 718 bool band_rule_found = false;
710 const struct ieee80211_regdomain *regd;
711 bool bw_fits = false; 719 bool bw_fits = false;
712 720
713 if (!desired_bw_khz)
714 desired_bw_khz = MHZ_TO_KHZ(20);
715
716 regd = custom_regd ? custom_regd : cfg80211_regdomain;
717
718 /*
719 * Follow the driver's regulatory domain, if present, unless a country
720 * IE has been processed or a user wants to help compliance further
721 */
722 if (!custom_regd &&
723 last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
724 last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
725 wiphy->regd)
726 regd = wiphy->regd;
727
728 if (!regd) 721 if (!regd)
729 return -EINVAL; 722 return ERR_PTR(-EINVAL);
730 723
731 for (i = 0; i < regd->n_reg_rules; i++) { 724 for (i = 0; i < regd->n_reg_rules; i++) {
732 const struct ieee80211_reg_rule *rr; 725 const struct ieee80211_reg_rule *rr;
@@ -743,33 +736,36 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
743 if (!band_rule_found) 736 if (!band_rule_found)
744 band_rule_found = freq_in_rule_band(fr, center_freq); 737 band_rule_found = freq_in_rule_band(fr, center_freq);
745 738
746 bw_fits = reg_does_bw_fit(fr, 739 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
747 center_freq,
748 desired_bw_khz);
749 740
750 if (band_rule_found && bw_fits) { 741 if (band_rule_found && bw_fits)
751 *reg_rule = rr; 742 return rr;
752 return 0;
753 }
754 } 743 }
755 744
756 if (!band_rule_found) 745 if (!band_rule_found)
757 return -ERANGE; 746 return ERR_PTR(-ERANGE);
758 747
759 return -EINVAL; 748 return ERR_PTR(-EINVAL);
760} 749}
761 750
762int freq_reg_info(struct wiphy *wiphy, 751const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
763 u32 center_freq, 752 u32 center_freq)
764 u32 desired_bw_khz,
765 const struct ieee80211_reg_rule **reg_rule)
766{ 753{
767 assert_cfg80211_lock(); 754 const struct ieee80211_regdomain *regd;
768 return freq_reg_info_regd(wiphy, 755 struct regulatory_request *lr = get_last_request();
769 center_freq, 756
770 desired_bw_khz, 757 /*
771 reg_rule, 758 * Follow the driver's regulatory domain, if present, unless a country
772 NULL); 759 * IE has been processed or a user wants to help compliance further
760 */
761 if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
762 lr->initiator != NL80211_REGDOM_SET_BY_USER &&
763 wiphy->regd)
764 regd = get_wiphy_regdom(wiphy);
765 else
766 regd = get_cfg80211_regdom();
767
768 return freq_reg_info_regd(wiphy, center_freq, regd);
773} 769}
774EXPORT_SYMBOL(freq_reg_info); 770EXPORT_SYMBOL(freq_reg_info);
775 771
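
For readers unfamiliar with the convention the new signature adopts: errors now travel in the returned pointer itself, via the ERR_PTR()/IS_ERR()/PTR_ERR() macros from <linux/err.h>, which encode small negative errnos in the top MAX_ERRNO bytes of the address space. A standalone rendition of the idea (simplified; the real macros carry extra annotations):

    #include <stdio.h>

    #define MAX_ERRNO   4095
    #define ERR_PTR(e)  ((void *)(long)(e))
    #define PTR_ERR(p)  ((long)(p))
    #define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    static const char *lookup(int found)
    {
        if (!found)
            return ERR_PTR(-22);    /* -EINVAL */
        return "rule";
    }

    int main(void)
    {
        const char *r = lookup(0);

        if (IS_ERR(r))
            printf("no rule, error %ld\n", PTR_ERR(r));
        else
            printf("got: %s\n", r);
        return 0;
    }

Callers of freq_reg_info() therefore switch from testing an int return to IS_ERR()/PTR_ERR() on the returned rule, exactly as handle_channel() does below.
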
@@ -792,7 +788,6 @@ static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
792} 788}
793 789
794static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan, 790static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
795 u32 desired_bw_khz,
796 const struct ieee80211_reg_rule *reg_rule) 791 const struct ieee80211_reg_rule *reg_rule)
797{ 792{
798 const struct ieee80211_power_rule *power_rule; 793 const struct ieee80211_power_rule *power_rule;
@@ -807,21 +802,16 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
807 else 802 else
808 snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain); 803 snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
809 804
810 REG_DBG_PRINT("Updating information on frequency %d MHz " 805 REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
811 "for a %d MHz width channel with regulatory rule:\n", 806 chan->center_freq);
812 chan->center_freq,
813 KHZ_TO_MHZ(desired_bw_khz));
814 807
815 REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n", 808 REG_DBG_PRINT("(%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
816 freq_range->start_freq_khz, 809 freq_range->start_freq_khz, freq_range->end_freq_khz,
817 freq_range->end_freq_khz, 810 freq_range->max_bandwidth_khz, max_antenna_gain,
818 freq_range->max_bandwidth_khz,
819 max_antenna_gain,
820 power_rule->max_eirp); 811 power_rule->max_eirp);
821} 812}
822#else 813#else
823static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan, 814static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
824 u32 desired_bw_khz,
825 const struct ieee80211_reg_rule *reg_rule) 815 const struct ieee80211_reg_rule *reg_rule)
826{ 816{
827 return; 817 return;
@@ -831,43 +821,25 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
831/* 821/*
832 * Note that right now we assume the desired channel bandwidth 822 * Note that right now we assume the desired channel bandwidth
833 * is always 20 MHz for each individual channel (HT40 uses 20 MHz 823 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
834 * per channel, the primary and the extension channel). To support 824 * per channel, the primary and the extension channel).
835 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
836 * new ieee80211_channel.target_bw and re run the regulatory check
837 * on the wiphy with the target_bw specified. Then we can simply use
838 * that below for the desired_bw_khz below.
839 */ 825 */
840static void handle_channel(struct wiphy *wiphy, 826static void handle_channel(struct wiphy *wiphy,
841 enum nl80211_reg_initiator initiator, 827 enum nl80211_reg_initiator initiator,
842 enum ieee80211_band band, 828 struct ieee80211_channel *chan)
843 unsigned int chan_idx)
844{ 829{
845 int r;
846 u32 flags, bw_flags = 0; 830 u32 flags, bw_flags = 0;
847 u32 desired_bw_khz = MHZ_TO_KHZ(20);
848 const struct ieee80211_reg_rule *reg_rule = NULL; 831 const struct ieee80211_reg_rule *reg_rule = NULL;
849 const struct ieee80211_power_rule *power_rule = NULL; 832 const struct ieee80211_power_rule *power_rule = NULL;
850 const struct ieee80211_freq_range *freq_range = NULL; 833 const struct ieee80211_freq_range *freq_range = NULL;
851 struct ieee80211_supported_band *sband;
852 struct ieee80211_channel *chan;
853 struct wiphy *request_wiphy = NULL; 834 struct wiphy *request_wiphy = NULL;
835 struct regulatory_request *lr = get_last_request();
854 836
855 assert_cfg80211_lock(); 837 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
856
857 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
858
859 sband = wiphy->bands[band];
860 BUG_ON(chan_idx >= sband->n_channels);
861 chan = &sband->channels[chan_idx];
862 838
863 flags = chan->orig_flags; 839 flags = chan->orig_flags;
864 840
865 r = freq_reg_info(wiphy, 841 reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
866 MHZ_TO_KHZ(chan->center_freq), 842 if (IS_ERR(reg_rule)) {
867 desired_bw_khz,
868 &reg_rule);
869
870 if (r) {
871 /* 843 /*
872 * We will disable all channels that do not match our 844 * We will disable all channels that do not match our
873 * received regulatory rule unless the hint is coming 845 * received regulatory rule unless the hint is coming
@@ -879,7 +851,7 @@ static void handle_channel(struct wiphy *wiphy,
879 * while 5 GHz is still supported. 851 * while 5 GHz is still supported.
880 */ 852 */
881 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && 853 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
882 r == -ERANGE) 854 PTR_ERR(reg_rule) == -ERANGE)
883 return; 855 return;
884 856
885 REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq); 857 REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
@@ -887,15 +859,19 @@ static void handle_channel(struct wiphy *wiphy,
887 return; 859 return;
888 } 860 }
889 861
890 chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule); 862 chan_reg_rule_print_dbg(chan, reg_rule);
891 863
892 power_rule = &reg_rule->power_rule; 864 power_rule = &reg_rule->power_rule;
893 freq_range = &reg_rule->freq_range; 865 freq_range = &reg_rule->freq_range;
894 866
895 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) 867 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
896 bw_flags = IEEE80211_CHAN_NO_HT40; 868 bw_flags = IEEE80211_CHAN_NO_HT40;
869 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
870 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
871 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
872 bw_flags |= IEEE80211_CHAN_NO_160MHZ;
897 873
898 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 874 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
899 request_wiphy && request_wiphy == wiphy && 875 request_wiphy && request_wiphy == wiphy &&
900 request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { 876 request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
901 /* 877 /*
@@ -912,10 +888,14 @@ static void handle_channel(struct wiphy *wiphy,
912 return; 888 return;
913 } 889 }
914 890
891 chan->dfs_state = NL80211_DFS_USABLE;
892 chan->dfs_state_entered = jiffies;
893
915 chan->beacon_found = false; 894 chan->beacon_found = false;
916 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 895 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
917 chan->max_antenna_gain = min(chan->orig_mag, 896 chan->max_antenna_gain =
918 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 897 min_t(int, chan->orig_mag,
898 MBI_TO_DBI(power_rule->max_antenna_gain));
919 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); 899 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
920 if (chan->orig_mpwr) { 900 if (chan->orig_mpwr) {
921 /* 901 /*
@@ -935,68 +915,65 @@ static void handle_channel(struct wiphy *wiphy,
935} 915}
936 916
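
The bandwidth flags are now a pure function of the rule's max_bandwidth_khz, so the new VHT restrictions fall out of the same comparison chain. A sketch of the mapping (flag values here are illustrative, not cfg80211's actual bit positions):

    #include <stdio.h>

    #define NO_HT40       0x1
    #define NO_80MHZ      0x2
    #define NO_160MHZ     0x4
    #define MHZ_TO_KHZ(f) ((f) * 1000)

    static unsigned int bw_flags(unsigned int max_bw_khz)
    {
        unsigned int flags = 0;

        if (max_bw_khz < MHZ_TO_KHZ(40))
            flags |= NO_HT40;
        if (max_bw_khz < MHZ_TO_KHZ(80))
            flags |= NO_80MHZ;
        if (max_bw_khz < MHZ_TO_KHZ(160))
            flags |= NO_160MHZ;
        return flags;
    }

    int main(void)
    {
        /* a 40 MHz rule permits HT40 but rules out 80/160 MHz VHT */
        printf("0x%x\n", bw_flags(MHZ_TO_KHZ(40)));   /* prints 0x6 */
        return 0;
    }

handle_channel_custom() below gains the identical three-way check, so custom regdomains restrict VHT the same way.
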
937static void handle_band(struct wiphy *wiphy, 917static void handle_band(struct wiphy *wiphy,
938 enum ieee80211_band band, 918 enum nl80211_reg_initiator initiator,
939 enum nl80211_reg_initiator initiator) 919 struct ieee80211_supported_band *sband)
940{ 920{
941 unsigned int i; 921 unsigned int i;
942 struct ieee80211_supported_band *sband;
943 922
944 BUG_ON(!wiphy->bands[band]); 923 if (!sband)
945 sband = wiphy->bands[band]; 924 return;
946 925
947 for (i = 0; i < sband->n_channels; i++) 926 for (i = 0; i < sband->n_channels; i++)
948 handle_channel(wiphy, initiator, band, i); 927 handle_channel(wiphy, initiator, &sband->channels[i]);
949} 928}
950 929
951static bool reg_request_cell_base(struct regulatory_request *request) 930static bool reg_request_cell_base(struct regulatory_request *request)
952{ 931{
953 if (request->initiator != NL80211_REGDOM_SET_BY_USER) 932 if (request->initiator != NL80211_REGDOM_SET_BY_USER)
954 return false; 933 return false;
955 if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE) 934 return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
956 return false;
957 return true;
958} 935}
959 936
960bool reg_last_request_cell_base(void) 937bool reg_last_request_cell_base(void)
961{ 938{
962 bool val; 939 bool val;
963 assert_cfg80211_lock();
964 940
965 mutex_lock(&reg_mutex); 941 mutex_lock(&reg_mutex);
966 val = reg_request_cell_base(last_request); 942 val = reg_request_cell_base(get_last_request());
967 mutex_unlock(&reg_mutex); 943 mutex_unlock(&reg_mutex);
944
968 return val; 945 return val;
969} 946}
970 947
971#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS 948#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
972
973/* Core specific check */ 949/* Core specific check */
974static int reg_ignore_cell_hint(struct regulatory_request *pending_request) 950static enum reg_request_treatment
951reg_ignore_cell_hint(struct regulatory_request *pending_request)
975{ 952{
953 struct regulatory_request *lr = get_last_request();
954
976 if (!reg_num_devs_support_basehint) 955 if (!reg_num_devs_support_basehint)
977 return -EOPNOTSUPP; 956 return REG_REQ_IGNORE;
978 957
979 if (reg_request_cell_base(last_request)) { 958 if (reg_request_cell_base(lr) &&
980 if (!regdom_changes(pending_request->alpha2)) 959 !regdom_changes(pending_request->alpha2))
981 return -EALREADY; 960 return REG_REQ_ALREADY_SET;
982 return 0; 961
983 } 962 return REG_REQ_OK;
984 return 0;
985} 963}
986 964
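
The enum reg_request_treatment values used from here on are not defined in the hunks shown; the following is reconstructed from the call sites (the names match the patch, while the comments and ordering are inferred, with REG_REQ_OK assumed to be 0 since a few paths still return 0):

    enum reg_request_treatment {
        REG_REQ_OK,            /* process the request, possibly calling CRDA */
        REG_REQ_IGNORE,        /* drop the request */
        REG_REQ_INTERSECT,     /* apply the intersection of current and new */
        REG_REQ_ALREADY_SET,   /* nothing new, just notify userspace */
    };

Compared with the old errno convention (-EOPNOTSUPP, -EALREADY, plus the REG_INTERSECT define removed further down), the enum makes the treatment decisions self-documenting and keeps them out of the errno namespace.
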
987/* Device specific check */ 965/* Device specific check */
988static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) 966static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
989{ 967{
990 if (!(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS)) 968 return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
991 return true;
992 return false;
993} 969}
994#else 970#else
995static int reg_ignore_cell_hint(struct regulatory_request *pending_request) 971static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request)
996{ 972{
997 return -EOPNOTSUPP; 973 return REG_REQ_IGNORE;
998} 974}
999static int reg_dev_ignore_cell_hint(struct wiphy *wiphy) 975
976static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
1000{ 977{
1001 return true; 978 return true;
1002} 979}
@@ -1006,18 +983,17 @@ static int reg_dev_ignore_cell_hint(struct wiphy *wiphy)
1006static bool ignore_reg_update(struct wiphy *wiphy, 983static bool ignore_reg_update(struct wiphy *wiphy,
1007 enum nl80211_reg_initiator initiator) 984 enum nl80211_reg_initiator initiator)
1008{ 985{
1009 if (!last_request) { 986 struct regulatory_request *lr = get_last_request();
1010 REG_DBG_PRINT("Ignoring regulatory request %s since " 987
1011 "last_request is not set\n", 988 if (!lr) {
989 REG_DBG_PRINT("Ignoring regulatory request %s since last_request is not set\n",
1012 reg_initiator_name(initiator)); 990 reg_initiator_name(initiator));
1013 return true; 991 return true;
1014 } 992 }
1015 993
1016 if (initiator == NL80211_REGDOM_SET_BY_CORE && 994 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
1017 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) { 995 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
1018 REG_DBG_PRINT("Ignoring regulatory request %s " 996 REG_DBG_PRINT("Ignoring regulatory request %s since the driver uses its own custom regulatory domain\n",
1019 "since the driver uses its own custom "
1020 "regulatory domain\n",
1021 reg_initiator_name(initiator)); 997 reg_initiator_name(initiator));
1022 return true; 998 return true;
1023 } 999 }
@@ -1028,22 +1004,35 @@ static bool ignore_reg_update(struct wiphy *wiphy,
1028 */ 1004 */
1029 if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd && 1005 if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
1030 initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 1006 initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1031 !is_world_regdom(last_request->alpha2)) { 1007 !is_world_regdom(lr->alpha2)) {
1032 REG_DBG_PRINT("Ignoring regulatory request %s " 1008 REG_DBG_PRINT("Ignoring regulatory request %s since the driver requires its own regulatory domain to be set first\n",
1033 "since the driver requires its own regulatory "
1034 "domain to be set first\n",
1035 reg_initiator_name(initiator)); 1009 reg_initiator_name(initiator));
1036 return true; 1010 return true;
1037 } 1011 }
1038 1012
1039 if (reg_request_cell_base(last_request)) 1013 if (reg_request_cell_base(lr))
1040 return reg_dev_ignore_cell_hint(wiphy); 1014 return reg_dev_ignore_cell_hint(wiphy);
1041 1015
1042 return false; 1016 return false;
1043} 1017}
1044 1018
1045static void handle_reg_beacon(struct wiphy *wiphy, 1019static bool reg_is_world_roaming(struct wiphy *wiphy)
1046 unsigned int chan_idx, 1020{
1021 const struct ieee80211_regdomain *cr = get_cfg80211_regdom();
1022 const struct ieee80211_regdomain *wr = get_wiphy_regdom(wiphy);
1023 struct regulatory_request *lr = get_last_request();
1024
1025 if (is_world_regdom(cr->alpha2) || (wr && is_world_regdom(wr->alpha2)))
1026 return true;
1027
1028 if (lr && lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1029 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
1030 return true;
1031
1032 return false;
1033}
1034
1035static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
1047 struct reg_beacon *reg_beacon) 1036 struct reg_beacon *reg_beacon)
1048{ 1037{
1049 struct ieee80211_supported_band *sband; 1038 struct ieee80211_supported_band *sband;
@@ -1051,8 +1040,6 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1051 bool channel_changed = false; 1040 bool channel_changed = false;
1052 struct ieee80211_channel chan_before; 1041 struct ieee80211_channel chan_before;
1053 1042
1054 assert_cfg80211_lock();
1055
1056 sband = wiphy->bands[reg_beacon->chan.band]; 1043 sband = wiphy->bands[reg_beacon->chan.band];
1057 chan = &sband->channels[chan_idx]; 1044 chan = &sband->channels[chan_idx];
1058 1045
@@ -1064,6 +1051,9 @@ static void handle_reg_beacon(struct wiphy *wiphy,
1064 1051
1065 chan->beacon_found = true; 1052 chan->beacon_found = true;
1066 1053
1054 if (!reg_is_world_roaming(wiphy))
1055 return;
1056
1067 if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS) 1057 if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
1068 return; 1058 return;
1069 1059
@@ -1094,8 +1084,6 @@ static void wiphy_update_new_beacon(struct wiphy *wiphy,
1094 unsigned int i; 1084 unsigned int i;
1095 struct ieee80211_supported_band *sband; 1085 struct ieee80211_supported_band *sband;
1096 1086
1097 assert_cfg80211_lock();
1098
1099 if (!wiphy->bands[reg_beacon->chan.band]) 1087 if (!wiphy->bands[reg_beacon->chan.band])
1100 return; 1088 return;
1101 1089
@@ -1114,11 +1102,6 @@ static void wiphy_update_beacon_reg(struct wiphy *wiphy)
1114 struct ieee80211_supported_band *sband; 1102 struct ieee80211_supported_band *sband;
1115 struct reg_beacon *reg_beacon; 1103 struct reg_beacon *reg_beacon;
1116 1104
1117 assert_cfg80211_lock();
1118
1119 if (list_empty(&reg_beacon_list))
1120 return;
1121
1122 list_for_each_entry(reg_beacon, &reg_beacon_list, list) { 1105 list_for_each_entry(reg_beacon, &reg_beacon_list, list) {
1123 if (!wiphy->bands[reg_beacon->chan.band]) 1106 if (!wiphy->bands[reg_beacon->chan.band])
1124 continue; 1107 continue;
@@ -1128,18 +1111,6 @@ static void wiphy_update_beacon_reg(struct wiphy *wiphy)
1128 } 1111 }
1129} 1112}
1130 1113
1131static bool reg_is_world_roaming(struct wiphy *wiphy)
1132{
1133 if (is_world_regdom(cfg80211_regdomain->alpha2) ||
1134 (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
1135 return true;
1136 if (last_request &&
1137 last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1138 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
1139 return true;
1140 return false;
1141}
1142
1143/* Reap the advantages of previously found beacons */ 1114/* Reap the advantages of previously found beacons */
1144static void reg_process_beacons(struct wiphy *wiphy) 1115static void reg_process_beacons(struct wiphy *wiphy)
1145{ 1116{
@@ -1149,39 +1120,29 @@ static void reg_process_beacons(struct wiphy *wiphy)
1149 */ 1120 */
1150 if (!last_request) 1121 if (!get_last_request())
1151 return; 1122 return;
1152 if (!reg_is_world_roaming(wiphy))
1153 return;
1154 wiphy_update_beacon_reg(wiphy); 1123 wiphy_update_beacon_reg(wiphy);
1155} 1124}
1156 1125
1157static bool is_ht40_not_allowed(struct ieee80211_channel *chan) 1126static bool is_ht40_allowed(struct ieee80211_channel *chan)
1158{ 1127{
1159 if (!chan) 1128 if (!chan)
1160 return true; 1129 return false;
1161 if (chan->flags & IEEE80211_CHAN_DISABLED) 1130 if (chan->flags & IEEE80211_CHAN_DISABLED)
1162 return true; 1131 return false;
1163 /* This would happen when regulatory rules disallow HT40 completely */ 1132 /* This would happen when regulatory rules disallow HT40 completely */
1164 if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40))) 1133 if ((chan->flags & IEEE80211_CHAN_NO_HT40) == IEEE80211_CHAN_NO_HT40)
1165 return true; 1134 return false;
1166 return false; 1135 return true;
1167} 1136}
1168 1137
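
The inverted helper keeps the full-mask comparison, and it matters: IEEE80211_CHAN_NO_HT40 is the OR of the HT40PLUS and HT40MINUS flags, so HT40 is only completely blocked when both halves are. A toy demonstration (illustrative flag values):

    #include <stdio.h>

    #define NO_HT40PLUS  0x1
    #define NO_HT40MINUS 0x2
    #define NO_HT40      (NO_HT40PLUS | NO_HT40MINUS)

    static int ht40_fully_blocked(unsigned int flags)
    {
        /* an any-bit test (flags & NO_HT40) would wrongly report
         * "blocked" when only one of the two directions is out */
        return (flags & NO_HT40) == NO_HT40;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               ht40_fully_blocked(NO_HT40PLUS),   /* 0: HT40- still allowed */
               ht40_fully_blocked(NO_HT40MINUS),  /* 0: HT40+ still allowed */
               ht40_fully_blocked(NO_HT40));      /* 1 */
        return 0;
    }
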
1169static void reg_process_ht_flags_channel(struct wiphy *wiphy, 1138static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1170 enum ieee80211_band band, 1139 struct ieee80211_channel *channel)
1171 unsigned int chan_idx)
1172{ 1140{
1173 struct ieee80211_supported_band *sband; 1141 struct ieee80211_supported_band *sband = wiphy->bands[channel->band];
1174 struct ieee80211_channel *channel;
1175 struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; 1142 struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
1176 unsigned int i; 1143 unsigned int i;
1177 1144
1178 assert_cfg80211_lock(); 1145 if (!is_ht40_allowed(channel)) {
1179
1180 sband = wiphy->bands[band];
1181 BUG_ON(chan_idx >= sband->n_channels);
1182 channel = &sband->channels[chan_idx];
1183
1184 if (is_ht40_not_allowed(channel)) {
1185 channel->flags |= IEEE80211_CHAN_NO_HT40; 1146 channel->flags |= IEEE80211_CHAN_NO_HT40;
1186 return; 1147 return;
1187 } 1148 }
@@ -1192,6 +1153,7 @@ static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1192 */ 1153 */
1193 for (i = 0; i < sband->n_channels; i++) { 1154 for (i = 0; i < sband->n_channels; i++) {
1194 struct ieee80211_channel *c = &sband->channels[i]; 1155 struct ieee80211_channel *c = &sband->channels[i];
1156
1195 if (c->center_freq == (channel->center_freq - 20)) 1157 if (c->center_freq == (channel->center_freq - 20))
1196 channel_before = c; 1158 channel_before = c;
1197 if (c->center_freq == (channel->center_freq + 20)) 1159 if (c->center_freq == (channel->center_freq + 20))
@@ -1203,28 +1165,27 @@ static void reg_process_ht_flags_channel(struct wiphy *wiphy,
1203 * if that ever changes we also need to change the below logic 1165 * if that ever changes we also need to change the below logic
1204 * to include that as well. 1166 * to include that as well.
1205 */ 1167 */
1206 if (is_ht40_not_allowed(channel_before)) 1168 if (!is_ht40_allowed(channel_before))
1207 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; 1169 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
1208 else 1170 else
1209 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 1171 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
1210 1172
1211 if (is_ht40_not_allowed(channel_after)) 1173 if (!is_ht40_allowed(channel_after))
1212 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; 1174 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
1213 else 1175 else
1214 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 1176 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
1215} 1177}
1216 1178
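
Passing the channel pointer directly also makes the ±20 MHz neighbour scan above easier to follow: HT40- and HT40+ each require that the adjacent channel exists in the band and is itself HT40-capable. A simplified model of that scan:

    #include <stdio.h>

    struct chan { int center_mhz; int ht40_ok; };

    static const struct chan *find_chan(const struct chan *band, int n, int mhz)
    {
        int i;

        for (i = 0; i < n; i++)
            if (band[i].center_mhz == mhz)
                return &band[i];
        return NULL;    /* no such channel: that HT40 direction is out */
    }

    int main(void)
    {
        struct chan band[] = { { 5180, 1 }, { 5200, 1 }, { 5220, 0 } };
        const struct chan *below = find_chan(band, 3, 5200 - 20);
        const struct chan *above = find_chan(band, 3, 5200 + 20);

        printf("5200: HT40-: %d, HT40+: %d\n",
               below && below->ht40_ok, above && above->ht40_ok);
        return 0;
    }
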
1217static void reg_process_ht_flags_band(struct wiphy *wiphy, 1179static void reg_process_ht_flags_band(struct wiphy *wiphy,
1218 enum ieee80211_band band) 1180 struct ieee80211_supported_band *sband)
1219{ 1181{
1220 unsigned int i; 1182 unsigned int i;
1221 struct ieee80211_supported_band *sband;
1222 1183
1223 BUG_ON(!wiphy->bands[band]); 1184 if (!sband)
1224 sband = wiphy->bands[band]; 1185 return;
1225 1186
1226 for (i = 0; i < sband->n_channels; i++) 1187 for (i = 0; i < sband->n_channels; i++)
1227 reg_process_ht_flags_channel(wiphy, band, i); 1188 reg_process_ht_flags_channel(wiphy, &sband->channels[i]);
1228} 1189}
1229 1190
1230static void reg_process_ht_flags(struct wiphy *wiphy) 1191static void reg_process_ht_flags(struct wiphy *wiphy)
@@ -1234,34 +1195,29 @@ static void reg_process_ht_flags(struct wiphy *wiphy)
1234 if (!wiphy) 1195 if (!wiphy)
1235 return; 1196 return;
1236 1197
1237 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1198 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
1238 if (wiphy->bands[band]) 1199 reg_process_ht_flags_band(wiphy, wiphy->bands[band]);
1239 reg_process_ht_flags_band(wiphy, band);
1240 }
1241
1242} 1200}
1243 1201
1244static void wiphy_update_regulatory(struct wiphy *wiphy, 1202static void wiphy_update_regulatory(struct wiphy *wiphy,
1245 enum nl80211_reg_initiator initiator) 1203 enum nl80211_reg_initiator initiator)
1246{ 1204{
1247 enum ieee80211_band band; 1205 enum ieee80211_band band;
1248 1206 struct regulatory_request *lr = get_last_request();
1249 assert_reg_lock();
1250 1207
1251 if (ignore_reg_update(wiphy, initiator)) 1208 if (ignore_reg_update(wiphy, initiator))
1252 return; 1209 return;
1253 1210
1254 last_request->dfs_region = cfg80211_regdomain->dfs_region; 1211 lr->dfs_region = get_cfg80211_regdom()->dfs_region;
1255 1212
1256 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1213 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
1257 if (wiphy->bands[band]) 1214 handle_band(wiphy, initiator, wiphy->bands[band]);
1258 handle_band(wiphy, band, initiator);
1259 }
1260 1215
1261 reg_process_beacons(wiphy); 1216 reg_process_beacons(wiphy);
1262 reg_process_ht_flags(wiphy); 1217 reg_process_ht_flags(wiphy);
1218
1263 if (wiphy->reg_notifier) 1219 if (wiphy->reg_notifier)
1264 wiphy->reg_notifier(wiphy, last_request); 1220 wiphy->reg_notifier(wiphy, lr);
1265} 1221}
1266 1222
1267static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) 1223static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
@@ -1269,6 +1225,8 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
1269 struct cfg80211_registered_device *rdev; 1225 struct cfg80211_registered_device *rdev;
1270 struct wiphy *wiphy; 1226 struct wiphy *wiphy;
1271 1227
1228 assert_cfg80211_lock();
1229
1272 list_for_each_entry(rdev, &cfg80211_rdev_list, list) { 1230 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
1273 wiphy = &rdev->wiphy; 1231 wiphy = &rdev->wiphy;
1274 wiphy_update_regulatory(wiphy, initiator); 1232 wiphy_update_regulatory(wiphy, initiator);
@@ -1280,53 +1238,40 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
1280 if (initiator == NL80211_REGDOM_SET_BY_CORE && 1238 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
1281 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && 1239 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
1282 wiphy->reg_notifier) 1240 wiphy->reg_notifier)
1283 wiphy->reg_notifier(wiphy, last_request); 1241 wiphy->reg_notifier(wiphy, get_last_request());
1284 } 1242 }
1285} 1243}
1286 1244
1287static void handle_channel_custom(struct wiphy *wiphy, 1245static void handle_channel_custom(struct wiphy *wiphy,
1288 enum ieee80211_band band, 1246 struct ieee80211_channel *chan,
1289 unsigned int chan_idx,
1290 const struct ieee80211_regdomain *regd) 1247 const struct ieee80211_regdomain *regd)
1291{ 1248{
1292 int r;
1293 u32 desired_bw_khz = MHZ_TO_KHZ(20);
1294 u32 bw_flags = 0; 1249 u32 bw_flags = 0;
1295 const struct ieee80211_reg_rule *reg_rule = NULL; 1250 const struct ieee80211_reg_rule *reg_rule = NULL;
1296 const struct ieee80211_power_rule *power_rule = NULL; 1251 const struct ieee80211_power_rule *power_rule = NULL;
1297 const struct ieee80211_freq_range *freq_range = NULL; 1252 const struct ieee80211_freq_range *freq_range = NULL;
1298 struct ieee80211_supported_band *sband;
1299 struct ieee80211_channel *chan;
1300 1253
1301 assert_reg_lock(); 1254 reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
1255 regd);
1302 1256
1303 sband = wiphy->bands[band]; 1257 if (IS_ERR(reg_rule)) {
1304 BUG_ON(chan_idx >= sband->n_channels); 1258 REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
1305 chan = &sband->channels[chan_idx]; 1259 chan->center_freq);
1306
1307 r = freq_reg_info_regd(wiphy,
1308 MHZ_TO_KHZ(chan->center_freq),
1309 desired_bw_khz,
1310 &reg_rule,
1311 regd);
1312
1313 if (r) {
1314 REG_DBG_PRINT("Disabling freq %d MHz as custom "
1315 "regd has no rule that fits a %d MHz "
1316 "wide channel\n",
1317 chan->center_freq,
1318 KHZ_TO_MHZ(desired_bw_khz));
1319 chan->flags = IEEE80211_CHAN_DISABLED; 1260 chan->flags = IEEE80211_CHAN_DISABLED;
1320 return; 1261 return;
1321 } 1262 }
1322 1263
1323 chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule); 1264 chan_reg_rule_print_dbg(chan, reg_rule);
1324 1265
1325 power_rule = &reg_rule->power_rule; 1266 power_rule = &reg_rule->power_rule;
1326 freq_range = &reg_rule->freq_range; 1267 freq_range = &reg_rule->freq_range;
1327 1268
1328 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) 1269 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1329 bw_flags = IEEE80211_CHAN_NO_HT40; 1270 bw_flags = IEEE80211_CHAN_NO_HT40;
1271 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
1272 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1273 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
1274 bw_flags |= IEEE80211_CHAN_NO_160MHZ;
1330 1275
1331 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; 1276 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1332 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1277 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
@@ -1334,17 +1279,17 @@ static void handle_channel_custom(struct wiphy *wiphy,
1334 (int) MBM_TO_DBM(power_rule->max_eirp); 1279 (int) MBM_TO_DBM(power_rule->max_eirp);
1335} 1280}
1336 1281
1337static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band, 1282static void handle_band_custom(struct wiphy *wiphy,
1283 struct ieee80211_supported_band *sband,
1338 const struct ieee80211_regdomain *regd) 1284 const struct ieee80211_regdomain *regd)
1339{ 1285{
1340 unsigned int i; 1286 unsigned int i;
1341 struct ieee80211_supported_band *sband;
1342 1287
1343 BUG_ON(!wiphy->bands[band]); 1288 if (!sband)
1344 sband = wiphy->bands[band]; 1289 return;
1345 1290
1346 for (i = 0; i < sband->n_channels; i++) 1291 for (i = 0; i < sband->n_channels; i++)
1347 handle_channel_custom(wiphy, band, i, regd); 1292 handle_channel_custom(wiphy, &sband->channels[i], regd);
1348} 1293}
1349 1294
1350/* Used by drivers prior to wiphy registration */ 1295/* Used by drivers prior to wiphy registration */
@@ -1354,60 +1299,50 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1354 enum ieee80211_band band; 1299 enum ieee80211_band band;
1355 unsigned int bands_set = 0; 1300 unsigned int bands_set = 0;
1356 1301
1357 mutex_lock(&reg_mutex);
1358 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1302 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1359 if (!wiphy->bands[band]) 1303 if (!wiphy->bands[band])
1360 continue; 1304 continue;
1361 handle_band_custom(wiphy, band, regd); 1305 handle_band_custom(wiphy, wiphy->bands[band], regd);
1362 bands_set++; 1306 bands_set++;
1363 } 1307 }
1364 mutex_unlock(&reg_mutex);
1365 1308
1366 /* 1309 /*
1367 * no point in calling this if it won't have any effect 1310 * no point in calling this if it won't have any effect
1368 * on your device's supportd bands. 1311 * on your device's supported bands.
1369 */ 1312 */
1370 WARN_ON(!bands_set); 1313 WARN_ON(!bands_set);
1371} 1314}
1372EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1315EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1373 1316
1374/*
1375 * Return value which can be used by ignore_request() to indicate
1376 * it has been determined we should intersect two regulatory domains
1377 */
1378#define REG_INTERSECT 1
1379
1380/* This has the logic which determines when a new request 1317/* This has the logic which determines when a new request
1381 * should be ignored. */ 1318 * should be ignored. */
1382static int ignore_request(struct wiphy *wiphy, 1319static enum reg_request_treatment
1320get_reg_request_treatment(struct wiphy *wiphy,
1383 struct regulatory_request *pending_request) 1321 struct regulatory_request *pending_request)
1384{ 1322{
1385 struct wiphy *last_wiphy = NULL; 1323 struct wiphy *last_wiphy = NULL;
1386 1324 struct regulatory_request *lr = get_last_request();
1387 assert_cfg80211_lock();
1388 1325
1389 /* All initial requests are respected */ 1326 /* All initial requests are respected */
1390 if (!last_request) 1327 if (!lr)
1391 return 0; 1328 return REG_REQ_OK;
1392 1329
1393 switch (pending_request->initiator) { 1330 switch (pending_request->initiator) {
1394 case NL80211_REGDOM_SET_BY_CORE: 1331 case NL80211_REGDOM_SET_BY_CORE:
1395 return 0; 1332 return REG_REQ_OK;
1396 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 1333 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1397 1334 if (reg_request_cell_base(lr)) {
1398 if (reg_request_cell_base(last_request)) {
1399 /* Trust a Cell base station over the AP's country IE */ 1335 /* Trust a Cell base station over the AP's country IE */
1400 if (regdom_changes(pending_request->alpha2)) 1336 if (regdom_changes(pending_request->alpha2))
1401 return -EOPNOTSUPP; 1337 return REG_REQ_IGNORE;
1402 return -EALREADY; 1338 return REG_REQ_ALREADY_SET;
1403 } 1339 }
1404 1340
1405 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1341 last_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
1406 1342
1407 if (unlikely(!is_an_alpha2(pending_request->alpha2))) 1343 if (unlikely(!is_an_alpha2(pending_request->alpha2)))
1408 return -EINVAL; 1344 return REG_REQ_IGNORE;
1409 if (last_request->initiator == 1345 if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
1410 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
1411 if (last_wiphy != wiphy) { 1346 if (last_wiphy != wiphy) {
1412 /* 1347 /*
1413 * Two cards with two APs claiming different 1348 * Two cards with two APs claiming different
@@ -1416,23 +1351,23 @@ static int ignore_request(struct wiphy *wiphy,
1416 * to be correct. Reject second one for now. 1351 * to be correct. Reject second one for now.
1417 */ 1352 */
1418 if (regdom_changes(pending_request->alpha2)) 1353 if (regdom_changes(pending_request->alpha2))
1419 return -EOPNOTSUPP; 1354 return REG_REQ_IGNORE;
1420 return -EALREADY; 1355 return REG_REQ_ALREADY_SET;
1421 } 1356 }
1422 /* 1357 /*
1423 * Two consecutive Country IE hints on the same wiphy. 1358 * Two consecutive Country IE hints on the same wiphy.
1424 * This should be picked up early by the driver/stack 1359 * This should be picked up early by the driver/stack
1425 */ 1360 */
1426 if (WARN_ON(regdom_changes(pending_request->alpha2))) 1361 if (WARN_ON(regdom_changes(pending_request->alpha2)))
1427 return 0; 1362 return REG_REQ_OK;
1428 return -EALREADY; 1363 return REG_REQ_ALREADY_SET;
1429 } 1364 }
1430 return 0; 1365 return REG_REQ_OK;
1431 case NL80211_REGDOM_SET_BY_DRIVER: 1366 case NL80211_REGDOM_SET_BY_DRIVER:
1432 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { 1367 if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) {
1433 if (regdom_changes(pending_request->alpha2)) 1368 if (regdom_changes(pending_request->alpha2))
1434 return 0; 1369 return REG_REQ_OK;
1435 return -EALREADY; 1370 return REG_REQ_ALREADY_SET;
1436 } 1371 }
1437 1372
1438 /* 1373 /*
@@ -1440,59 +1375,59 @@ static int ignore_request(struct wiphy *wiphy,
1440 * back in or if you add a new device for which the previously 1375 * back in or if you add a new device for which the previously
1441 * loaded card also agrees on the regulatory domain. 1376 * loaded card also agrees on the regulatory domain.
1442 */ 1377 */
1443 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && 1378 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
1444 !regdom_changes(pending_request->alpha2)) 1379 !regdom_changes(pending_request->alpha2))
1445 return -EALREADY; 1380 return REG_REQ_ALREADY_SET;
1446 1381
1447 return REG_INTERSECT; 1382 return REG_REQ_INTERSECT;
1448 case NL80211_REGDOM_SET_BY_USER: 1383 case NL80211_REGDOM_SET_BY_USER:
1449 if (reg_request_cell_base(pending_request)) 1384 if (reg_request_cell_base(pending_request))
1450 return reg_ignore_cell_hint(pending_request); 1385 return reg_ignore_cell_hint(pending_request);
1451 1386
1452 if (reg_request_cell_base(last_request)) 1387 if (reg_request_cell_base(lr))
1453 return -EOPNOTSUPP; 1388 return REG_REQ_IGNORE;
1454 1389
1455 if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) 1390 if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
1456 return REG_INTERSECT; 1391 return REG_REQ_INTERSECT;
1457 /* 1392 /*
1458 * If the user knows better the user should set the regdom 1393 * If the user knows better the user should set the regdom
1459 * to their country before the IE is picked up 1394 * to their country before the IE is picked up
1460 */ 1395 */
1461 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER && 1396 if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
1462 last_request->intersect) 1397 lr->intersect)
1463 return -EOPNOTSUPP; 1398 return REG_REQ_IGNORE;
1464 /* 1399 /*
1465 * Process user requests only after previous user/driver/core 1400 * Process user requests only after previous user/driver/core
1466 * requests have been processed 1401 * requests have been processed
1467 */ 1402 */
1468 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE || 1403 if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE ||
1469 last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || 1404 lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
1470 last_request->initiator == NL80211_REGDOM_SET_BY_USER) { 1405 lr->initiator == NL80211_REGDOM_SET_BY_USER) &&
1471 if (regdom_changes(last_request->alpha2)) 1406 regdom_changes(lr->alpha2))
1472 return -EAGAIN; 1407 return REG_REQ_IGNORE;
1473 }
1474 1408
1475 if (!regdom_changes(pending_request->alpha2)) 1409 if (!regdom_changes(pending_request->alpha2))
1476 return -EALREADY; 1410 return REG_REQ_ALREADY_SET;
1477 1411
1478 return 0; 1412 return REG_REQ_OK;
1479 } 1413 }
1480 1414
1481 return -EINVAL; 1415 return REG_REQ_IGNORE;
1482} 1416}
1483 1417
1484static void reg_set_request_processed(void) 1418static void reg_set_request_processed(void)
1485{ 1419{
1486 bool need_more_processing = false; 1420 bool need_more_processing = false;
1421 struct regulatory_request *lr = get_last_request();
1487 1422
1488 last_request->processed = true; 1423 lr->processed = true;
1489 1424
1490 spin_lock(&reg_requests_lock); 1425 spin_lock(&reg_requests_lock);
1491 if (!list_empty(&reg_requests_list)) 1426 if (!list_empty(&reg_requests_list))
1492 need_more_processing = true; 1427 need_more_processing = true;
1493 spin_unlock(&reg_requests_lock); 1428 spin_unlock(&reg_requests_lock);
1494 1429
1495 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) 1430 if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
1496 cancel_delayed_work(&reg_timeout); 1431 cancel_delayed_work(&reg_timeout);
1497 1432
1498 if (need_more_processing) 1433 if (need_more_processing)
@@ -1508,116 +1443,122 @@ static void reg_set_request_processed(void)
1508 * The Wireless subsystem can use this function to hint to the wireless core 1443 * The Wireless subsystem can use this function to hint to the wireless core
1509 * what it believes should be the current regulatory domain. 1444 * what it believes should be the current regulatory domain.
1510 * 1445 *
1511 * Returns zero if all went fine, %-EALREADY if a regulatory domain had 1446 * Returns one of the different reg request treatment values.
1512 * already been set or other standard error codes.
1513 * 1447 *
1514 * Caller must hold &cfg80211_mutex and &reg_mutex 1448 * Caller must hold &reg_mutex
1515 */ 1449 */
1516static int __regulatory_hint(struct wiphy *wiphy, 1450static enum reg_request_treatment
1517 struct regulatory_request *pending_request) 1451__regulatory_hint(struct wiphy *wiphy,
1452 struct regulatory_request *pending_request)
1518{ 1453{
1454 const struct ieee80211_regdomain *regd;
1519 bool intersect = false; 1455 bool intersect = false;
1520 int r = 0; 1456 enum reg_request_treatment treatment;
1521 1457 struct regulatory_request *lr;
1522 assert_cfg80211_lock();
1523 1458
1524 r = ignore_request(wiphy, pending_request); 1459 treatment = get_reg_request_treatment(wiphy, pending_request);
1525 1460
1526 if (r == REG_INTERSECT) { 1461 switch (treatment) {
1462 case REG_REQ_INTERSECT:
1527 if (pending_request->initiator == 1463 if (pending_request->initiator ==
1528 NL80211_REGDOM_SET_BY_DRIVER) { 1464 NL80211_REGDOM_SET_BY_DRIVER) {
1529 r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); 1465 regd = reg_copy_regd(get_cfg80211_regdom());
1530 if (r) { 1466 if (IS_ERR(regd)) {
1531 kfree(pending_request); 1467 kfree(pending_request);
1532 return r; 1468 return PTR_ERR(regd);
1533 } 1469 }
1470 rcu_assign_pointer(wiphy->regd, regd);
1534 } 1471 }
1535 intersect = true; 1472 intersect = true;
1536 } else if (r) { 1473 break;
1474 case REG_REQ_OK:
1475 break;
1476 default:
1537 /* 1477 /*
1538 * If the regulatory domain being requested by the 1478 * If the regulatory domain being requested by the
1539 * driver has already been set just copy it to the 1479 * driver has already been set just copy it to the
1540 * wiphy 1480 * wiphy
1541 */ 1481 */
1542 if (r == -EALREADY && 1482 if (treatment == REG_REQ_ALREADY_SET &&
1543 pending_request->initiator == 1483 pending_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
1544 NL80211_REGDOM_SET_BY_DRIVER) { 1484 regd = reg_copy_regd(get_cfg80211_regdom());
1545 r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); 1485 if (IS_ERR(regd)) {
1546 if (r) {
1547 kfree(pending_request); 1486 kfree(pending_request);
1548 return r; 1487 return REG_REQ_IGNORE;
1549 } 1488 }
1550 r = -EALREADY; 1489 treatment = REG_REQ_ALREADY_SET;
1490 rcu_assign_pointer(wiphy->regd, regd);
1551 goto new_request; 1491 goto new_request;
1552 } 1492 }
1553 kfree(pending_request); 1493 kfree(pending_request);
1554 return r; 1494 return treatment;
1555 } 1495 }
1556 1496
1557new_request: 1497new_request:
1558 if (last_request != &core_request_world) 1498 lr = get_last_request();
1559 kfree(last_request); 1499 if (lr != &core_request_world && lr)
1500 kfree_rcu(lr, rcu_head);
1560 1501
1561 last_request = pending_request; 1502 pending_request->intersect = intersect;
1562 last_request->intersect = intersect; 1503 pending_request->processed = false;
1504 rcu_assign_pointer(last_request, pending_request);
1505 lr = pending_request;
1563 1506
1564 pending_request = NULL; 1507 pending_request = NULL;
1565 1508
1566 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) { 1509 if (lr->initiator == NL80211_REGDOM_SET_BY_USER) {
1567 user_alpha2[0] = last_request->alpha2[0]; 1510 user_alpha2[0] = lr->alpha2[0];
1568 user_alpha2[1] = last_request->alpha2[1]; 1511 user_alpha2[1] = lr->alpha2[1];
1569 } 1512 }
1570 1513
1571 /* When r == REG_INTERSECT we do need to call CRDA */ 1514 /* When treatment == REG_REQ_INTERSECT we do need to call CRDA */
1572 if (r < 0) { 1515 if (treatment != REG_REQ_OK && treatment != REG_REQ_INTERSECT) {
1573 /* 1516 /*
1574 * Since CRDA will not be called in this case as we already 1517 * Since CRDA will not be called in this case as we already
1575 * have applied the requested regulatory domain before we just 1518 * have applied the requested regulatory domain before we just
1576 * inform userspace we have processed the request 1519 * inform userspace we have processed the request
1577 */ 1520 */
1578 if (r == -EALREADY) { 1521 if (treatment == REG_REQ_ALREADY_SET) {
1579 nl80211_send_reg_change_event(last_request); 1522 nl80211_send_reg_change_event(lr);
1580 reg_set_request_processed(); 1523 reg_set_request_processed();
1581 } 1524 }
1582 return r; 1525 return treatment;
1583 } 1526 }
1584 1527
1585 return call_crda(last_request->alpha2); 1528 if (call_crda(lr->alpha2))
1529 return REG_REQ_IGNORE;
1530 return REG_REQ_OK;
1586} 1531}
1587 1532
1588/* This processes *all* regulatory hints */ 1533/* This processes *all* regulatory hints */
1589static void reg_process_hint(struct regulatory_request *reg_request, 1534static void reg_process_hint(struct regulatory_request *reg_request,
1590 enum nl80211_reg_initiator reg_initiator) 1535 enum nl80211_reg_initiator reg_initiator)
1591{ 1536{
1592 int r = 0;
1593 struct wiphy *wiphy = NULL; 1537 struct wiphy *wiphy = NULL;
1594 1538
1595 BUG_ON(!reg_request->alpha2); 1539 if (WARN_ON(!reg_request->alpha2))
1540 return;
1596 1541
1597 if (wiphy_idx_valid(reg_request->wiphy_idx)) 1542 if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
1598 wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); 1543 wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
1599 1544
1600 if (reg_initiator == NL80211_REGDOM_SET_BY_DRIVER && 1545 if (reg_initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) {
1601 !wiphy) {
1602 kfree(reg_request); 1546 kfree(reg_request);
1603 return; 1547 return;
1604 } 1548 }
1605 1549
1606 r = __regulatory_hint(wiphy, reg_request); 1550 switch (__regulatory_hint(wiphy, reg_request)) {
1607 /* This is required so that the orig_* parameters are saved */ 1551 case REG_REQ_ALREADY_SET:
1608 if (r == -EALREADY && wiphy && 1552 /* This is required so that the orig_* parameters are saved */
1609 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { 1553 if (wiphy && wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
1610 wiphy_update_regulatory(wiphy, reg_initiator); 1554 wiphy_update_regulatory(wiphy, reg_initiator);
1611 return; 1555 break;
1556 default:
1557 if (reg_initiator == NL80211_REGDOM_SET_BY_USER)
1558 schedule_delayed_work(&reg_timeout,
1559 msecs_to_jiffies(3142));
1560 break;
1612 } 1561 }
1613
1614 /*
1615 * We only time out user hints, given that they should be the only
1616 * source of bogus requests.
1617 */
1618 if (r != -EALREADY &&
1619 reg_initiator == NL80211_REGDOM_SET_BY_USER)
1620 schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
1621} 1562}
1622 1563
1623/* 1564/*
@@ -1627,15 +1568,15 @@ static void reg_process_hint(struct regulatory_request *reg_request,
1627 */ 1568 */
1628static void reg_process_pending_hints(void) 1569static void reg_process_pending_hints(void)
1629{ 1570{
1630 struct regulatory_request *reg_request; 1571 struct regulatory_request *reg_request, *lr;
1631 1572
1632 mutex_lock(&cfg80211_mutex); 1573 mutex_lock(&cfg80211_mutex);
1633 mutex_lock(&reg_mutex); 1574 mutex_lock(&reg_mutex);
1575 lr = get_last_request();
1634 1576
1635 /* When last_request->processed becomes true this will be rescheduled */ 1577 /* When last_request->processed becomes true this will be rescheduled */
1636 if (last_request && !last_request->processed) { 1578 if (lr && !lr->processed) {
1637 REG_DBG_PRINT("Pending regulatory request, waiting " 1579 REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n");
1638 "for it to be processed...\n");
1639 goto out; 1580 goto out;
1640 } 1581 }
1641 1582
@@ -1666,23 +1607,14 @@ static void reg_process_pending_beacon_hints(void)
1666 struct cfg80211_registered_device *rdev; 1607 struct cfg80211_registered_device *rdev;
1667 struct reg_beacon *pending_beacon, *tmp; 1608 struct reg_beacon *pending_beacon, *tmp;
1668 1609
1669 /*
1670 * No need to hold the reg_mutex here as we just touch wiphys
1671 * and do not read or access regulatory variables.
1672 */
1673 mutex_lock(&cfg80211_mutex); 1610 mutex_lock(&cfg80211_mutex);
1611 mutex_lock(&reg_mutex);
1674 1612
1675 /* This goes through the _pending_ beacon list */ 1613 /* This goes through the _pending_ beacon list */
1676 spin_lock_bh(&reg_pending_beacons_lock); 1614 spin_lock_bh(&reg_pending_beacons_lock);
1677 1615
1678 if (list_empty(&reg_pending_beacons)) {
1679 spin_unlock_bh(&reg_pending_beacons_lock);
1680 goto out;
1681 }
1682
1683 list_for_each_entry_safe(pending_beacon, tmp, 1616 list_for_each_entry_safe(pending_beacon, tmp,
1684 &reg_pending_beacons, list) { 1617 &reg_pending_beacons, list) {
1685
1686 list_del_init(&pending_beacon->list); 1618 list_del_init(&pending_beacon->list);
1687 1619
1688 /* Applies the beacon hint to current wiphys */ 1620 /* Applies the beacon hint to current wiphys */
@@ -1694,7 +1626,7 @@ static void reg_process_pending_beacon_hints(void)
1694 } 1626 }
1695 1627
1696 spin_unlock_bh(&reg_pending_beacons_lock); 1628 spin_unlock_bh(&reg_pending_beacons_lock);
1697out: 1629 mutex_unlock(&reg_mutex);
1698 mutex_unlock(&cfg80211_mutex); 1630 mutex_unlock(&cfg80211_mutex);
1699} 1631}
1700 1632
@@ -1706,10 +1638,8 @@ static void reg_todo(struct work_struct *work)
1706 1638
1707static void queue_regulatory_request(struct regulatory_request *request) 1639static void queue_regulatory_request(struct regulatory_request *request)
1708{ 1640{
1709 if (isalpha(request->alpha2[0])) 1641 request->alpha2[0] = toupper(request->alpha2[0]);
1710 request->alpha2[0] = toupper(request->alpha2[0]); 1642 request->alpha2[1] = toupper(request->alpha2[1]);
1711 if (isalpha(request->alpha2[1]))
1712 request->alpha2[1] = toupper(request->alpha2[1]);
1713 1643
1714 spin_lock(&reg_requests_lock); 1644 spin_lock(&reg_requests_lock);
1715 list_add_tail(&request->list, &reg_requests_list); 1645 list_add_tail(&request->list, &reg_requests_list);
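
Dropping the isalpha() guards is safe: toupper() leaves non-alphabetic bytes untouched, so the "00" world code (and any other non-letter alpha2 that reaches this path) passes through unchanged. A quick check:

    #include <ctype.h>
    #include <stdio.h>

    int main(void)
    {
        char us[2] = { 'u', 's' };
        char world[2] = { '0', '0' };   /* the world regulatory domain */

        printf("%c%c\n", toupper(us[0]), toupper(us[1]));        /* US */
        printf("%c%c\n", toupper(world[0]), toupper(world[1]));  /* 00 */
        return 0;
    }

The kernel's toupper() in <linux/ctype.h> behaves the same way for this purpose.
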
@@ -1726,8 +1656,7 @@ static int regulatory_hint_core(const char *alpha2)
1726{ 1656{
1727 struct regulatory_request *request; 1657 struct regulatory_request *request;
1728 1658
1729 request = kzalloc(sizeof(struct regulatory_request), 1659 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
1730 GFP_KERNEL);
1731 if (!request) 1660 if (!request)
1732 return -ENOMEM; 1661 return -ENOMEM;
1733 1662
@@ -1746,13 +1675,14 @@ int regulatory_hint_user(const char *alpha2,
1746{ 1675{
1747 struct regulatory_request *request; 1676 struct regulatory_request *request;
1748 1677
1749 BUG_ON(!alpha2); 1678 if (WARN_ON(!alpha2))
1679 return -EINVAL;
1750 1680
1751 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); 1681 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
1752 if (!request) 1682 if (!request)
1753 return -ENOMEM; 1683 return -ENOMEM;
1754 1684
1755 request->wiphy_idx = WIPHY_IDX_STALE; 1685 request->wiphy_idx = WIPHY_IDX_INVALID;
1756 request->alpha2[0] = alpha2[0]; 1686 request->alpha2[0] = alpha2[0];
1757 request->alpha2[1] = alpha2[1]; 1687 request->alpha2[1] = alpha2[1];
1758 request->initiator = NL80211_REGDOM_SET_BY_USER; 1688 request->initiator = NL80211_REGDOM_SET_BY_USER;
@@ -1768,8 +1698,8 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
1768{ 1698{
1769 struct regulatory_request *request; 1699 struct regulatory_request *request;
1770 1700
1771 BUG_ON(!alpha2); 1701 if (WARN_ON(!alpha2 || !wiphy))
1772 BUG_ON(!wiphy); 1702 return -EINVAL;
1773 1703
1774 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); 1704 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
1775 if (!request) 1705 if (!request)
@@ -1777,9 +1707,6 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
1777 1707
1778 request->wiphy_idx = get_wiphy_idx(wiphy); 1708 request->wiphy_idx = get_wiphy_idx(wiphy);
1779 1709
1780 /* Must have registered wiphy first */
1781 BUG_ON(!wiphy_idx_valid(request->wiphy_idx));
1782
1783 request->alpha2[0] = alpha2[0]; 1710 request->alpha2[0] = alpha2[0];
1784 request->alpha2[1] = alpha2[1]; 1711 request->alpha2[1] = alpha2[1];
1785 request->initiator = NL80211_REGDOM_SET_BY_DRIVER; 1712 request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
@@ -1794,18 +1721,17 @@ EXPORT_SYMBOL(regulatory_hint);
1794 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and 1721 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
1795 * therefore cannot iterate over the rdev list here. 1722 * therefore cannot iterate over the rdev list here.
1796 */ 1723 */
1797void regulatory_hint_11d(struct wiphy *wiphy, 1724void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band,
1798 enum ieee80211_band band, 1725 const u8 *country_ie, u8 country_ie_len)
1799 const u8 *country_ie,
1800 u8 country_ie_len)
1801{ 1726{
1802 char alpha2[2]; 1727 char alpha2[2];
1803 enum environment_cap env = ENVIRON_ANY; 1728 enum environment_cap env = ENVIRON_ANY;
1804 struct regulatory_request *request; 1729 struct regulatory_request *request, *lr;
1805 1730
1806 mutex_lock(&reg_mutex); 1731 mutex_lock(&reg_mutex);
1732 lr = get_last_request();
1807 1733
1808 if (unlikely(!last_request)) 1734 if (unlikely(!lr))
1809 goto out; 1735 goto out;
1810 1736
1811 /* IE len must be evenly divisible by 2 */ 1737 /* IE len must be evenly divisible by 2 */
@@ -1828,9 +1754,8 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1828 * We leave conflict resolution to the workqueue, where we can hold 1754 * We leave conflict resolution to the workqueue, where we can hold
1829 * cfg80211_mutex. 1755 * cfg80211_mutex.
1830 */ 1756 */
1831 if (likely(last_request->initiator == 1757 if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1832 NL80211_REGDOM_SET_BY_COUNTRY_IE && 1758 lr->wiphy_idx != WIPHY_IDX_INVALID)
1833 wiphy_idx_valid(last_request->wiphy_idx)))
1834 goto out; 1759 goto out;
1835 1760
1836 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); 1761 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
@@ -1843,12 +1768,7 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1843 request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; 1768 request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
1844 request->country_ie_env = env; 1769 request->country_ie_env = env;
1845 1770
1846 mutex_unlock(&reg_mutex);
1847
1848 queue_regulatory_request(request); 1771 queue_regulatory_request(request);
1849
1850 return;
1851
1852out: 1772out:
1853 mutex_unlock(&reg_mutex); 1773 mutex_unlock(&reg_mutex);
1854} 1774}
@@ -1863,8 +1783,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1863 if (is_user_regdom_saved()) { 1783 if (is_user_regdom_saved()) {
1864 /* Unless we're asked to ignore it and reset it */ 1784 /* Unless we're asked to ignore it and reset it */
1865 if (reset_user) { 1785 if (reset_user) {
1866 REG_DBG_PRINT("Restoring regulatory settings " 1786 REG_DBG_PRINT("Restoring regulatory settings including user preference\n");
1867 "including user preference\n");
1868 user_alpha2[0] = '9'; 1787 user_alpha2[0] = '9';
1869 user_alpha2[1] = '7'; 1788 user_alpha2[1] = '7';
1870 1789
@@ -1874,26 +1793,20 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1874 * back as they were for a full restore. 1793 * back as they were for a full restore.
1875 */ 1794 */
1876 if (!is_world_regdom(ieee80211_regdom)) { 1795 if (!is_world_regdom(ieee80211_regdom)) {
1877 REG_DBG_PRINT("Keeping preference on " 1796 REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
1878 "module parameter ieee80211_regdom: %c%c\n", 1797 ieee80211_regdom[0], ieee80211_regdom[1]);
1879 ieee80211_regdom[0],
1880 ieee80211_regdom[1]);
1881 alpha2[0] = ieee80211_regdom[0]; 1798 alpha2[0] = ieee80211_regdom[0];
1882 alpha2[1] = ieee80211_regdom[1]; 1799 alpha2[1] = ieee80211_regdom[1];
1883 } 1800 }
1884 } else { 1801 } else {
1885 REG_DBG_PRINT("Restoring regulatory settings " 1802 REG_DBG_PRINT("Restoring regulatory settings while preserving user preference for: %c%c\n",
1886 "while preserving user preference for: %c%c\n", 1803 user_alpha2[0], user_alpha2[1]);
1887 user_alpha2[0],
1888 user_alpha2[1]);
1889 alpha2[0] = user_alpha2[0]; 1804 alpha2[0] = user_alpha2[0];
1890 alpha2[1] = user_alpha2[1]; 1805 alpha2[1] = user_alpha2[1];
1891 } 1806 }
1892 } else if (!is_world_regdom(ieee80211_regdom)) { 1807 } else if (!is_world_regdom(ieee80211_regdom)) {
1893 REG_DBG_PRINT("Keeping preference on " 1808 REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
1894 "module parameter ieee80211_regdom: %c%c\n", 1809 ieee80211_regdom[0], ieee80211_regdom[1]);
1895 ieee80211_regdom[0],
1896 ieee80211_regdom[1]);
1897 alpha2[0] = ieee80211_regdom[0]; 1810 alpha2[0] = ieee80211_regdom[0];
1898 alpha2[1] = ieee80211_regdom[1]; 1811 alpha2[1] = ieee80211_regdom[1];
1899 } else 1812 } else
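
restore_alpha2() relies on the sentinel "97": two ASCII digits can never form a valid ISO 3166-1 alpha2 country code, so writing '9','7' into user_alpha2 marks it as empty. A simplified sketch of the convention behind the is_user_regdom_saved() check the function starts with:

#include <stdbool.h>

static char user_alpha2[2] = { '9', '7' };      /* "97" = no user hint saved */

static bool is_user_regdom_saved(void)
{
        /* digits are not valid alpha2 codes, so "97" safely means "unset" */
        return !(user_alpha2[0] == '9' && user_alpha2[1] == '7');
}
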
@@ -1948,7 +1861,7 @@ static void restore_regulatory_settings(bool reset_user)
1948 mutex_lock(&cfg80211_mutex); 1861 mutex_lock(&cfg80211_mutex);
1949 mutex_lock(&reg_mutex); 1862 mutex_lock(&reg_mutex);
1950 1863
1951 reset_regdomains(true); 1864 reset_regdomains(true, &world_regdom);
1952 restore_alpha2(alpha2, reset_user); 1865 restore_alpha2(alpha2, reset_user);
1953 1866
1954 /* 1867 /*
@@ -1958,49 +1871,35 @@ static void restore_regulatory_settings(bool reset_user)
1958 * settings. 1871 * settings.
1959 */ 1872 */
1960 spin_lock(&reg_requests_lock); 1873 spin_lock(&reg_requests_lock);
1961 if (!list_empty(&reg_requests_list)) { 1874 list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
1962 list_for_each_entry_safe(reg_request, tmp, 1875 if (reg_request->initiator != NL80211_REGDOM_SET_BY_USER)
1963 &reg_requests_list, list) { 1876 continue;
1964 if (reg_request->initiator != 1877 list_move_tail(&reg_request->list, &tmp_reg_req_list);
1965 NL80211_REGDOM_SET_BY_USER)
1966 continue;
1967 list_move_tail(&reg_request->list, &tmp_reg_req_list);
1968 }
1969 } 1878 }
1970 spin_unlock(&reg_requests_lock); 1879 spin_unlock(&reg_requests_lock);
1971 1880
1972 /* Clear beacon hints */ 1881 /* Clear beacon hints */
1973 spin_lock_bh(&reg_pending_beacons_lock); 1882 spin_lock_bh(&reg_pending_beacons_lock);
1974 if (!list_empty(&reg_pending_beacons)) { 1883 list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
1975 list_for_each_entry_safe(reg_beacon, btmp, 1884 list_del(&reg_beacon->list);
1976 &reg_pending_beacons, list) { 1885 kfree(reg_beacon);
1977 list_del(&reg_beacon->list);
1978 kfree(reg_beacon);
1979 }
1980 } 1886 }
1981 spin_unlock_bh(&reg_pending_beacons_lock); 1887 spin_unlock_bh(&reg_pending_beacons_lock);
1982 1888
1983 if (!list_empty(&reg_beacon_list)) { 1889 list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) {
1984 list_for_each_entry_safe(reg_beacon, btmp, 1890 list_del(&reg_beacon->list);
1985 &reg_beacon_list, list) { 1891 kfree(reg_beacon);
1986 list_del(&reg_beacon->list);
1987 kfree(reg_beacon);
1988 }
1989 } 1892 }
1990 1893
1991 /* First restore to the basic regulatory settings */ 1894 /* First restore to the basic regulatory settings */
1992 cfg80211_regdomain = cfg80211_world_regdom; 1895 world_alpha2[0] = cfg80211_world_regdom->alpha2[0];
1993 world_alpha2[0] = cfg80211_regdomain->alpha2[0]; 1896 world_alpha2[1] = cfg80211_world_regdom->alpha2[1];
1994 world_alpha2[1] = cfg80211_regdomain->alpha2[1];
1995 1897
1996 list_for_each_entry(rdev, &cfg80211_rdev_list, list) { 1898 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
1997 if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY) 1899 if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
1998 restore_custom_reg_settings(&rdev->wiphy); 1900 restore_custom_reg_settings(&rdev->wiphy);
1999 } 1901 }
2000 1902
2001 mutex_unlock(&reg_mutex);
2002 mutex_unlock(&cfg80211_mutex);
2003
2004 regulatory_hint_core(world_alpha2); 1903 regulatory_hint_core(world_alpha2);
2005 1904
2006 /* 1905 /*
@@ -2011,20 +1910,8 @@ static void restore_regulatory_settings(bool reset_user)
2011 if (is_an_alpha2(alpha2)) 1910 if (is_an_alpha2(alpha2))
2012 regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER); 1911 regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
2013 1912
2014 if (list_empty(&tmp_reg_req_list))
2015 return;
2016
2017 mutex_lock(&cfg80211_mutex);
2018 mutex_lock(&reg_mutex);
2019
2020 spin_lock(&reg_requests_lock); 1913 spin_lock(&reg_requests_lock);
2021 list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) { 1914 list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
2022 REG_DBG_PRINT("Adding request for country %c%c back "
2023 "into the queue\n",
2024 reg_request->alpha2[0],
2025 reg_request->alpha2[1]);
2026 list_move_tail(&reg_request->list, &reg_requests_list);
2027 }
2028 spin_unlock(&reg_requests_lock); 1915 spin_unlock(&reg_requests_lock);
2029 1916
2030 mutex_unlock(&reg_mutex); 1917 mutex_unlock(&reg_mutex);
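
Replacing the element-by-element list_move_tail() loop with list_splice_tail_init() turns the requeue into a constant-time pointer swap that also leaves the temporary list reusable. A userspace model of the splice, with struct node standing in for struct list_head:

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

/* Move every entry of src onto the tail of dst in O(1); src ends empty. */
static void splice_tail_init(struct node *src, struct node *dst)
{
        if (src->next == src)           /* empty source: nothing to do */
                return;
        src->next->prev = dst->prev;    /* first src node after dst's last */
        dst->prev->next = src->next;
        src->prev->next = dst;          /* last src node points at dst head */
        dst->prev = src->prev;
        list_init(src);                 /* temporary list is reusable again */
}
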
@@ -2037,8 +1924,7 @@ static void restore_regulatory_settings(bool reset_user)
2037 1924
2038void regulatory_hint_disconnect(void) 1925void regulatory_hint_disconnect(void)
2039{ 1926{
2040 REG_DBG_PRINT("All devices are disconnected, going to " 1927 REG_DBG_PRINT("All devices are disconnected, going to restore regulatory settings\n");
2041 "restore regulatory settings\n");
2042 restore_regulatory_settings(false); 1928 restore_regulatory_settings(false);
2043} 1929}
2044 1930
@@ -2051,31 +1937,48 @@ static bool freq_is_chan_12_13_14(u16 freq)
2051 return false; 1937 return false;
2052} 1938}
2053 1939
1940static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan)
1941{
1942 struct reg_beacon *pending_beacon;
1943
1944 list_for_each_entry(pending_beacon, &reg_pending_beacons, list)
1945 if (beacon_chan->center_freq ==
1946 pending_beacon->chan.center_freq)
1947 return true;
1948 return false;
1949}
1950
2054int regulatory_hint_found_beacon(struct wiphy *wiphy, 1951int regulatory_hint_found_beacon(struct wiphy *wiphy,
2055 struct ieee80211_channel *beacon_chan, 1952 struct ieee80211_channel *beacon_chan,
2056 gfp_t gfp) 1953 gfp_t gfp)
2057{ 1954{
2058 struct reg_beacon *reg_beacon; 1955 struct reg_beacon *reg_beacon;
1956 bool processing;
2059 1957
2060 if (likely((beacon_chan->beacon_found || 1958 if (beacon_chan->beacon_found ||
2061 (beacon_chan->flags & IEEE80211_CHAN_RADAR) || 1959 beacon_chan->flags & IEEE80211_CHAN_RADAR ||
2062 (beacon_chan->band == IEEE80211_BAND_2GHZ && 1960 (beacon_chan->band == IEEE80211_BAND_2GHZ &&
2063 !freq_is_chan_12_13_14(beacon_chan->center_freq))))) 1961 !freq_is_chan_12_13_14(beacon_chan->center_freq)))
1962 return 0;
1963
1964 spin_lock_bh(&reg_pending_beacons_lock);
1965 processing = pending_reg_beacon(beacon_chan);
1966 spin_unlock_bh(&reg_pending_beacons_lock);
1967
1968 if (processing)
2064 return 0; 1969 return 0;
2065 1970
2066 reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp); 1971 reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
2067 if (!reg_beacon) 1972 if (!reg_beacon)
2068 return -ENOMEM; 1973 return -ENOMEM;
2069 1974
2070 REG_DBG_PRINT("Found new beacon on " 1975 REG_DBG_PRINT("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
2071 "frequency: %d MHz (Ch %d) on %s\n",
2072 beacon_chan->center_freq, 1976 beacon_chan->center_freq,
2073 ieee80211_frequency_to_channel(beacon_chan->center_freq), 1977 ieee80211_frequency_to_channel(beacon_chan->center_freq),
2074 wiphy_name(wiphy)); 1978 wiphy_name(wiphy));
2075 1979
2076 memcpy(&reg_beacon->chan, beacon_chan, 1980 memcpy(&reg_beacon->chan, beacon_chan,
2077 sizeof(struct ieee80211_channel)); 1981 sizeof(struct ieee80211_channel));
2078
2079 1982
2080 /* 1983 /*
2081 * Since we can be called from BH or non-BH context 1984
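
The _bh spinlock variants around reg_pending_beacons exist precisely because of the mixed calling contexts this comment describes: disabling bottom halves while the lock is held stops a softirq on the same CPU from re-entering the list and deadlocking. The shape of the producer side (the list_add_tail() is a sketch of what follows this hunk, not a quote):

        spin_lock_bh(&reg_pending_beacons_lock);
        list_add_tail(&reg_beacon->list, &reg_pending_beacons);
        spin_unlock_bh(&reg_pending_beacons_lock);
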
@@ -2155,21 +2058,19 @@ static void print_dfs_region(u8 dfs_region)
2155 pr_info(" DFS Master region JP"); 2058 pr_info(" DFS Master region JP");
2156 break; 2059 break;
2157 default: 2060 default:
2158 pr_info(" DFS Master region Uknown"); 2061 pr_info(" DFS Master region Unknown");
2159 break; 2062 break;
2160 } 2063 }
2161} 2064}
2162 2065
2163static void print_regdomain(const struct ieee80211_regdomain *rd) 2066static void print_regdomain(const struct ieee80211_regdomain *rd)
2164{ 2067{
2068 struct regulatory_request *lr = get_last_request();
2165 2069
2166 if (is_intersected_alpha2(rd->alpha2)) { 2070 if (is_intersected_alpha2(rd->alpha2)) {
2167 2071 if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
2168 if (last_request->initiator ==
2169 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
2170 struct cfg80211_registered_device *rdev; 2072 struct cfg80211_registered_device *rdev;
2171 rdev = cfg80211_rdev_by_wiphy_idx( 2073 rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx);
2172 last_request->wiphy_idx);
2173 if (rdev) { 2074 if (rdev) {
2174 pr_info("Current regulatory domain updated by AP to: %c%c\n", 2075 pr_info("Current regulatory domain updated by AP to: %c%c\n",
2175 rdev->country_ie_alpha2[0], 2076 rdev->country_ie_alpha2[0],
@@ -2178,22 +2079,21 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
2178 pr_info("Current regulatory domain intersected:\n"); 2079 pr_info("Current regulatory domain intersected:\n");
2179 } else 2080 } else
2180 pr_info("Current regulatory domain intersected:\n"); 2081 pr_info("Current regulatory domain intersected:\n");
2181 } else if (is_world_regdom(rd->alpha2)) 2082 } else if (is_world_regdom(rd->alpha2)) {
2182 pr_info("World regulatory domain updated:\n"); 2083 pr_info("World regulatory domain updated:\n");
2183 else { 2084 } else {
2184 if (is_unknown_alpha2(rd->alpha2)) 2085 if (is_unknown_alpha2(rd->alpha2))
2185 pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n"); 2086 pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
2186 else { 2087 else {
2187 if (reg_request_cell_base(last_request)) 2088 if (reg_request_cell_base(lr))
2188 pr_info("Regulatory domain changed " 2089 pr_info("Regulatory domain changed to country: %c%c by Cell Station\n",
2189 "to country: %c%c by Cell Station\n",
2190 rd->alpha2[0], rd->alpha2[1]); 2090 rd->alpha2[0], rd->alpha2[1]);
2191 else 2091 else
2192 pr_info("Regulatory domain changed " 2092 pr_info("Regulatory domain changed to country: %c%c\n",
2193 "to country: %c%c\n",
2194 rd->alpha2[0], rd->alpha2[1]); 2093 rd->alpha2[0], rd->alpha2[1]);
2195 } 2094 }
2196 } 2095 }
2096
2197 print_dfs_region(rd->dfs_region); 2097 print_dfs_region(rd->dfs_region);
2198 print_rd_rules(rd); 2098 print_rd_rules(rd);
2199} 2099}
@@ -2207,22 +2107,23 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd)
2207/* Takes ownership of rd only if it doesn't fail */ 2107/* Takes ownership of rd only if it doesn't fail */
2208static int __set_regdom(const struct ieee80211_regdomain *rd) 2108static int __set_regdom(const struct ieee80211_regdomain *rd)
2209{ 2109{
2110 const struct ieee80211_regdomain *regd;
2210 const struct ieee80211_regdomain *intersected_rd = NULL; 2111 const struct ieee80211_regdomain *intersected_rd = NULL;
2211 struct wiphy *request_wiphy; 2112 struct wiphy *request_wiphy;
2113 struct regulatory_request *lr = get_last_request();
2114
2212 /* Some basic sanity checks first */ 2115 /* Some basic sanity checks first */
2213 2116
2117 if (!reg_is_valid_request(rd->alpha2))
2118 return -EINVAL;
2119
2214 if (is_world_regdom(rd->alpha2)) { 2120 if (is_world_regdom(rd->alpha2)) {
2215 if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
2216 return -EINVAL;
2217 update_world_regdomain(rd); 2121 update_world_regdomain(rd);
2218 return 0; 2122 return 0;
2219 } 2123 }
2220 2124
2221 if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && 2125 if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
2222 !is_unknown_alpha2(rd->alpha2)) 2126 !is_unknown_alpha2(rd->alpha2))
2223 return -EINVAL;
2224
2225 if (!last_request)
2226 return -EINVAL; 2127 return -EINVAL;
2227 2128
2228 /* 2129 /*
@@ -2230,7 +2131,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2230 * rd is non static (it means CRDA was present and was used last) 2131 * rd is non static (it means CRDA was present and was used last)
2231 * and the pending request came in from a country IE 2132 * and the pending request came in from a country IE
2232 */ 2133 */
2233 if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { 2134 if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
2234 /* 2135 /*
2235 * If someone else asked us to change the rd let's only bother 2136
2236 * checking if the alpha2 changes if CRDA was already called 2137 * checking if the alpha2 changes if CRDA was already called
@@ -2246,29 +2147,23 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2246 * internal EEPROM data 2147 * internal EEPROM data
2247 */ 2148 */
2248 2149
2249 if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
2250 return -EINVAL;
2251
2252 if (!is_valid_rd(rd)) { 2150 if (!is_valid_rd(rd)) {
2253 pr_err("Invalid regulatory domain detected:\n"); 2151 pr_err("Invalid regulatory domain detected:\n");
2254 print_regdomain_info(rd); 2152 print_regdomain_info(rd);
2255 return -EINVAL; 2153 return -EINVAL;
2256 } 2154 }
2257 2155
2258 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2156 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
2259 if (!request_wiphy && 2157 if (!request_wiphy &&
2260 (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || 2158 (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
2261 last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) { 2159 lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
2262 schedule_delayed_work(&reg_timeout, 0); 2160 schedule_delayed_work(&reg_timeout, 0);
2263 return -ENODEV; 2161 return -ENODEV;
2264 } 2162 }
2265 2163
2266 if (!last_request->intersect) { 2164 if (!lr->intersect) {
2267 int r; 2165 if (lr->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
2268 2166 reset_regdomains(false, rd);
2269 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
2270 reset_regdomains(false);
2271 cfg80211_regdomain = rd;
2272 return 0; 2167 return 0;
2273 } 2168 }
2274 2169
@@ -2284,20 +2179,19 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2284 if (request_wiphy->regd) 2179 if (request_wiphy->regd)
2285 return -EALREADY; 2180 return -EALREADY;
2286 2181
2287 r = reg_copy_regd(&request_wiphy->regd, rd); 2182 regd = reg_copy_regd(rd);
2288 if (r) 2183 if (IS_ERR(regd))
2289 return r; 2184 return PTR_ERR(regd);
2290 2185
2291 reset_regdomains(false); 2186 rcu_assign_pointer(request_wiphy->regd, regd);
2292 cfg80211_regdomain = rd; 2187 reset_regdomains(false, rd);
2293 return 0; 2188 return 0;
2294 } 2189 }
2295 2190
2296 /* Intersection requires a bit more work */ 2191 /* Intersection requires a bit more work */
2297 2192
2298 if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { 2193 if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
2299 2194 intersected_rd = regdom_intersect(rd, get_cfg80211_regdom());
2300 intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
2301 if (!intersected_rd) 2195 if (!intersected_rd)
2302 return -EINVAL; 2196 return -EINVAL;
2303 2197
@@ -2306,15 +2200,19 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2306 * However if a driver requested this specific regulatory 2200 * However if a driver requested this specific regulatory
2307 * domain we keep it for its private use 2201 * domain we keep it for its private use
2308 */ 2202 */
2309 if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) 2203 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
2310 request_wiphy->regd = rd; 2204 const struct ieee80211_regdomain *tmp;
2311 else 2205
2206 tmp = get_wiphy_regdom(request_wiphy);
2207 rcu_assign_pointer(request_wiphy->regd, rd);
2208 rcu_free_regdom(tmp);
2209 } else {
2312 kfree(rd); 2210 kfree(rd);
2211 }
2313 2212
2314 rd = NULL; 2213 rd = NULL;
2315 2214
2316 reset_regdomains(false); 2215 reset_regdomains(false, intersected_rd);
2317 cfg80211_regdomain = intersected_rd;
2318 2216
2319 return 0; 2217 return 0;
2320 } 2218 }
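
The regdomain updates above follow the standard RCU publish/retire pattern: build the replacement, make it visible with rcu_assign_pointer(), and hand the old copy to rcu_free_regdom() so readers inside a grace period keep a valid pointer. Condensed into one hypothetical helper (not a function this diff adds):

/* Hypothetical condensation of the wiphy regd update in __set_regdom() */
static void set_wiphy_regd(struct wiphy *wiphy,
                           const struct ieee80211_regdomain *new)
{
        const struct ieee80211_regdomain *old = get_wiphy_regdom(wiphy);

        rcu_assign_pointer(wiphy->regd, new);   /* readers see old or new */
        rcu_free_regdom(old);                   /* freed after a grace period */
}
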
@@ -2326,15 +2224,15 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2326/* 2224/*
2327 * Use this call to set the current regulatory domain. Conflicts with 2225 * Use this call to set the current regulatory domain. Conflicts with
2328 * multiple drivers can be ironed out later. Caller must've already 2226 * multiple drivers can be ironed out later. Caller must've already
2329 * kmalloc'd the rd structure. Caller must hold cfg80211_mutex 2227 * kmalloc'd the rd structure.
2330 */ 2228 */
2331int set_regdom(const struct ieee80211_regdomain *rd) 2229int set_regdom(const struct ieee80211_regdomain *rd)
2332{ 2230{
2231 struct regulatory_request *lr;
2333 int r; 2232 int r;
2334 2233
2335 assert_cfg80211_lock();
2336
2337 mutex_lock(&reg_mutex); 2234 mutex_lock(&reg_mutex);
2235 lr = get_last_request();
2338 2236
2339 /* Note that this doesn't update the wiphys, this is done below */ 2237 /* Note that this doesn't update the wiphys, this is done below */
2340 r = __set_regdom(rd); 2238 r = __set_regdom(rd);
@@ -2343,51 +2241,52 @@ int set_regdom(const struct ieee80211_regdomain *rd)
2343 reg_set_request_processed(); 2241 reg_set_request_processed();
2344 2242
2345 kfree(rd); 2243 kfree(rd);
2346 mutex_unlock(&reg_mutex); 2244 goto out;
2347 return r;
2348 } 2245 }
2349 2246
2350 /* This would make this whole thing pointless */ 2247 /* This would make this whole thing pointless */
2351 if (!last_request->intersect) 2248 if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) {
2352 BUG_ON(rd != cfg80211_regdomain); 2249 r = -EINVAL;
2250 goto out;
2251 }
2353 2252
2354 /* update all wiphys now with the new established regulatory domain */ 2253 /* update all wiphys now with the new established regulatory domain */
2355 update_all_wiphy_regulatory(last_request->initiator); 2254 update_all_wiphy_regulatory(lr->initiator);
2356 2255
2357 print_regdomain(cfg80211_regdomain); 2256 print_regdomain(get_cfg80211_regdom());
2358 2257
2359 nl80211_send_reg_change_event(last_request); 2258 nl80211_send_reg_change_event(lr);
2360 2259
2361 reg_set_request_processed(); 2260 reg_set_request_processed();
2362 2261
2262 out:
2363 mutex_unlock(&reg_mutex); 2263 mutex_unlock(&reg_mutex);
2364 2264
2365 return r; 2265 return r;
2366} 2266}
2367 2267
2368#ifdef CONFIG_HOTPLUG
2369int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) 2268int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2370{ 2269{
2371 if (last_request && !last_request->processed) { 2270 struct regulatory_request *lr;
2372 if (add_uevent_var(env, "COUNTRY=%c%c", 2271 u8 alpha2[2];
2373 last_request->alpha2[0], 2272 bool add = false;
2374 last_request->alpha2[1])) 2273
2375 return -ENOMEM; 2274 rcu_read_lock();
2275 lr = get_last_request();
2276 if (lr && !lr->processed) {
2277 memcpy(alpha2, lr->alpha2, 2);
2278 add = true;
2376 } 2279 }
2280 rcu_read_unlock();
2377 2281
2282 if (add)
2283 return add_uevent_var(env, "COUNTRY=%c%c",
2284 alpha2[0], alpha2[1]);
2378 return 0; 2285 return 0;
2379} 2286}
2380#else
2381int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2382{
2383 return -ENODEV;
2384}
2385#endif /* CONFIG_HOTPLUG */
2386 2287
2387void wiphy_regulatory_register(struct wiphy *wiphy) 2288void wiphy_regulatory_register(struct wiphy *wiphy)
2388{ 2289{
2389 assert_cfg80211_lock();
2390
2391 mutex_lock(&reg_mutex); 2290 mutex_lock(&reg_mutex);
2392 2291
2393 if (!reg_dev_ignore_cell_hint(wiphy)) 2292 if (!reg_dev_ignore_cell_hint(wiphy))
@@ -2402,32 +2301,32 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
2402void wiphy_regulatory_deregister(struct wiphy *wiphy) 2301void wiphy_regulatory_deregister(struct wiphy *wiphy)
2403{ 2302{
2404 struct wiphy *request_wiphy = NULL; 2303 struct wiphy *request_wiphy = NULL;
2405 2304 struct regulatory_request *lr;
2406 assert_cfg80211_lock();
2407 2305
2408 mutex_lock(&reg_mutex); 2306 mutex_lock(&reg_mutex);
2307 lr = get_last_request();
2409 2308
2410 if (!reg_dev_ignore_cell_hint(wiphy)) 2309 if (!reg_dev_ignore_cell_hint(wiphy))
2411 reg_num_devs_support_basehint--; 2310 reg_num_devs_support_basehint--;
2412 2311
2413 kfree(wiphy->regd); 2312 rcu_free_regdom(get_wiphy_regdom(wiphy));
2313 rcu_assign_pointer(wiphy->regd, NULL);
2414 2314
2415 if (last_request) 2315 if (lr)
2416 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2316 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
2417 2317
2418 if (!request_wiphy || request_wiphy != wiphy) 2318 if (!request_wiphy || request_wiphy != wiphy)
2419 goto out; 2319 goto out;
2420 2320
2421 last_request->wiphy_idx = WIPHY_IDX_STALE; 2321 lr->wiphy_idx = WIPHY_IDX_INVALID;
2422 last_request->country_ie_env = ENVIRON_ANY; 2322 lr->country_ie_env = ENVIRON_ANY;
2423out: 2323out:
2424 mutex_unlock(&reg_mutex); 2324 mutex_unlock(&reg_mutex);
2425} 2325}
2426 2326
2427static void reg_timeout_work(struct work_struct *work) 2327static void reg_timeout_work(struct work_struct *work)
2428{ 2328{
2429 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, " 2329 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
2430 "restoring regulatory settings\n");
2431 restore_regulatory_settings(true); 2330 restore_regulatory_settings(true);
2432} 2331}
2433 2332
@@ -2446,13 +2345,13 @@ int __init regulatory_init(void)
2446 2345
2447 reg_regdb_size_check(); 2346 reg_regdb_size_check();
2448 2347
2449 cfg80211_regdomain = cfg80211_world_regdom; 2348 rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
2450 2349
2451 user_alpha2[0] = '9'; 2350 user_alpha2[0] = '9';
2452 user_alpha2[1] = '7'; 2351 user_alpha2[1] = '7';
2453 2352
2454 /* We always try to get an update for the static regdomain */ 2353 /* We always try to get an update for the static regdomain */
2455 err = regulatory_hint_core(cfg80211_regdomain->alpha2); 2354 err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
2456 if (err) { 2355 if (err) {
2457 if (err == -ENOMEM) 2356 if (err == -ENOMEM)
2458 return err; 2357 return err;
@@ -2464,10 +2363,6 @@ int __init regulatory_init(void)
2464 * errors as non-fatal. 2363 * errors as non-fatal.
2465 */ 2364 */
2466 pr_err("kobject_uevent_env() was unable to call CRDA during init\n"); 2365 pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
2467#ifdef CONFIG_CFG80211_REG_DEBUG
2468 /* We want to find out exactly why when debugging */
2469 WARN_ON(err);
2470#endif
2471 } 2366 }
2472 2367
2473 /* 2368 /*
@@ -2481,7 +2376,7 @@ int __init regulatory_init(void)
2481 return 0; 2376 return 0;
2482} 2377}
2483 2378
2484void /* __init_or_exit */ regulatory_exit(void) 2379void regulatory_exit(void)
2485{ 2380{
2486 struct regulatory_request *reg_request, *tmp; 2381 struct regulatory_request *reg_request, *tmp;
2487 struct reg_beacon *reg_beacon, *btmp; 2382 struct reg_beacon *reg_beacon, *btmp;
@@ -2489,43 +2384,27 @@ void /* __init_or_exit */ regulatory_exit(void)
2489 cancel_work_sync(&reg_work); 2384 cancel_work_sync(&reg_work);
2490 cancel_delayed_work_sync(&reg_timeout); 2385 cancel_delayed_work_sync(&reg_timeout);
2491 2386
2492 mutex_lock(&cfg80211_mutex); 2387 /* Lock to suppress warnings */
2493 mutex_lock(&reg_mutex); 2388 mutex_lock(&reg_mutex);
2494 2389 reset_regdomains(true, NULL);
2495 reset_regdomains(true); 2390 mutex_unlock(&reg_mutex);
2496 2391
2497 dev_set_uevent_suppress(&reg_pdev->dev, true); 2392 dev_set_uevent_suppress(&reg_pdev->dev, true);
2498 2393
2499 platform_device_unregister(reg_pdev); 2394 platform_device_unregister(reg_pdev);
2500 2395
2501 spin_lock_bh(&reg_pending_beacons_lock); 2396 list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
2502 if (!list_empty(&reg_pending_beacons)) { 2397 list_del(&reg_beacon->list);
2503 list_for_each_entry_safe(reg_beacon, btmp, 2398 kfree(reg_beacon);
2504 &reg_pending_beacons, list) {
2505 list_del(&reg_beacon->list);
2506 kfree(reg_beacon);
2507 }
2508 } 2399 }
2509 spin_unlock_bh(&reg_pending_beacons_lock);
2510 2400
2511 if (!list_empty(&reg_beacon_list)) { 2401 list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) {
2512 list_for_each_entry_safe(reg_beacon, btmp, 2402 list_del(&reg_beacon->list);
2513 &reg_beacon_list, list) { 2403 kfree(reg_beacon);
2514 list_del(&reg_beacon->list);
2515 kfree(reg_beacon);
2516 }
2517 } 2404 }
2518 2405
2519 spin_lock(&reg_requests_lock); 2406 list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
2520 if (!list_empty(&reg_requests_list)) { 2407 list_del(&reg_request->list);
2521 list_for_each_entry_safe(reg_request, tmp, 2408 kfree(reg_request);
2522 &reg_requests_list, list) {
2523 list_del(&reg_request->list);
2524 kfree(reg_request);
2525 }
2526 } 2409 }
2527 spin_unlock(&reg_requests_lock);
2528
2529 mutex_unlock(&reg_mutex);
2530 mutex_unlock(&cfg80211_mutex);
2531} 2410}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4c0a32ffd530..af2d5f8a5d82 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,10 +16,9 @@
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */ 17 */
18 18
19extern const struct ieee80211_regdomain *cfg80211_regdomain; 19extern const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
20 20
21bool is_world_regdom(const char *alpha2); 21bool is_world_regdom(const char *alpha2);
22bool reg_is_valid_request(const char *alpha2);
23bool reg_supported_dfs_region(u8 dfs_region); 22bool reg_supported_dfs_region(u8 dfs_region);
24 23
25int regulatory_hint_user(const char *alpha2, 24int regulatory_hint_user(const char *alpha2,
@@ -55,8 +54,8 @@ bool reg_last_request_cell_base(void);
55 * set the wiphy->disable_beacon_hints to true. 54 * set the wiphy->disable_beacon_hints to true.
56 */ 55 */
57int regulatory_hint_found_beacon(struct wiphy *wiphy, 56int regulatory_hint_found_beacon(struct wiphy *wiphy,
58 struct ieee80211_channel *beacon_chan, 57 struct ieee80211_channel *beacon_chan,
59 gfp_t gfp); 58 gfp_t gfp);
60 59
61/** 60/**
62 * regulatory_hint_11d - hints a country IE as a regulatory domain 61 * regulatory_hint_11d - hints a country IE as a regulatory domain
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7d4789..674aadca0079 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -19,55 +19,142 @@
19#include "wext-compat.h" 19#include "wext-compat.h"
20#include "rdev-ops.h" 20#include "rdev-ops.h"
21 21
22/**
23 * DOC: BSS tree/list structure
24 *
25 * At the top level, the BSS list is kept in both a list in each
26 * registered device (@bss_list) as well as an RB-tree for faster
27 * lookup. In the RB-tree, entries can be looked up using their
28 * channel, MESHID, MESHCONF (for MBSSes) or channel, BSSID, SSID
29 * for other BSSes.
30 *
31 * Due to the possibility of hidden SSIDs, there's a second level
32 * structure, the "hidden_list" and "hidden_beacon_bss" pointer.
33 * The hidden_list connects all BSSes belonging to a single AP
34 * that has a hidden SSID, and connects beacon and probe response
35 * entries. For a probe response entry for a hidden SSID, the
36 * hidden_beacon_bss pointer points to the BSS struct holding the
37 * beacon's information.
38 *
39 * Reference counting is done for all these references except for
40 * the hidden_list, so that a beacon BSS struct that is otherwise
41 * not referenced has one reference for being on the bss_list and
42 * one for each probe response entry that points to it using the
43 * hidden_beacon_bss pointer. When a BSS struct that has such a
44 * pointer is get/put, the refcount update is also propagated to
 45 * the referenced struct, this ensures that it cannot get removed
46 * while somebody is using the probe response version.
47 *
48 * Note that the hidden_beacon_bss pointer never changes, due to
49 * the reference counting. Therefore, no locking is needed for
50 * it.
51 *
52 * Also note that the hidden_beacon_bss pointer is only relevant
53 * if the driver uses something other than the IEs, e.g. private
 54 * data stored in the BSS struct, since the beacon IEs are
55 * also linked into the probe response struct.
56 */
57
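
A compact userspace model of the refcount propagation described above: dropping a probe-response entry also drops the reference it holds on its beacon entry, so the beacon struct outlives every probe response that points into it (struct bss is a simplified stand-in for cfg80211_internal_bss):

#include <stdlib.h>

struct bss {
        int refcount;
        struct bss *hidden_beacon_bss;  /* probe resp -> beacon, or NULL */
};

static void bss_put(struct bss *bss)
{
        struct bss *beacon = bss->hidden_beacon_bss;

        if (beacon && --beacon->refcount == 0)
                free(beacon);           /* last probe-response ref is gone */
        if (--bss->refcount == 0)
                free(bss);
}
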
22#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 58#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
23 59
24static void bss_release(struct kref *ref) 60static void bss_free(struct cfg80211_internal_bss *bss)
25{ 61{
26 struct cfg80211_bss_ies *ies; 62 struct cfg80211_bss_ies *ies;
27 struct cfg80211_internal_bss *bss;
28
29 bss = container_of(ref, struct cfg80211_internal_bss, ref);
30 63
31 if (WARN_ON(atomic_read(&bss->hold))) 64 if (WARN_ON(atomic_read(&bss->hold)))
32 return; 65 return;
33 66
34 if (bss->pub.free_priv)
35 bss->pub.free_priv(&bss->pub);
36
37 ies = (void *)rcu_access_pointer(bss->pub.beacon_ies); 67 ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
38 if (ies) 68 if (ies && !bss->pub.hidden_beacon_bss)
39 kfree_rcu(ies, rcu_head); 69 kfree_rcu(ies, rcu_head);
40 ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies); 70 ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
41 if (ies) 71 if (ies)
42 kfree_rcu(ies, rcu_head); 72 kfree_rcu(ies, rcu_head);
43 73
74 /*
 75 * This happens when the module is removed; it doesn't
 76 * really matter any more, save for completeness
77 */
78 if (!list_empty(&bss->hidden_list))
79 list_del(&bss->hidden_list);
80
44 kfree(bss); 81 kfree(bss);
45} 82}
46 83
47/* must hold dev->bss_lock! */ 84static inline void bss_ref_get(struct cfg80211_registered_device *dev,
48static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, 85 struct cfg80211_internal_bss *bss)
86{
87 lockdep_assert_held(&dev->bss_lock);
88
89 bss->refcount++;
90 if (bss->pub.hidden_beacon_bss) {
91 bss = container_of(bss->pub.hidden_beacon_bss,
92 struct cfg80211_internal_bss,
93 pub);
94 bss->refcount++;
95 }
96}
97
98static inline void bss_ref_put(struct cfg80211_registered_device *dev,
99 struct cfg80211_internal_bss *bss)
100{
101 lockdep_assert_held(&dev->bss_lock);
102
103 if (bss->pub.hidden_beacon_bss) {
104 struct cfg80211_internal_bss *hbss;
105 hbss = container_of(bss->pub.hidden_beacon_bss,
106 struct cfg80211_internal_bss,
107 pub);
108 hbss->refcount--;
109 if (hbss->refcount == 0)
110 bss_free(hbss);
111 }
112 bss->refcount--;
113 if (bss->refcount == 0)
114 bss_free(bss);
115}
116
117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
49 struct cfg80211_internal_bss *bss) 118 struct cfg80211_internal_bss *bss)
50{ 119{
120 lockdep_assert_held(&dev->bss_lock);
121
122 if (!list_empty(&bss->hidden_list)) {
123 /*
124 * don't remove the beacon entry if it has
125 * probe responses associated with it
126 */
127 if (!bss->pub.hidden_beacon_bss)
128 return false;
129 /*
130 * if it's a probe response entry break its
131 * link to the other entries in the group
132 */
133 list_del_init(&bss->hidden_list);
134 }
135
51 list_del_init(&bss->list); 136 list_del_init(&bss->list);
52 rb_erase(&bss->rbn, &dev->bss_tree); 137 rb_erase(&bss->rbn, &dev->bss_tree);
53 kref_put(&bss->ref, bss_release); 138 bss_ref_put(dev, bss);
139 return true;
54} 140}
55 141
56/* must hold dev->bss_lock! */
57static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, 142static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
58 unsigned long expire_time) 143 unsigned long expire_time)
59{ 144{
60 struct cfg80211_internal_bss *bss, *tmp; 145 struct cfg80211_internal_bss *bss, *tmp;
61 bool expired = false; 146 bool expired = false;
62 147
148 lockdep_assert_held(&dev->bss_lock);
149
63 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { 150 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
64 if (atomic_read(&bss->hold)) 151 if (atomic_read(&bss->hold))
65 continue; 152 continue;
66 if (!time_after(expire_time, bss->ts)) 153 if (!time_after(expire_time, bss->ts))
67 continue; 154 continue;
68 155
69 __cfg80211_unlink_bss(dev, bss); 156 if (__cfg80211_unlink_bss(dev, bss))
70 expired = true; 157 expired = true;
71 } 158 }
72 159
73 if (expired) 160 if (expired)
@@ -234,15 +321,16 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
234 return 0; 321 return 0;
235} 322}
236 323
237/* must hold dev->bss_lock! */
238void cfg80211_bss_age(struct cfg80211_registered_device *dev, 324void cfg80211_bss_age(struct cfg80211_registered_device *dev,
239 unsigned long age_secs) 325 unsigned long age_secs)
240{ 326{
241 struct cfg80211_internal_bss *bss; 327 struct cfg80211_internal_bss *bss;
242 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); 328 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
243 329
330 spin_lock_bh(&dev->bss_lock);
244 list_for_each_entry(bss, &dev->bss_list, list) 331 list_for_each_entry(bss, &dev->bss_list, list)
245 bss->ts -= age_jiffies; 332 bss->ts -= age_jiffies;
333 spin_unlock_bh(&dev->bss_lock);
246} 334}
247 335
248void cfg80211_bss_expire(struct cfg80211_registered_device *dev) 336void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
@@ -277,40 +365,24 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
277 if (!pos) 365 if (!pos)
278 return NULL; 366 return NULL;
279 367
280 if (end - pos < sizeof(*ie))
281 return NULL;
282
283 ie = (struct ieee80211_vendor_ie *)pos; 368 ie = (struct ieee80211_vendor_ie *)pos;
369
370 /* make sure we can access ie->len */
371 BUILD_BUG_ON(offsetof(struct ieee80211_vendor_ie, len) != 1);
372
373 if (ie->len < sizeof(*ie))
374 goto cont;
375
284 ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2]; 376 ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
285 if (ie_oui == oui && ie->oui_type == oui_type) 377 if (ie_oui == oui && ie->oui_type == oui_type)
286 return pos; 378 return pos;
287 379cont:
288 pos += 2 + ie->len; 380 pos += 2 + ie->len;
289 } 381 }
290 return NULL; 382 return NULL;
291} 383}
292EXPORT_SYMBOL(cfg80211_find_vendor_ie); 384EXPORT_SYMBOL(cfg80211_find_vendor_ie);
293 385
294static int cmp_ies(u8 num, const u8 *ies1, int len1, const u8 *ies2, int len2)
295{
296 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
297 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
298
299 /* equal if both missing */
300 if (!ie1 && !ie2)
301 return 0;
302 /* sort missing IE before (left of) present IE */
303 if (!ie1)
304 return -1;
305 if (!ie2)
306 return 1;
307
308 /* sort by length first, then by contents */
309 if (ie1[1] != ie2[1])
310 return ie2[1] - ie1[1];
311 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
312}
313
314static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, 386static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
315 const u8 *ssid, size_t ssid_len) 387 const u8 *ssid, size_t ssid_len)
316{ 388{
@@ -334,109 +406,30 @@ static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
334 return memcmp(ssidie + 2, ssid, ssid_len) == 0; 406 return memcmp(ssidie + 2, ssid, ssid_len) == 0;
335} 407}
336 408
337static bool is_mesh_bss(struct cfg80211_bss *a) 409/**
338{ 410 * enum bss_compare_mode - BSS compare mode
339 const struct cfg80211_bss_ies *ies; 411 * @BSS_CMP_REGULAR: regular compare mode (for insertion and normal find)
340 const u8 *ie; 412 * @BSS_CMP_HIDE_ZLEN: find hidden SSID with zero-length mode
341 413 * @BSS_CMP_HIDE_NUL: find hidden SSID with NUL-ed out mode
342 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) 414 */
343 return false; 415enum bss_compare_mode {
344 416 BSS_CMP_REGULAR,
345 ies = rcu_access_pointer(a->ies); 417 BSS_CMP_HIDE_ZLEN,
346 if (!ies) 418 BSS_CMP_HIDE_NUL,
347 return false; 419};
348
349 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
350 if (!ie)
351 return false;
352
353 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
354 if (!ie)
355 return false;
356
357 return true;
358}
359
360static bool is_mesh(struct cfg80211_bss *a,
361 const u8 *meshid, size_t meshidlen,
362 const u8 *meshcfg)
363{
364 const struct cfg80211_bss_ies *ies;
365 const u8 *ie;
366
367 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
368 return false;
369
370 ies = rcu_access_pointer(a->ies);
371 if (!ies)
372 return false;
373
374 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
375 if (!ie)
376 return false;
377 if (ie[1] != meshidlen)
378 return false;
379 if (memcmp(ie + 2, meshid, meshidlen))
380 return false;
381
382 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
383 if (!ie)
384 return false;
385 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
386 return false;
387
388 /*
389 * Ignore mesh capability (last two bytes of the IE) when
390 * comparing since that may differ between stations taking
391 * part in the same mesh.
392 */
393 return memcmp(ie + 2, meshcfg,
394 sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
395}
396 420
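
An AP hides its SSID in one of two ways: it either advertises a zero-length SSID element or keeps the real length and NUL-fills the bytes, which is why two hide modes exist. When a probe response arrives they are tried in turn to find the matching beacon entry, mirroring the lookup later in this diff:

        hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
        if (!hidden)
                hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_NUL);
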
397static int cmp_bss_core(struct cfg80211_bss *a, struct cfg80211_bss *b) 421static int cmp_bss(struct cfg80211_bss *a,
422 struct cfg80211_bss *b,
423 enum bss_compare_mode mode)
398{ 424{
399 const struct cfg80211_bss_ies *a_ies, *b_ies; 425 const struct cfg80211_bss_ies *a_ies, *b_ies;
400 int r; 426 const u8 *ie1 = NULL;
427 const u8 *ie2 = NULL;
428 int i, r;
401 429
402 if (a->channel != b->channel) 430 if (a->channel != b->channel)
403 return b->channel->center_freq - a->channel->center_freq; 431 return b->channel->center_freq - a->channel->center_freq;
404 432
405 if (is_mesh_bss(a) && is_mesh_bss(b)) {
406 a_ies = rcu_access_pointer(a->ies);
407 if (!a_ies)
408 return -1;
409 b_ies = rcu_access_pointer(b->ies);
410 if (!b_ies)
411 return 1;
412
413 r = cmp_ies(WLAN_EID_MESH_ID,
414 a_ies->data, a_ies->len,
415 b_ies->data, b_ies->len);
416 if (r)
417 return r;
418 return cmp_ies(WLAN_EID_MESH_CONFIG,
419 a_ies->data, a_ies->len,
420 b_ies->data, b_ies->len);
421 }
422
423 /*
424 * we can't use compare_ether_addr here since we need a < > operator.
425 * The binary return value of compare_ether_addr isn't enough
426 */
427 return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
428}
429
430static int cmp_bss(struct cfg80211_bss *a,
431 struct cfg80211_bss *b)
432{
433 const struct cfg80211_bss_ies *a_ies, *b_ies;
434 int r;
435
436 r = cmp_bss_core(a, b);
437 if (r)
438 return r;
439
440 a_ies = rcu_access_pointer(a->ies); 433 a_ies = rcu_access_pointer(a->ies);
441 if (!a_ies) 434 if (!a_ies)
442 return -1; 435 return -1;
@@ -444,42 +437,51 @@ static int cmp_bss(struct cfg80211_bss *a,
444 if (!b_ies) 437 if (!b_ies)
445 return 1; 438 return 1;
446 439
447 return cmp_ies(WLAN_EID_SSID, 440 if (WLAN_CAPABILITY_IS_STA_BSS(a->capability))
448 a_ies->data, a_ies->len, 441 ie1 = cfg80211_find_ie(WLAN_EID_MESH_ID,
449 b_ies->data, b_ies->len); 442 a_ies->data, a_ies->len);
450} 443 if (WLAN_CAPABILITY_IS_STA_BSS(b->capability))
451 444 ie2 = cfg80211_find_ie(WLAN_EID_MESH_ID,
452static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b) 445 b_ies->data, b_ies->len);
453{ 446 if (ie1 && ie2) {
454 const struct cfg80211_bss_ies *a_ies, *b_ies; 447 int mesh_id_cmp;
455 const u8 *ie1; 448
456 const u8 *ie2; 449 if (ie1[1] == ie2[1])
457 int i; 450 mesh_id_cmp = memcmp(ie1 + 2, ie2 + 2, ie1[1]);
458 int r; 451 else
452 mesh_id_cmp = ie2[1] - ie1[1];
453
454 ie1 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
455 a_ies->data, a_ies->len);
456 ie2 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
457 b_ies->data, b_ies->len);
458 if (ie1 && ie2) {
459 if (mesh_id_cmp)
460 return mesh_id_cmp;
461 if (ie1[1] != ie2[1])
462 return ie2[1] - ie1[1];
463 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
464 }
465 }
459 466
460 r = cmp_bss_core(a, b); 467 /*
468 * we can't use compare_ether_addr here since we need a < > operator.
469 * The binary return value of compare_ether_addr isn't enough
470 */
471 r = memcmp(a->bssid, b->bssid, sizeof(a->bssid));
461 if (r) 472 if (r)
462 return r; 473 return r;
463 474
464 a_ies = rcu_access_pointer(a->ies);
465 if (!a_ies)
466 return -1;
467 b_ies = rcu_access_pointer(b->ies);
468 if (!b_ies)
469 return 1;
470
471 ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len); 475 ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len);
472 ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len); 476 ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len);
473 477
478 if (!ie1 && !ie2)
479 return 0;
480
474 /* 481 /*
475 * Key comparator must use same algorithm in any rb-tree 482 * Note that with "hide_ssid", the function returns a match if
476 * search function (order is important), otherwise ordering 483 * the already-present BSS ("b") is a hidden SSID beacon for
477 * of items in the tree is broken and search gives incorrect 484 * the new BSS ("a").
478 * results. This code uses same order as cmp_ies() does.
479 *
480 * Note that due to the differring behaviour with hidden SSIDs
481 * this function only works when "b" is the tree element and
482 * "a" is the key we're looking for.
483 */ 485 */
484 486
485 /* sort missing IE before (left of) present IE */ 487 /* sort missing IE before (left of) present IE */
@@ -488,24 +490,36 @@ static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b)
488 if (!ie2) 490 if (!ie2)
489 return 1; 491 return 1;
490 492
491 /* zero-size SSID is used as an indication of the hidden bss */ 493 switch (mode) {
492 if (!ie2[1]) 494 case BSS_CMP_HIDE_ZLEN:
495 /*
496 * In ZLEN mode we assume the BSS entry we're
497 * looking for has a zero-length SSID. So if
498 * the one we're looking at right now has that,
499 * return 0. Otherwise, return the difference
500 * in length, but since we're looking for the
501 * 0-length it's really equivalent to returning
502 * the length of the one we're looking at.
503 *
504 * No content comparison is needed as we assume
505 * the content length is zero.
506 */
507 return ie2[1];
508 case BSS_CMP_REGULAR:
509 default:
510 /* sort by length first, then by contents */
511 if (ie1[1] != ie2[1])
512 return ie2[1] - ie1[1];
513 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
514 case BSS_CMP_HIDE_NUL:
515 if (ie1[1] != ie2[1])
516 return ie2[1] - ie1[1];
517 /* this is equivalent to memcmp(zeroes, ie2 + 2, len) */
518 for (i = 0; i < ie2[1]; i++)
519 if (ie2[i + 2])
520 return -1;
493 return 0; 521 return 0;
494 522 }
495 /* sort by length first, then by contents */
496 if (ie1[1] != ie2[1])
497 return ie2[1] - ie1[1];
498
499 /*
500 * zeroed SSID ie is another indication of a hidden bss;
501 * if it isn't zeroed just return the regular sort value
502 * to find the next candidate
503 */
504 for (i = 0; i < ie2[1]; i++)
505 if (ie2[i + 2])
506 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
507
508 return 0;
509} 523}
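
To make the three modes concrete, here is a self-contained model of just the SSID tail of cmp_bss(), exercised against a real SSID, a zero-length hidden variant, and a NUL-filled hidden variant (IEs in 802.11 TLV form: id, length, payload):

#include <stdio.h>
#include <string.h>

enum mode { CMP_REGULAR, CMP_HIDE_ZLEN, CMP_HIDE_NUL };

/* Model of the SSID comparison tail of cmp_bss(); ie1 is the key,
 * ie2 the tree entry, both in 802.11 TLV form. */
static int cmp_ssid(const unsigned char *ie1, const unsigned char *ie2,
                    enum mode mode)
{
        int i;

        switch (mode) {
        case CMP_HIDE_ZLEN:
                return ie2[1];                  /* match only zero-length SSIDs */
        case CMP_HIDE_NUL:
                if (ie1[1] != ie2[1])
                        return ie2[1] - ie1[1];
                for (i = 0; i < ie2[1]; i++)    /* match all-NUL payloads */
                        if (ie2[i + 2])
                                return -1;
                return 0;
        case CMP_REGULAR:
        default:
                if (ie1[1] != ie2[1])
                        return ie2[1] - ie1[1];
                return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
        }
}

int main(void)
{
        unsigned char real[]   = { 0, 5, 'l', 'i', 'n', 'u', 'x' };
        unsigned char zlen[]   = { 0, 0 };
        unsigned char nulled[] = { 0, 5, 0, 0, 0, 0, 0 };

        printf("%d\n", cmp_ssid(real, zlen,   CMP_HIDE_ZLEN)); /* 0: matches */
        printf("%d\n", cmp_ssid(real, nulled, CMP_HIDE_NUL));  /* 0: matches */
        printf("%d\n", cmp_ssid(real, nulled, CMP_REGULAR));   /* non-zero   */
        return 0;
}
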
510 524
511struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, 525struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
@@ -534,7 +548,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
534 continue; 548 continue;
535 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { 549 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
536 res = bss; 550 res = bss;
537 kref_get(&res->ref); 551 bss_ref_get(dev, res);
538 break; 552 break;
539 } 553 }
540 } 554 }
@@ -547,34 +561,6 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
547} 561}
548EXPORT_SYMBOL(cfg80211_get_bss); 562EXPORT_SYMBOL(cfg80211_get_bss);
549 563
550struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
551 struct ieee80211_channel *channel,
552 const u8 *meshid, size_t meshidlen,
553 const u8 *meshcfg)
554{
555 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
556 struct cfg80211_internal_bss *bss, *res = NULL;
557
558 spin_lock_bh(&dev->bss_lock);
559
560 list_for_each_entry(bss, &dev->bss_list, list) {
561 if (channel && bss->pub.channel != channel)
562 continue;
563 if (is_mesh(&bss->pub, meshid, meshidlen, meshcfg)) {
564 res = bss;
565 kref_get(&res->ref);
566 break;
567 }
568 }
569
570 spin_unlock_bh(&dev->bss_lock);
571 if (!res)
572 return NULL;
573 return &res->pub;
574}
575EXPORT_SYMBOL(cfg80211_get_mesh);
576
577
578static void rb_insert_bss(struct cfg80211_registered_device *dev, 564static void rb_insert_bss(struct cfg80211_registered_device *dev,
579 struct cfg80211_internal_bss *bss) 565 struct cfg80211_internal_bss *bss)
580{ 566{
@@ -587,7 +573,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
587 parent = *p; 573 parent = *p;
588 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); 574 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn);
589 575
590 cmp = cmp_bss(&bss->pub, &tbss->pub); 576 cmp = cmp_bss(&bss->pub, &tbss->pub, BSS_CMP_REGULAR);
591 577
592 if (WARN_ON(!cmp)) { 578 if (WARN_ON(!cmp)) {
593 /* will sort of leak this BSS */ 579 /* will sort of leak this BSS */
@@ -606,7 +592,8 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
606 592
607static struct cfg80211_internal_bss * 593static struct cfg80211_internal_bss *
608rb_find_bss(struct cfg80211_registered_device *dev, 594rb_find_bss(struct cfg80211_registered_device *dev,
609 struct cfg80211_internal_bss *res) 595 struct cfg80211_internal_bss *res,
596 enum bss_compare_mode mode)
610{ 597{
611 struct rb_node *n = dev->bss_tree.rb_node; 598 struct rb_node *n = dev->bss_tree.rb_node;
612 struct cfg80211_internal_bss *bss; 599 struct cfg80211_internal_bss *bss;
@@ -614,7 +601,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
614 601
615 while (n) { 602 while (n) {
616 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); 603 bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
617 r = cmp_bss(&res->pub, &bss->pub); 604 r = cmp_bss(&res->pub, &bss->pub, mode);
618 605
619 if (r == 0) 606 if (r == 0)
620 return bss; 607 return bss;
@@ -627,46 +614,67 @@ rb_find_bss(struct cfg80211_registered_device *dev,
627 return NULL; 614 return NULL;
628} 615}
629 616
630static struct cfg80211_internal_bss * 617static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
631rb_find_hidden_bss(struct cfg80211_registered_device *dev, 618 struct cfg80211_internal_bss *new)
632 struct cfg80211_internal_bss *res)
633{ 619{
634 struct rb_node *n = dev->bss_tree.rb_node; 620 const struct cfg80211_bss_ies *ies;
635 struct cfg80211_internal_bss *bss; 621 struct cfg80211_internal_bss *bss;
636 int r; 622 const u8 *ie;
623 int i, ssidlen;
624 u8 fold = 0;
637 625
638 while (n) { 626 ies = rcu_access_pointer(new->pub.beacon_ies);
639 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); 627 if (WARN_ON(!ies))
640 r = cmp_hidden_bss(&res->pub, &bss->pub); 628 return false;
641 629
642 if (r == 0) 630 ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
643 return bss; 631 if (!ie) {
644 else if (r < 0) 632 /* nothing to do */
645 n = n->rb_left; 633 return true;
646 else
647 n = n->rb_right;
648 } 634 }
649 635
650 return NULL; 636 ssidlen = ie[1];
651} 637 for (i = 0; i < ssidlen; i++)
638 fold |= ie[2 + i];
652 639
653static void 640 if (fold) {
654copy_hidden_ies(struct cfg80211_internal_bss *res, 641 /* not a hidden SSID */
655 struct cfg80211_internal_bss *hidden) 642 return true;
656{ 643 }
657 const struct cfg80211_bss_ies *ies;
658 644
659 if (rcu_access_pointer(res->pub.beacon_ies)) 645 /* This is the bad part ... */
660 return;
661 646
662 ies = rcu_access_pointer(hidden->pub.beacon_ies); 647 list_for_each_entry(bss, &dev->bss_list, list) {
663 if (WARN_ON(!ies)) 648 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
664 return; 649 continue;
650 if (bss->pub.channel != new->pub.channel)
651 continue;
652 if (rcu_access_pointer(bss->pub.beacon_ies))
653 continue;
654 ies = rcu_access_pointer(bss->pub.ies);
655 if (!ies)
656 continue;
657 ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
658 if (!ie)
659 continue;
660 if (ssidlen && ie[1] != ssidlen)
661 continue;
662 /* that would be odd ... */
663 if (bss->pub.beacon_ies)
664 continue;
665 if (WARN_ON_ONCE(bss->pub.hidden_beacon_bss))
666 continue;
667 if (WARN_ON_ONCE(!list_empty(&bss->hidden_list)))
668 list_del(&bss->hidden_list);
669 /* combine them */
670 list_add(&bss->hidden_list, &new->hidden_list);
671 bss->pub.hidden_beacon_bss = &new->pub;
672 new->refcount += bss->refcount;
673 rcu_assign_pointer(bss->pub.beacon_ies,
674 new->pub.beacon_ies);
675 }
665 676
666 ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC); 677 return true;
667 if (unlikely(!ies))
668 return;
669 rcu_assign_pointer(res->pub.beacon_ies, ies);
670} 678}
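
The fold accumulator above is a branch-free way to ask "is every SSID byte zero?": OR all payload bytes together and compare the result with zero. In isolation:

#include <stdbool.h>
#include <stddef.h>

/* true if the SSID payload is all-zero, i.e. a NUL-ed hidden SSID */
static bool ssid_is_hidden(const unsigned char *ssid, size_t len)
{
        unsigned char fold = 0;
        size_t i;

        for (i = 0; i < len; i++)
                fold |= ssid[i];
        return fold == 0;
}
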
671 679
672static struct cfg80211_internal_bss * 680static struct cfg80211_internal_bss *
@@ -687,11 +695,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
687 return NULL; 695 return NULL;
688 } 696 }
689 697
690 found = rb_find_bss(dev, tmp); 698 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
691 699
692 if (found) { 700 if (found) {
693 found->pub.beacon_interval = tmp->pub.beacon_interval; 701 found->pub.beacon_interval = tmp->pub.beacon_interval;
694 found->pub.tsf = tmp->pub.tsf;
695 found->pub.signal = tmp->pub.signal; 702 found->pub.signal = tmp->pub.signal;
696 found->pub.capability = tmp->pub.capability; 703 found->pub.capability = tmp->pub.capability;
697 found->ts = tmp->ts; 704 found->ts = tmp->ts;
@@ -711,19 +718,45 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
711 kfree_rcu((struct cfg80211_bss_ies *)old, 718 kfree_rcu((struct cfg80211_bss_ies *)old,
712 rcu_head); 719 rcu_head);
713 } else if (rcu_access_pointer(tmp->pub.beacon_ies)) { 720 } else if (rcu_access_pointer(tmp->pub.beacon_ies)) {
714 const struct cfg80211_bss_ies *old, *ies; 721 const struct cfg80211_bss_ies *old;
722 struct cfg80211_internal_bss *bss;
723
724 if (found->pub.hidden_beacon_bss &&
725 !list_empty(&found->hidden_list)) {
726 /*
727 * The found BSS struct is one of the probe
728 * response members of a group, but we're
729 * receiving a beacon (beacon_ies in the tmp
730 * bss is used). This can only mean that the
731 * AP changed its beacon from not having an
732 * SSID to showing it, which is confusing so
733 * drop this information.
734 */
735 goto drop;
736 }
715 737
716 old = rcu_access_pointer(found->pub.beacon_ies); 738 old = rcu_access_pointer(found->pub.beacon_ies);
717 ies = rcu_access_pointer(found->pub.ies);
718 739
719 rcu_assign_pointer(found->pub.beacon_ies, 740 rcu_assign_pointer(found->pub.beacon_ies,
720 tmp->pub.beacon_ies); 741 tmp->pub.beacon_ies);
721 742
722 /* Override IEs if they were from a beacon before */ 743 /* Override IEs if they were from a beacon before */
723 if (old == ies) 744 if (old == rcu_access_pointer(found->pub.ies))
724 rcu_assign_pointer(found->pub.ies, 745 rcu_assign_pointer(found->pub.ies,
725 tmp->pub.beacon_ies); 746 tmp->pub.beacon_ies);
726 747
748 /* Assign beacon IEs to all sub entries */
749 list_for_each_entry(bss, &found->hidden_list,
750 hidden_list) {
751 const struct cfg80211_bss_ies *ies;
752
753 ies = rcu_access_pointer(bss->pub.beacon_ies);
754 WARN_ON(ies != old);
755
756 rcu_assign_pointer(bss->pub.beacon_ies,
757 tmp->pub.beacon_ies);
758 }
759
727 if (old) 760 if (old)
728 kfree_rcu((struct cfg80211_bss_ies *)old, 761 kfree_rcu((struct cfg80211_bss_ies *)old,
729 rcu_head); 762 rcu_head);
@@ -733,19 +766,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
733 struct cfg80211_internal_bss *hidden; 766 struct cfg80211_internal_bss *hidden;
734 struct cfg80211_bss_ies *ies; 767 struct cfg80211_bss_ies *ies;
735 768
736 /* First check if the beacon is a probe response from
737 * a hidden bss. If so, copy beacon ies (with nullified
738 * ssid) into the probe response bss entry (with real ssid).
739 * It is required basically for PSM implementation
740 * (probe responses do not contain tim ie) */
741
742 /* TODO: The code is not trying to update existing probe
743 * response bss entries when beacon ies are
744 * getting changed. */
745 hidden = rb_find_hidden_bss(dev, tmp);
746 if (hidden)
747 copy_hidden_ies(tmp, hidden);
748
749 /* 769 /*
750 * create a copy -- the "res" variable that is passed in 770 * create a copy -- the "res" variable that is passed in
751 * is allocated on the stack since it's not needed in the 771 * is allocated on the stack since it's not needed in the
@@ -760,21 +780,51 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
760 ies = (void *)rcu_dereference(tmp->pub.proberesp_ies); 780 ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
761 if (ies) 781 if (ies)
762 kfree_rcu(ies, rcu_head); 782 kfree_rcu(ies, rcu_head);
763 spin_unlock_bh(&dev->bss_lock); 783 goto drop;
764 return NULL;
765 } 784 }
766 memcpy(new, tmp, sizeof(*new)); 785 memcpy(new, tmp, sizeof(*new));
767 kref_init(&new->ref); 786 new->refcount = 1;
787 INIT_LIST_HEAD(&new->hidden_list);
788
789 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
790 hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
791 if (!hidden)
792 hidden = rb_find_bss(dev, tmp,
793 BSS_CMP_HIDE_NUL);
794 if (hidden) {
795 new->pub.hidden_beacon_bss = &hidden->pub;
796 list_add(&new->hidden_list,
797 &hidden->hidden_list);
798 hidden->refcount++;
799 rcu_assign_pointer(new->pub.beacon_ies,
800 hidden->pub.beacon_ies);
801 }
802 } else {
803 /*
804 * Ok so we found a beacon, and don't have an entry. If
805 * it's a beacon with hidden SSID, we might be in for an
806 * expensive search for any probe responses that should
807 * be grouped with this beacon for updates ...
808 */
809 if (!cfg80211_combine_bsses(dev, new)) {
810 kfree(new);
811 goto drop;
812 }
813 }
814
768 list_add_tail(&new->list, &dev->bss_list); 815 list_add_tail(&new->list, &dev->bss_list);
769 rb_insert_bss(dev, new); 816 rb_insert_bss(dev, new);
770 found = new; 817 found = new;
771 } 818 }
772 819
773 dev->bss_generation++; 820 dev->bss_generation++;
821 bss_ref_get(dev, found);
774 spin_unlock_bh(&dev->bss_lock); 822 spin_unlock_bh(&dev->bss_lock);
775 823
776 kref_get(&found->ref);
777 return found; 824 return found;
825 drop:
826 spin_unlock_bh(&dev->bss_lock);
827 return NULL;
778} 828}
779 829
780static struct ieee80211_channel * 830static struct ieee80211_channel *
@@ -833,7 +883,6 @@ cfg80211_inform_bss(struct wiphy *wiphy,
833 memcpy(tmp.pub.bssid, bssid, ETH_ALEN); 883 memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
834 tmp.pub.channel = channel; 884 tmp.pub.channel = channel;
835 tmp.pub.signal = signal; 885 tmp.pub.signal = signal;
836 tmp.pub.tsf = tsf;
837 tmp.pub.beacon_interval = beacon_interval; 886 tmp.pub.beacon_interval = beacon_interval;
838 tmp.pub.capability = capability; 887 tmp.pub.capability = capability;
839 /* 888 /*
@@ -841,16 +890,14 @@ cfg80211_inform_bss(struct wiphy *wiphy,
841 * Response frame, we need to pick one of the options and only use it 890 * Response frame, we need to pick one of the options and only use it
842 * with the driver that does not provide the full Beacon/Probe Response 891 * with the driver that does not provide the full Beacon/Probe Response
843 * frame. Use Beacon frame pointer to avoid indicating that this should 892 * frame. Use Beacon frame pointer to avoid indicating that this should
844 * override the iies pointer should we have received an earlier 893 * override the IEs pointer should we have received an earlier
845 * indication of Probe Response data. 894 * indication of Probe Response data.
846 *
847 * The initial buffer for the IEs is allocated with the BSS entry and
848 * is located after the private area.
849 */ 895 */
850 ies = kmalloc(sizeof(*ies) + ielen, gfp); 896 ies = kmalloc(sizeof(*ies) + ielen, gfp);
851 if (!ies) 897 if (!ies)
852 return NULL; 898 return NULL;
853 ies->len = ielen; 899 ies->len = ielen;
900 ies->tsf = tsf;
854 memcpy(ies->data, ie, ielen); 901 memcpy(ies->data, ie, ielen);
855 902
856 rcu_assign_pointer(tmp.pub.beacon_ies, ies); 903 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
@@ -907,6 +954,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
907 if (!ies) 954 if (!ies)
908 return NULL; 955 return NULL;
909 ies->len = ielen; 956 ies->len = ielen;
957 ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
910 memcpy(ies->data, mgmt->u.probe_resp.variable, ielen); 958 memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
911 959
912 if (ieee80211_is_probe_resp(mgmt->frame_control)) 960 if (ieee80211_is_probe_resp(mgmt->frame_control))
@@ -918,7 +966,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
918 memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN); 966 memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
919 tmp.pub.channel = channel; 967 tmp.pub.channel = channel;
920 tmp.pub.signal = signal; 968 tmp.pub.signal = signal;
921 tmp.pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
922 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 969 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
923 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 970 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
924 971
@@ -935,27 +982,35 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
935} 982}
936EXPORT_SYMBOL(cfg80211_inform_bss_frame); 983EXPORT_SYMBOL(cfg80211_inform_bss_frame);
937 984
938void cfg80211_ref_bss(struct cfg80211_bss *pub) 985void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
939{ 986{
987 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
940 struct cfg80211_internal_bss *bss; 988 struct cfg80211_internal_bss *bss;
941 989
942 if (!pub) 990 if (!pub)
943 return; 991 return;
944 992
945 bss = container_of(pub, struct cfg80211_internal_bss, pub); 993 bss = container_of(pub, struct cfg80211_internal_bss, pub);
946 kref_get(&bss->ref); 994
995 spin_lock_bh(&dev->bss_lock);
996 bss_ref_get(dev, bss);
997 spin_unlock_bh(&dev->bss_lock);
947} 998}
948EXPORT_SYMBOL(cfg80211_ref_bss); 999EXPORT_SYMBOL(cfg80211_ref_bss);
949 1000
950void cfg80211_put_bss(struct cfg80211_bss *pub) 1001void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
951{ 1002{
1003 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
952 struct cfg80211_internal_bss *bss; 1004 struct cfg80211_internal_bss *bss;
953 1005
954 if (!pub) 1006 if (!pub)
955 return; 1007 return;
956 1008
957 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1009 bss = container_of(pub, struct cfg80211_internal_bss, pub);
958 kref_put(&bss->ref, bss_release); 1010
1011 spin_lock_bh(&dev->bss_lock);
1012 bss_ref_put(dev, bss);
1013 spin_unlock_bh(&dev->bss_lock);
959} 1014}
960EXPORT_SYMBOL(cfg80211_put_bss); 1015EXPORT_SYMBOL(cfg80211_put_bss);
961 1016
@@ -971,8 +1026,8 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
971 1026
972 spin_lock_bh(&dev->bss_lock); 1027 spin_lock_bh(&dev->bss_lock);
973 if (!list_empty(&bss->list)) { 1028 if (!list_empty(&bss->list)) {
974 __cfg80211_unlink_bss(dev, bss); 1029 if (__cfg80211_unlink_bss(dev, bss))
975 dev->bss_generation++; 1030 dev->bss_generation++;
976 } 1031 }
977 spin_unlock_bh(&dev->bss_lock); 1032 spin_unlock_bh(&dev->bss_lock);
978} 1033}
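
The refactor above replaces the bare kref on each BSS entry with bss_ref_get()/bss_ref_put() calls made under dev->bss_lock, which is why cfg80211_ref_bss() and cfg80211_put_bss() now take the wiphy: the lock lives in the registered device. A minimal userspace sketch of the pattern, with illustrative names (entry_get/entry_put are not the kernel API): keeping the refcount under the same lock that guards the list lets the final put unlink the entry atomically with respect to lookups.

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int refs;			/* guarded by list_lock, not atomic */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static void entry_get(struct entry *e)
{
	pthread_mutex_lock(&list_lock);
	e->refs++;
	pthread_mutex_unlock(&list_lock);
}

static void entry_put(struct entry *e)
{
	struct entry **p;

	pthread_mutex_lock(&list_lock);
	if (--e->refs) {
		pthread_mutex_unlock(&list_lock);
		return;
	}
	/* last reference: unlink while still holding the lock, so no
	 * concurrent lookup can return a half-dead entry */
	for (p = &head; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	free(e);
}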
@@ -1155,16 +1210,6 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
1155 } 1210 }
1156} 1211}
1157 1212
1158static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
1159{
1160 unsigned long end = jiffies;
1161
1162 if (end >= start)
1163 return jiffies_to_msecs(end - start);
1164
1165 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
1166}
1167
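
The removed elapsed_jiffies_msecs() helper special-cased jiffies wraparound by hand. For an unsigned long counter, plain subtraction already yields the elapsed ticks modulo 2^BITS_PER_LONG, so jiffies_to_msecs(jiffies - start) suffices; presumably that is why the open-coded MAX_JIFFY_OFFSET handling could go. A standalone sketch of the wraparound property:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long start = ULONG_MAX - 5;	/* counter about to wrap */
	unsigned long now = 10;			/* 16 ticks later, wrapped */

	/* unsigned modular arithmetic recovers the true elapsed count */
	assert(now - start == 16);
	return 0;
}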
1168static char * 1213static char *
1169ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, 1214ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1170 struct cfg80211_internal_bss *bss, char *current_ev, 1215 struct cfg80211_internal_bss *bss, char *current_ev,
@@ -1241,15 +1286,10 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1241 1286
1242 rcu_read_lock(); 1287 rcu_read_lock();
1243 ies = rcu_dereference(bss->pub.ies); 1288 ies = rcu_dereference(bss->pub.ies);
1244 if (ies) { 1289 rem = ies->len;
1245 rem = ies->len; 1290 ie = ies->data;
1246 ie = ies->data;
1247 } else {
1248 rem = 0;
1249 ie = NULL;
1250 }
1251 1291
1252 while (ies && rem >= 2) { 1292 while (rem >= 2) {
1253 /* invalid data */ 1293 /* invalid data */
1254 if (ie[1] > rem - 2) 1294 if (ie[1] > rem - 2)
1255 break; 1295 break;
@@ -1358,11 +1398,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1358 &iwe, IW_EV_UINT_LEN); 1398 &iwe, IW_EV_UINT_LEN);
1359 } 1399 }
1360 1400
1361 buf = kmalloc(30, GFP_ATOMIC); 1401 buf = kmalloc(31, GFP_ATOMIC);
1362 if (buf) { 1402 if (buf) {
1363 memset(&iwe, 0, sizeof(iwe)); 1403 memset(&iwe, 0, sizeof(iwe));
1364 iwe.cmd = IWEVCUSTOM; 1404 iwe.cmd = IWEVCUSTOM;
1365 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->pub.tsf)); 1405 sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
1366 iwe.u.data.length = strlen(buf); 1406 iwe.u.data.length = strlen(buf);
1367 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 1407 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1368 &iwe, buf); 1408 &iwe, buf);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f2431e41a373..f432bd3755b1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -192,7 +192,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
192 prev_bssid, 192 prev_bssid,
193 params->ssid, params->ssid_len, 193 params->ssid, params->ssid_len,
194 params->ie, params->ie_len, 194 params->ie, params->ie_len,
195 false, &params->crypto, 195 params->mfp != NL80211_MFP_NO,
196 &params->crypto,
196 params->flags, &params->ht_capa, 197 params->flags, &params->ht_capa,
197 &params->ht_capa_mask); 198 &params->ht_capa_mask);
198 if (err) 199 if (err)
@@ -300,7 +301,7 @@ static void __cfg80211_sme_scan_done(struct net_device *dev)
300 301
301 bss = cfg80211_get_conn_bss(wdev); 302 bss = cfg80211_get_conn_bss(wdev);
302 if (bss) { 303 if (bss) {
303 cfg80211_put_bss(bss); 304 cfg80211_put_bss(&rdev->wiphy, bss);
304 } else { 305 } else {
305 /* not found */ 306 /* not found */
306 if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) 307 if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)
@@ -463,7 +464,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
463 464
464 if (wdev->current_bss) { 465 if (wdev->current_bss) {
465 cfg80211_unhold_bss(wdev->current_bss); 466 cfg80211_unhold_bss(wdev->current_bss);
466 cfg80211_put_bss(&wdev->current_bss->pub); 467 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
467 wdev->current_bss = NULL; 468 wdev->current_bss = NULL;
468 } 469 }
469 470
@@ -479,7 +480,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
479 kfree(wdev->connect_keys); 480 kfree(wdev->connect_keys);
480 wdev->connect_keys = NULL; 481 wdev->connect_keys = NULL;
481 wdev->ssid_len = 0; 482 wdev->ssid_len = 0;
482 cfg80211_put_bss(bss); 483 cfg80211_put_bss(wdev->wiphy, bss);
483 return; 484 return;
484 } 485 }
485 486
@@ -519,10 +520,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
519 * - country_ie + 2, the start of the country ie data, and 520 * - country_ie + 2, the start of the country ie data, and
520 * - country_ie[1], which is the IE length 521 * - country_ie[1], which is the IE length
521 */ 522 */
522 regulatory_hint_11d(wdev->wiphy, 523 regulatory_hint_11d(wdev->wiphy, bss->channel->band,
523 bss->channel->band, 524 country_ie + 2, country_ie[1]);
524 country_ie + 2,
525 country_ie[1]);
526 kfree(country_ie); 525 kfree(country_ie);
527} 526}
528 527
@@ -587,7 +586,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
587 } 586 }
588 587
589 cfg80211_unhold_bss(wdev->current_bss); 588 cfg80211_unhold_bss(wdev->current_bss);
590 cfg80211_put_bss(&wdev->current_bss->pub); 589 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
591 wdev->current_bss = NULL; 590 wdev->current_bss = NULL;
592 591
593 cfg80211_hold_bss(bss_from_pub(bss)); 592 cfg80211_hold_bss(bss_from_pub(bss));
@@ -622,7 +621,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
622 621
623 return; 622 return;
624out: 623out:
625 cfg80211_put_bss(bss); 624 cfg80211_put_bss(wdev->wiphy, bss);
626} 625}
627 626
628void cfg80211_roamed(struct net_device *dev, 627void cfg80211_roamed(struct net_device *dev,
@@ -664,7 +663,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
664 663
665 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); 664 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
666 if (!ev) { 665 if (!ev) {
667 cfg80211_put_bss(bss); 666 cfg80211_put_bss(wdev->wiphy, bss);
668 return; 667 return;
669 } 668 }
670 669
@@ -705,7 +704,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
705 704
706 if (wdev->current_bss) { 705 if (wdev->current_bss) {
707 cfg80211_unhold_bss(wdev->current_bss); 706 cfg80211_unhold_bss(wdev->current_bss);
708 cfg80211_put_bss(&wdev->current_bss->pub); 707 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
709 } 708 }
710 709
711 wdev->current_bss = NULL; 710 wdev->current_bss = NULL;
@@ -876,7 +875,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
876 if (bss) { 875 if (bss) {
877 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; 876 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
878 err = cfg80211_conn_do_work(wdev); 877 err = cfg80211_conn_do_work(wdev);
879 cfg80211_put_bss(bss); 878 cfg80211_put_bss(wdev->wiphy, bss);
880 } else { 879 } else {
881 /* otherwise we'll need to scan for the AP first */ 880 /* otherwise we'll need to scan for the AP first */
882 err = cfg80211_conn_scan(wdev); 881 err = cfg80211_conn_scan(wdev);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9bf6d5e32166..238ee49b3868 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -77,13 +77,11 @@ static void wiphy_dev_release(struct device *dev)
77 cfg80211_dev_free(rdev); 77 cfg80211_dev_free(rdev);
78} 78}
79 79
80#ifdef CONFIG_HOTPLUG
81static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) 80static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
82{ 81{
83 /* TODO, we probably need stuff here */ 82 /* TODO, we probably need stuff here */
84 return 0; 83 return 0;
85} 84}
86#endif
87 85
88static int wiphy_suspend(struct device *dev, pm_message_t state) 86static int wiphy_suspend(struct device *dev, pm_message_t state)
89{ 87{
@@ -108,9 +106,7 @@ static int wiphy_resume(struct device *dev)
108 int ret = 0; 106 int ret = 0;
109 107
110 /* Age scan results with time spent in suspend */ 108 /* Age scan results with time spent in suspend */
111 spin_lock_bh(&rdev->bss_lock);
112 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 109 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
113 spin_unlock_bh(&rdev->bss_lock);
114 110
115 if (rdev->ops->resume) { 111 if (rdev->ops->resume) {
116 rtnl_lock(); 112 rtnl_lock();
@@ -134,9 +130,7 @@ struct class ieee80211_class = {
134 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
135 .dev_release = wiphy_dev_release, 131 .dev_release = wiphy_dev_release,
136 .dev_attrs = ieee80211_dev_attrs, 132 .dev_attrs = ieee80211_dev_attrs,
137#ifdef CONFIG_HOTPLUG
138 .dev_uevent = wiphy_uevent, 133 .dev_uevent = wiphy_uevent,
139#endif
140 .suspend = wiphy_suspend, 134 .suspend = wiphy_suspend,
141 .resume = wiphy_resume, 135 .resume = wiphy_resume,
142 .ns_type = &net_ns_type_operations, 136 .ns_type = &net_ns_type_operations,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2134576f426e..b7a531380e19 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1767,6 +1767,24 @@ DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
1767 TP_ARGS(wiphy, wdev) 1767 TP_ARGS(wiphy, wdev)
1768); 1768);
1769 1769
1770TRACE_EVENT(rdev_set_mac_acl,
1771 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1772 struct cfg80211_acl_data *params),
1773 TP_ARGS(wiphy, netdev, params),
1774 TP_STRUCT__entry(
1775 WIPHY_ENTRY
1776 NETDEV_ENTRY
1777 __field(u32, acl_policy)
1778 ),
1779 TP_fast_assign(
1780 WIPHY_ASSIGN;
1781 NETDEV_ASSIGN;
1782 __entry->acl_policy = params->acl_policy;
1783 ),
1784 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
1785 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy)
1786);
1787
1770/************************************************************* 1788/*************************************************************
1771 * cfg80211 exported functions traces * 1789 * cfg80211 exported functions traces *
1772 *************************************************************/ 1790 *************************************************************/
@@ -2033,6 +2051,21 @@ TRACE_EVENT(cfg80211_reg_can_beacon,
2033 WIPHY_PR_ARG, CHAN_DEF_PR_ARG) 2051 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2034); 2052);
2035 2053
2054TRACE_EVENT(cfg80211_chandef_dfs_required,
2055 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
2056 TP_ARGS(wiphy, chandef),
2057 TP_STRUCT__entry(
2058 WIPHY_ENTRY
2059 CHAN_DEF_ENTRY
2060 ),
2061 TP_fast_assign(
2062 WIPHY_ASSIGN;
2063 CHAN_DEF_ASSIGN(chandef);
2064 ),
2065 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
2066 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2067);
2068
2036TRACE_EVENT(cfg80211_ch_switch_notify, 2069TRACE_EVENT(cfg80211_ch_switch_notify,
2037 TP_PROTO(struct net_device *netdev, 2070 TP_PROTO(struct net_device *netdev,
2038 struct cfg80211_chan_def *chandef), 2071 struct cfg80211_chan_def *chandef),
@@ -2049,6 +2082,36 @@ TRACE_EVENT(cfg80211_ch_switch_notify,
2049 NETDEV_PR_ARG, CHAN_DEF_PR_ARG) 2082 NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
2050); 2083);
2051 2084
2085TRACE_EVENT(cfg80211_radar_event,
2086 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
2087 TP_ARGS(wiphy, chandef),
2088 TP_STRUCT__entry(
2089 WIPHY_ENTRY
2090 CHAN_DEF_ENTRY
2091 ),
2092 TP_fast_assign(
2093 WIPHY_ASSIGN;
2094 CHAN_DEF_ASSIGN(chandef);
2095 ),
2096 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
2097 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2098);
2099
2100TRACE_EVENT(cfg80211_cac_event,
2101 TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
2102 TP_ARGS(netdev, evt),
2103 TP_STRUCT__entry(
2104 NETDEV_ENTRY
2105 __field(enum nl80211_radar_event, evt)
2106 ),
2107 TP_fast_assign(
2108 NETDEV_ASSIGN;
2109 __entry->evt = evt;
2110 ),
2111 TP_printk(NETDEV_PR_FMT ", event: %d",
2112 NETDEV_PR_ARG, __entry->evt)
2113);
2114
2052DECLARE_EVENT_CLASS(cfg80211_rx_evt, 2115DECLARE_EVENT_CLASS(cfg80211_rx_evt,
2053 TP_PROTO(struct net_device *netdev, const u8 *addr), 2116 TP_PROTO(struct net_device *netdev, const u8 *addr),
2054 TP_ARGS(netdev, addr), 2117 TP_ARGS(netdev, addr),
@@ -2315,6 +2378,41 @@ TRACE_EVENT(cfg80211_return_u32,
2315 TP_printk("ret: %u", __entry->ret) 2378 TP_printk("ret: %u", __entry->ret)
2316); 2379);
2317 2380
2381TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2382 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
2383 struct cfg80211_wowlan_wakeup *wakeup),
2384 TP_ARGS(wiphy, wdev, wakeup),
2385 TP_STRUCT__entry(
2386 WIPHY_ENTRY
2387 WDEV_ENTRY
2388 __field(bool, disconnect)
2389 __field(bool, magic_pkt)
2390 __field(bool, gtk_rekey_failure)
2391 __field(bool, eap_identity_req)
2392 __field(bool, four_way_handshake)
2393 __field(bool, rfkill_release)
2394 __field(s32, pattern_idx)
2395 __field(u32, packet_len)
2396 __dynamic_array(u8, packet, wakeup->packet_present_len)
2397 ),
2398 TP_fast_assign(
2399 WIPHY_ASSIGN;
2400 WDEV_ASSIGN;
2401 __entry->disconnect = wakeup->disconnect;
2402 __entry->magic_pkt = wakeup->magic_pkt;
2403 __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure;
2404 __entry->eap_identity_req = wakeup->eap_identity_req;
2405 __entry->four_way_handshake = wakeup->four_way_handshake;
2406 __entry->rfkill_release = wakeup->rfkill_release;
2407 __entry->pattern_idx = wakeup->pattern_idx;
2408 __entry->packet_len = wakeup->packet_len;
2409 if (wakeup->packet && wakeup->packet_present_len)
2410 memcpy(__get_dynamic_array(packet), wakeup->packet,
2411 wakeup->packet_present_len);
2412 ),
2413 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
2414);
2415
2318#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 2416#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2319 2417
2320#undef TRACE_INCLUDE_PATH 2418#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 16d76a807c2f..37a56ee1e1ed 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1184,7 +1184,8 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1184 struct wireless_dev *wdev, 1184 struct wireless_dev *wdev,
1185 enum nl80211_iftype iftype, 1185 enum nl80211_iftype iftype,
1186 struct ieee80211_channel *chan, 1186 struct ieee80211_channel *chan,
1187 enum cfg80211_chan_mode chanmode) 1187 enum cfg80211_chan_mode chanmode,
1188 u8 radar_detect)
1188{ 1189{
1189 struct wireless_dev *wdev_iter; 1190 struct wireless_dev *wdev_iter;
1190 u32 used_iftypes = BIT(iftype); 1191 u32 used_iftypes = BIT(iftype);
@@ -1195,14 +1196,46 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1195 enum cfg80211_chan_mode chmode; 1196 enum cfg80211_chan_mode chmode;
1196 int num_different_channels = 0; 1197 int num_different_channels = 0;
1197 int total = 1; 1198 int total = 1;
1199 bool radar_required;
1198 int i, j; 1200 int i, j;
1199 1201
1200 ASSERT_RTNL(); 1202 ASSERT_RTNL();
1201 lockdep_assert_held(&rdev->devlist_mtx); 1203 lockdep_assert_held(&rdev->devlist_mtx);
1202 1204
1205 if (WARN_ON(hweight32(radar_detect) > 1))
1206 return -EINVAL;
1207
1208 switch (iftype) {
1209 case NL80211_IFTYPE_ADHOC:
1210 case NL80211_IFTYPE_AP:
1211 case NL80211_IFTYPE_AP_VLAN:
1212 case NL80211_IFTYPE_MESH_POINT:
1213 case NL80211_IFTYPE_P2P_GO:
1214 case NL80211_IFTYPE_WDS:
1215 radar_required = !!(chan &&
1216 (chan->flags & IEEE80211_CHAN_RADAR));
1217 break;
1218 case NL80211_IFTYPE_P2P_CLIENT:
1219 case NL80211_IFTYPE_STATION:
1220 case NL80211_IFTYPE_P2P_DEVICE:
1221 case NL80211_IFTYPE_MONITOR:
1222 radar_required = false;
1223 break;
1224 case NUM_NL80211_IFTYPES:
1225 case NL80211_IFTYPE_UNSPECIFIED:
1226 default:
1227 return -EINVAL;
1228 }
1229
1230 if (radar_required && !radar_detect)
1231 return -EINVAL;
1232
1203 /* Always allow software iftypes */ 1233 /* Always allow software iftypes */
1204 if (rdev->wiphy.software_iftypes & BIT(iftype)) 1234 if (rdev->wiphy.software_iftypes & BIT(iftype)) {
1235 if (radar_detect)
1236 return -EINVAL;
1205 return 0; 1237 return 0;
1238 }
1206 1239
1207 memset(num, 0, sizeof(num)); 1240 memset(num, 0, sizeof(num));
1208 memset(used_channels, 0, sizeof(used_channels)); 1241 memset(used_channels, 0, sizeof(used_channels));
@@ -1275,7 +1308,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1275 used_iftypes |= BIT(wdev_iter->iftype); 1308 used_iftypes |= BIT(wdev_iter->iftype);
1276 } 1309 }
1277 1310
1278 if (total == 1) 1311 if (total == 1 && !radar_detect)
1279 return 0; 1312 return 0;
1280 1313
1281 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 1314 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
@@ -1308,6 +1341,9 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1308 } 1341 }
1309 } 1342 }
1310 1343
1344 if (radar_detect && !(c->radar_detect_widths & radar_detect))
1345 goto cont;
1346
1311 /* 1347 /*
1312 * Finally check that all iftypes that we're currently 1348 * Finally check that all iftypes that we're currently
1313 * using are actually part of this combination. If they 1349 * using are actually part of this combination. If they
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 8bafa31fa9f8..e98a01c1034f 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -143,7 +143,8 @@ static const struct file_operations wireless_seq_fops = {
143int __net_init wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_create("wireless", S_IRUGO, net->proc_net,
147 &wireless_seq_fops))
147 return -ENOMEM; 148 return -ENOMEM;
148 149
149 return 0; 150 return 0;
@@ -151,5 +152,5 @@ int __net_init wext_proc_init(struct net *net)
151 152
152void __net_exit wext_proc_exit(struct net *net) 153void __net_exit wext_proc_exit(struct net *net)
153{ 154{
154 proc_net_remove(net, "wireless"); 155 remove_proc_entry("wireless", net->proc_net);
155} 156}
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index e6759c9660bb..c959312c45e3 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -3,8 +3,7 @@
3# 3#
4 4
5config X25 5config X25
6 tristate "CCITT X.25 Packet Layer (EXPERIMENTAL)" 6 tristate "CCITT X.25 Packet Layer"
7 depends on EXPERIMENTAL
8 ---help--- 7 ---help---
9 X.25 is a set of standardized network protocols, similar in scope to 8 X.25 is a set of standardized network protocols, similar in scope to
10 frame relay; the one physical line from your box to the X.25 network 9 frame relay; the one physical line from your box to the X.25 network
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index ce90b8d92365..bda1a13628a8 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -21,8 +21,8 @@ config XFRM_USER
21 If unsure, say Y. 21 If unsure, say Y.
22 22
23config XFRM_SUB_POLICY 23config XFRM_SUB_POLICY
24 bool "Transformation sub policy support (EXPERIMENTAL)" 24 bool "Transformation sub policy support"
25 depends on XFRM && EXPERIMENTAL 25 depends on XFRM
26 ---help--- 26 ---help---
27 Support sub policy for developers. By using sub policy with main 27 Support sub policy for developers. By using sub policy with main
28 one, two policies can be applied to the same packet at once. 28 one, two policies can be applied to the same packet at once.
@@ -31,8 +31,8 @@ config XFRM_SUB_POLICY
31 If unsure, say N. 31 If unsure, say N.
32 32
33config XFRM_MIGRATE 33config XFRM_MIGRATE
34 bool "Transformation migrate database (EXPERIMENTAL)" 34 bool "Transformation migrate database"
35 depends on XFRM && EXPERIMENTAL 35 depends on XFRM
36 ---help--- 36 ---help---
37 A feature to update locator(s) of a given IPsec security 37 A feature to update locator(s) of a given IPsec security
38 association dynamically. This feature is required, for 38 association dynamically. This feature is required, for
@@ -42,8 +42,8 @@ config XFRM_MIGRATE
42 If unsure, say N. 42 If unsure, say N.
43 43
44config XFRM_STATISTICS 44config XFRM_STATISTICS
45 bool "Transformation statistics (EXPERIMENTAL)" 45 bool "Transformation statistics"
46 depends on INET && XFRM && PROC_FS && EXPERIMENTAL 46 depends on INET && XFRM && PROC_FS
47 ---help--- 47 ---help---
48 This statistics is not a SNMP/MIB specification but shows 48 This statistics is not a SNMP/MIB specification but shows
49 statistics about transformation error (or almost error) factor 49 statistics about transformation error (or almost error) factor
@@ -68,8 +68,8 @@ config NET_KEY
68 Say Y unless you know what you are doing. 68 Say Y unless you know what you are doing.
69 69
70config NET_KEY_MIGRATE 70config NET_KEY_MIGRATE
71 bool "PF_KEY MIGRATE (EXPERIMENTAL)" 71 bool "PF_KEY MIGRATE"
72 depends on NET_KEY && EXPERIMENTAL 72 depends on NET_KEY
73 select XFRM_MIGRATE 73 select XFRM_MIGRATE
74 ---help--- 74 ---help---
75 Add a PF_KEY MIGRATE message to PF_KEYv2 socket family. 75 Add a PF_KEY MIGRATE message to PF_KEYv2 socket family.
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 4ce2d93162c1..6fb9d00a75dc 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -35,6 +35,8 @@ static struct xfrm_algo_desc aead_list[] = {
35 } 35 }
36 }, 36 },
37 37
38 .pfkey_supported = 1,
39
38 .desc = { 40 .desc = {
39 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8, 41 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8,
40 .sadb_alg_ivlen = 8, 42 .sadb_alg_ivlen = 8,
@@ -51,6 +53,8 @@ static struct xfrm_algo_desc aead_list[] = {
51 } 53 }
52 }, 54 },
53 55
56 .pfkey_supported = 1,
57
54 .desc = { 58 .desc = {
55 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12, 59 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12,
56 .sadb_alg_ivlen = 8, 60 .sadb_alg_ivlen = 8,
@@ -67,6 +71,8 @@ static struct xfrm_algo_desc aead_list[] = {
67 } 71 }
68 }, 72 },
69 73
74 .pfkey_supported = 1,
75
70 .desc = { 76 .desc = {
71 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16, 77 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16,
72 .sadb_alg_ivlen = 8, 78 .sadb_alg_ivlen = 8,
@@ -83,6 +89,8 @@ static struct xfrm_algo_desc aead_list[] = {
83 } 89 }
84 }, 90 },
85 91
92 .pfkey_supported = 1,
93
86 .desc = { 94 .desc = {
87 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8, 95 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8,
88 .sadb_alg_ivlen = 8, 96 .sadb_alg_ivlen = 8,
@@ -99,6 +107,8 @@ static struct xfrm_algo_desc aead_list[] = {
99 } 107 }
100 }, 108 },
101 109
110 .pfkey_supported = 1,
111
102 .desc = { 112 .desc = {
103 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12, 113 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12,
104 .sadb_alg_ivlen = 8, 114 .sadb_alg_ivlen = 8,
@@ -115,6 +125,8 @@ static struct xfrm_algo_desc aead_list[] = {
115 } 125 }
116 }, 126 },
117 127
128 .pfkey_supported = 1,
129
118 .desc = { 130 .desc = {
119 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16, 131 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16,
120 .sadb_alg_ivlen = 8, 132 .sadb_alg_ivlen = 8,
@@ -131,6 +143,8 @@ static struct xfrm_algo_desc aead_list[] = {
131 } 143 }
132 }, 144 },
133 145
146 .pfkey_supported = 1,
147
134 .desc = { 148 .desc = {
135 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC, 149 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
136 .sadb_alg_ivlen = 8, 150 .sadb_alg_ivlen = 8,
@@ -151,6 +165,8 @@ static struct xfrm_algo_desc aalg_list[] = {
151 } 165 }
152 }, 166 },
153 167
168 .pfkey_supported = 1,
169
154 .desc = { 170 .desc = {
155 .sadb_alg_id = SADB_X_AALG_NULL, 171 .sadb_alg_id = SADB_X_AALG_NULL,
156 .sadb_alg_ivlen = 0, 172 .sadb_alg_ivlen = 0,
@@ -169,6 +185,8 @@ static struct xfrm_algo_desc aalg_list[] = {
169 } 185 }
170 }, 186 },
171 187
188 .pfkey_supported = 1,
189
172 .desc = { 190 .desc = {
173 .sadb_alg_id = SADB_AALG_MD5HMAC, 191 .sadb_alg_id = SADB_AALG_MD5HMAC,
174 .sadb_alg_ivlen = 0, 192 .sadb_alg_ivlen = 0,
@@ -187,6 +205,8 @@ static struct xfrm_algo_desc aalg_list[] = {
187 } 205 }
188 }, 206 },
189 207
208 .pfkey_supported = 1,
209
190 .desc = { 210 .desc = {
191 .sadb_alg_id = SADB_AALG_SHA1HMAC, 211 .sadb_alg_id = SADB_AALG_SHA1HMAC,
192 .sadb_alg_ivlen = 0, 212 .sadb_alg_ivlen = 0,
@@ -205,6 +225,8 @@ static struct xfrm_algo_desc aalg_list[] = {
205 } 225 }
206 }, 226 },
207 227
228 .pfkey_supported = 1,
229
208 .desc = { 230 .desc = {
209 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC, 231 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
210 .sadb_alg_ivlen = 0, 232 .sadb_alg_ivlen = 0,
@@ -222,6 +244,8 @@ static struct xfrm_algo_desc aalg_list[] = {
222 } 244 }
223 }, 245 },
224 246
247 .pfkey_supported = 1,
248
225 .desc = { 249 .desc = {
226 .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC, 250 .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
227 .sadb_alg_ivlen = 0, 251 .sadb_alg_ivlen = 0,
@@ -239,6 +263,8 @@ static struct xfrm_algo_desc aalg_list[] = {
239 } 263 }
240 }, 264 },
241 265
266 .pfkey_supported = 1,
267
242 .desc = { 268 .desc = {
243 .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC, 269 .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
244 .sadb_alg_ivlen = 0, 270 .sadb_alg_ivlen = 0,
@@ -257,6 +283,8 @@ static struct xfrm_algo_desc aalg_list[] = {
257 } 283 }
258 }, 284 },
259 285
286 .pfkey_supported = 1,
287
260 .desc = { 288 .desc = {
261 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC, 289 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
262 .sadb_alg_ivlen = 0, 290 .sadb_alg_ivlen = 0,
@@ -274,6 +302,8 @@ static struct xfrm_algo_desc aalg_list[] = {
274 } 302 }
275 }, 303 },
276 304
305 .pfkey_supported = 1,
306
277 .desc = { 307 .desc = {
278 .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC, 308 .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
279 .sadb_alg_ivlen = 0, 309 .sadb_alg_ivlen = 0,
@@ -295,6 +325,8 @@ static struct xfrm_algo_desc ealg_list[] = {
295 } 325 }
296 }, 326 },
297 327
328 .pfkey_supported = 1,
329
298 .desc = { 330 .desc = {
299 .sadb_alg_id = SADB_EALG_NULL, 331 .sadb_alg_id = SADB_EALG_NULL,
300 .sadb_alg_ivlen = 0, 332 .sadb_alg_ivlen = 0,
@@ -313,6 +345,8 @@ static struct xfrm_algo_desc ealg_list[] = {
313 } 345 }
314 }, 346 },
315 347
348 .pfkey_supported = 1,
349
316 .desc = { 350 .desc = {
317 .sadb_alg_id = SADB_EALG_DESCBC, 351 .sadb_alg_id = SADB_EALG_DESCBC,
318 .sadb_alg_ivlen = 8, 352 .sadb_alg_ivlen = 8,
@@ -331,6 +365,8 @@ static struct xfrm_algo_desc ealg_list[] = {
331 } 365 }
332 }, 366 },
333 367
368 .pfkey_supported = 1,
369
334 .desc = { 370 .desc = {
335 .sadb_alg_id = SADB_EALG_3DESCBC, 371 .sadb_alg_id = SADB_EALG_3DESCBC,
336 .sadb_alg_ivlen = 8, 372 .sadb_alg_ivlen = 8,
@@ -349,6 +385,8 @@ static struct xfrm_algo_desc ealg_list[] = {
349 } 385 }
350 }, 386 },
351 387
388 .pfkey_supported = 1,
389
352 .desc = { 390 .desc = {
353 .sadb_alg_id = SADB_X_EALG_CASTCBC, 391 .sadb_alg_id = SADB_X_EALG_CASTCBC,
354 .sadb_alg_ivlen = 8, 392 .sadb_alg_ivlen = 8,
@@ -367,6 +405,8 @@ static struct xfrm_algo_desc ealg_list[] = {
367 } 405 }
368 }, 406 },
369 407
408 .pfkey_supported = 1,
409
370 .desc = { 410 .desc = {
371 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC, 411 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
372 .sadb_alg_ivlen = 8, 412 .sadb_alg_ivlen = 8,
@@ -385,6 +425,8 @@ static struct xfrm_algo_desc ealg_list[] = {
385 } 425 }
386 }, 426 },
387 427
428 .pfkey_supported = 1,
429
388 .desc = { 430 .desc = {
389 .sadb_alg_id = SADB_X_EALG_AESCBC, 431 .sadb_alg_id = SADB_X_EALG_AESCBC,
390 .sadb_alg_ivlen = 8, 432 .sadb_alg_ivlen = 8,
@@ -403,6 +445,8 @@ static struct xfrm_algo_desc ealg_list[] = {
403 } 445 }
404 }, 446 },
405 447
448 .pfkey_supported = 1,
449
406 .desc = { 450 .desc = {
407 .sadb_alg_id = SADB_X_EALG_SERPENTCBC, 451 .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
408 .sadb_alg_ivlen = 8, 452 .sadb_alg_ivlen = 8,
@@ -421,6 +465,8 @@ static struct xfrm_algo_desc ealg_list[] = {
421 } 465 }
422 }, 466 },
423 467
468 .pfkey_supported = 1,
469
424 .desc = { 470 .desc = {
425 .sadb_alg_id = SADB_X_EALG_CAMELLIACBC, 471 .sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
426 .sadb_alg_ivlen = 8, 472 .sadb_alg_ivlen = 8,
@@ -439,6 +485,8 @@ static struct xfrm_algo_desc ealg_list[] = {
439 } 485 }
440 }, 486 },
441 487
488 .pfkey_supported = 1,
489
442 .desc = { 490 .desc = {
443 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC, 491 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
444 .sadb_alg_ivlen = 8, 492 .sadb_alg_ivlen = 8,
@@ -456,6 +504,8 @@ static struct xfrm_algo_desc ealg_list[] = {
456 } 504 }
457 }, 505 },
458 506
507 .pfkey_supported = 1,
508
459 .desc = { 509 .desc = {
460 .sadb_alg_id = SADB_X_EALG_AESCTR, 510 .sadb_alg_id = SADB_X_EALG_AESCTR,
461 .sadb_alg_ivlen = 8, 511 .sadb_alg_ivlen = 8,
@@ -473,6 +523,7 @@ static struct xfrm_algo_desc calg_list[] = {
473 .threshold = 90, 523 .threshold = 90,
474 } 524 }
475 }, 525 },
526 .pfkey_supported = 1,
476 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE } 527 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
477}, 528},
478{ 529{
@@ -482,6 +533,7 @@ static struct xfrm_algo_desc calg_list[] = {
482 .threshold = 90, 533 .threshold = 90,
483 } 534 }
484 }, 535 },
536 .pfkey_supported = 1,
485 .desc = { .sadb_alg_id = SADB_X_CALG_LZS } 537 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
486}, 538},
487{ 539{
@@ -491,6 +543,7 @@ static struct xfrm_algo_desc calg_list[] = {
491 .threshold = 50, 543 .threshold = 50,
492 } 544 }
493 }, 545 },
546 .pfkey_supported = 1,
494 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH } 547 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
495}, 548},
496}; 549};
@@ -700,8 +753,7 @@ void xfrm_probe_algs(void)
700 } 753 }
701 754
702 for (i = 0; i < ealg_entries(); i++) { 755 for (i = 0; i < ealg_entries(); i++) {
703 status = crypto_has_blkcipher(ealg_list[i].name, 0, 756 status = crypto_has_ablkcipher(ealg_list[i].name, 0, 0);
704 CRYPTO_ALG_ASYNC);
705 if (ealg_list[i].available != status) 757 if (ealg_list[i].available != status)
706 ealg_list[i].available = status; 758 ealg_list[i].available = status;
707 } 759 }
@@ -715,27 +767,27 @@ void xfrm_probe_algs(void)
715} 767}
716EXPORT_SYMBOL_GPL(xfrm_probe_algs); 768EXPORT_SYMBOL_GPL(xfrm_probe_algs);
717 769
718int xfrm_count_auth_supported(void) 770int xfrm_count_pfkey_auth_supported(void)
719{ 771{
720 int i, n; 772 int i, n;
721 773
722 for (i = 0, n = 0; i < aalg_entries(); i++) 774 for (i = 0, n = 0; i < aalg_entries(); i++)
723 if (aalg_list[i].available) 775 if (aalg_list[i].available && aalg_list[i].pfkey_supported)
724 n++; 776 n++;
725 return n; 777 return n;
726} 778}
727EXPORT_SYMBOL_GPL(xfrm_count_auth_supported); 779EXPORT_SYMBOL_GPL(xfrm_count_pfkey_auth_supported);
728 780
729int xfrm_count_enc_supported(void) 781int xfrm_count_pfkey_enc_supported(void)
730{ 782{
731 int i, n; 783 int i, n;
732 784
733 for (i = 0, n = 0; i < ealg_entries(); i++) 785 for (i = 0, n = 0; i < ealg_entries(); i++)
734 if (ealg_list[i].available) 786 if (ealg_list[i].available && ealg_list[i].pfkey_supported)
735 n++; 787 n++;
736 return n; 788 return n;
737} 789}
738EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); 790EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
739 791
740#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) 792#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
741 793
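
The pfkey_supported flag threaded through aead_list, aalg_list, ealg_list and calg_list above lets the renamed xfrm_count_pfkey_auth_supported() / xfrm_count_pfkey_enc_supported() report only algorithms that PF_KEYv2 can actually express, so algorithms without SADB identifiers can later be added without inflating the counts PF_KEY userspace sees. A condensed sketch of the filtered count (illustrative struct, not the kernel one):

struct alg_desc {
	int available;		/* crypto layer provides it */
	int pfkey_supported;	/* has a SADB algorithm id */
};

static int count_pfkey_supported(const struct alg_desc *list, int n)
{
	int i, cnt = 0;

	for (i = 0; i < n; i++)
		if (list[i].available && list[i].pfkey_supported)
			cnt++;
	return cnt;
}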
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 95a338c89f99..bcfda8921b5b 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -61,6 +61,12 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
61 } 61 }
62 62
63 spin_lock_bh(&x->lock); 63 spin_lock_bh(&x->lock);
64
65 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
67 goto error;
68 }
69
64 err = xfrm_state_check_expire(x); 70 err = xfrm_state_check_expire(x);
65 if (err) { 71 if (err) {
66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED); 72 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
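
The added check makes xfrm_output_one() refuse to transform packets through a state that is no longer XFRM_STATE_VALID (for instance one that is dying or being torn down), taking x->lock as before and accounting the drop in the new XfrmOutStateInvalid counter registered by the xfrm_proc.c hunk further down. A toy sketch of the guard, with illustrative names:

enum sa_state { SA_VALID, SA_DYING, SA_DEAD };

static int output_one(enum sa_state s, unsigned long *stat_invalid)
{
	if (s != SA_VALID) {		/* reject before any work is done */
		(*stat_invalid)++;	/* analogue of XfrmOutStateInvalid */
		return -1;
	}
	/* ... expiry check and the actual transform would follow ... */
	return 0;
}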
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41eabc46f110..5b47180986f8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -35,6 +35,10 @@
35 35
36#include "xfrm_hash.h" 36#include "xfrm_hash.h"
37 37
38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40#define XFRM_MAX_QUEUE_LEN 100
41
38DEFINE_MUTEX(xfrm_cfg_mutex); 42DEFINE_MUTEX(xfrm_cfg_mutex);
39EXPORT_SYMBOL(xfrm_cfg_mutex); 43EXPORT_SYMBOL(xfrm_cfg_mutex);
40 44
@@ -51,7 +55,7 @@ static struct kmem_cache *xfrm_dst_cache __read_mostly;
51static void xfrm_init_pmtu(struct dst_entry *dst); 55static void xfrm_init_pmtu(struct dst_entry *dst);
52static int stale_bundle(struct dst_entry *dst); 56static int stale_bundle(struct dst_entry *dst);
53static int xfrm_bundle_ok(struct xfrm_dst *xdst); 57static int xfrm_bundle_ok(struct xfrm_dst *xdst);
54 58static void xfrm_policy_queue_process(unsigned long arg);
55 59
56static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 60static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
57 int dir); 61 int dir);
@@ -287,8 +291,11 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
287 INIT_HLIST_NODE(&policy->byidx); 291 INIT_HLIST_NODE(&policy->byidx);
288 rwlock_init(&policy->lock); 292 rwlock_init(&policy->lock);
289 atomic_set(&policy->refcnt, 1); 293 atomic_set(&policy->refcnt, 1);
294 skb_queue_head_init(&policy->polq.hold_queue);
290 setup_timer(&policy->timer, xfrm_policy_timer, 295 setup_timer(&policy->timer, xfrm_policy_timer,
291 (unsigned long)policy); 296 (unsigned long)policy);
297 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
298 (unsigned long)policy);
292 policy->flo.ops = &xfrm_policy_fc_ops; 299 policy->flo.ops = &xfrm_policy_fc_ops;
293 } 300 }
294 return policy; 301 return policy;
@@ -309,6 +316,16 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
309} 316}
310EXPORT_SYMBOL(xfrm_policy_destroy); 317EXPORT_SYMBOL(xfrm_policy_destroy);
311 318
319static void xfrm_queue_purge(struct sk_buff_head *list)
320{
321 struct sk_buff *skb;
322
323 while ((skb = skb_dequeue(list)) != NULL) {
324 dev_put(skb->dev);
325 kfree_skb(skb);
326 }
327}
328
312/* Rule must be locked. Release descendant resources, announce 329/* Rule must be locked. Release descendant resources, announce
313 * entry dead. The rule must already be unlinked from the lists. 330 * entry dead. The rule must already be unlinked from the lists.
314 */ 331 */
@@ -319,6 +336,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
319 336
320 atomic_inc(&policy->genid); 337 atomic_inc(&policy->genid);
321 338
339 del_timer(&policy->polq.hold_timer);
340 xfrm_queue_purge(&policy->polq.hold_queue);
341
322 if (del_timer(&policy->timer)) 342 if (del_timer(&policy->timer))
323 xfrm_pol_put(policy); 343 xfrm_pol_put(policy);
324 344
@@ -562,6 +582,46 @@ static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s
562 return 0; 582 return 0;
563} 583}
564 584
585static void xfrm_policy_requeue(struct xfrm_policy *old,
586 struct xfrm_policy *new)
587{
588 struct xfrm_policy_queue *pq = &old->polq;
589 struct sk_buff_head list;
590
591 __skb_queue_head_init(&list);
592
593 spin_lock_bh(&pq->hold_queue.lock);
594 skb_queue_splice_init(&pq->hold_queue, &list);
595 del_timer(&pq->hold_timer);
596 spin_unlock_bh(&pq->hold_queue.lock);
597
598 if (skb_queue_empty(&list))
599 return;
600
601 pq = &new->polq;
602
603 spin_lock_bh(&pq->hold_queue.lock);
604 skb_queue_splice(&list, &pq->hold_queue);
605 pq->timeout = XFRM_QUEUE_TMO_MIN;
606 mod_timer(&pq->hold_timer, jiffies);
607 spin_unlock_bh(&pq->hold_queue.lock);
608}
609
610static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
611 struct xfrm_policy *pol)
612{
613 u32 mark = policy->mark.v & policy->mark.m;
614
615 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
616 return true;
617
618 if ((mark & pol->mark.m) == pol->mark.v &&
619 policy->priority == pol->priority)
620 return true;
621
622 return false;
623}
624
565int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 625int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
566{ 626{
567 struct net *net = xp_net(policy); 627 struct net *net = xp_net(policy);
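
xfrm_policy_mark_match() above widens the old inline test: a resident policy counts as the same policy if its (v, m) mark pair is identical to the new one, or if the new policy's masked mark still matches it and the priorities are equal, so a replacement can pick up the old policy's queued packets via xfrm_policy_requeue(). A worked example with illustrative values:

#include <stdio.h>

struct mark { unsigned v, m; };

static int mark_match(struct mark np, struct mark old, int same_prio)
{
	unsigned masked = np.v & np.m;

	if (np.v == old.v && np.m == old.m)
		return 1;				/* identical pair */
	return (masked & old.m) == old.v && same_prio;
}

int main(void)
{
	struct mark a = { 0x11, 0xff };
	struct mark b = { 0x11, 0xff };		/* same (v, m) pair */
	struct mark c = { 0x01, 0x0f };		/* looser mask */

	printf("%d\n", mark_match(a, b, 0));	/* 1: exact (v, m) match */
	printf("%d\n", mark_match(a, c, 1));	/* 1: 0x11 & 0x0f == 0x01 */
	printf("%d\n", mark_match(a, c, 0));	/* 0: priorities differ */
	return 0;
}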
@@ -569,7 +629,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
569 struct xfrm_policy *delpol; 629 struct xfrm_policy *delpol;
570 struct hlist_head *chain; 630 struct hlist_head *chain;
571 struct hlist_node *entry, *newpos; 631 struct hlist_node *entry, *newpos;
572 u32 mark = policy->mark.v & policy->mark.m;
573 632
574 write_lock_bh(&xfrm_policy_lock); 633 write_lock_bh(&xfrm_policy_lock);
575 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 634 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -578,7 +637,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
578 hlist_for_each_entry(pol, entry, chain, bydst) { 637 hlist_for_each_entry(pol, entry, chain, bydst) {
579 if (pol->type == policy->type && 638 if (pol->type == policy->type &&
580 !selector_cmp(&pol->selector, &policy->selector) && 639 !selector_cmp(&pol->selector, &policy->selector) &&
581 (mark & pol->mark.m) == pol->mark.v && 640 xfrm_policy_mark_match(policy, pol) &&
582 xfrm_sec_ctx_match(pol->security, policy->security) && 641 xfrm_sec_ctx_match(pol->security, policy->security) &&
583 !WARN_ON(delpol)) { 642 !WARN_ON(delpol)) {
584 if (excl) { 643 if (excl) {
@@ -603,8 +662,10 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
603 net->xfrm.policy_count[dir]++; 662 net->xfrm.policy_count[dir]++;
604 atomic_inc(&flow_cache_genid); 663 atomic_inc(&flow_cache_genid);
605 rt_genid_bump(net); 664 rt_genid_bump(net);
606 if (delpol) 665 if (delpol) {
666 xfrm_policy_requeue(delpol, policy);
607 __xfrm_policy_unlink(delpol, dir); 667 __xfrm_policy_unlink(delpol, dir);
668 }
608 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 669 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
609 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 670 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
610 policy->curlft.add_time = get_seconds(); 671 policy->curlft.add_time = get_seconds();
@@ -1115,11 +1176,15 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1115 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1176 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1116 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1177 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1117 } 1178 }
1118 if (old_pol) 1179 if (old_pol) {
1180 if (pol)
1181 xfrm_policy_requeue(old_pol, pol);
1182
1119 /* Unlinking succeeds always. This is the only function 1183 /* Unlinking succeeds always. This is the only function
1120 * allowed to delete or replace socket policy. 1184 * allowed to delete or replace socket policy.
1121 */ 1185 */
1122 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1186 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1187 }
1123 write_unlock_bh(&xfrm_policy_lock); 1188 write_unlock_bh(&xfrm_policy_lock);
1124 1189
1125 if (old_pol) { 1190 if (old_pol) {
@@ -1310,6 +1375,8 @@ static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *f
1310 * It means we need to try again resolving. */ 1375 * It means we need to try again resolving. */
1311 if (xdst->num_xfrms > 0) 1376 if (xdst->num_xfrms > 0)
1312 return NULL; 1377 return NULL;
1378 } else if (dst->flags & DST_XFRM_QUEUE) {
1379 return NULL;
1313 } else { 1380 } else {
1314 /* Real bundle */ 1381 /* Real bundle */
1315 if (stale_bundle(dst)) 1382 if (stale_bundle(dst))
@@ -1673,6 +1740,171 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1673 return xdst; 1740 return xdst;
1674} 1741}
1675 1742
1743static void xfrm_policy_queue_process(unsigned long arg)
1744{
1745 int err = 0;
1746 struct sk_buff *skb;
1747 struct sock *sk;
1748 struct dst_entry *dst;
1749 struct net_device *dev;
1750 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1751 struct xfrm_policy_queue *pq = &pol->polq;
1752 struct flowi fl;
1753 struct sk_buff_head list;
1754
1755 spin_lock(&pq->hold_queue.lock);
1756 skb = skb_peek(&pq->hold_queue);
1757 dst = skb_dst(skb);
1758 sk = skb->sk;
1759 xfrm_decode_session(skb, &fl, dst->ops->family);
1760 spin_unlock(&pq->hold_queue.lock);
1761
1762 dst_hold(dst->path);
1763 dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1764 sk, 0);
1765 if (IS_ERR(dst))
1766 goto purge_queue;
1767
1768 if (dst->flags & DST_XFRM_QUEUE) {
1769 dst_release(dst);
1770
1771 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1772 goto purge_queue;
1773
1774 pq->timeout = pq->timeout << 1;
1775 mod_timer(&pq->hold_timer, jiffies + pq->timeout);
1776 return;
1777 }
1778
1779 dst_release(dst);
1780
1781 __skb_queue_head_init(&list);
1782
1783 spin_lock(&pq->hold_queue.lock);
1784 pq->timeout = 0;
1785 skb_queue_splice_init(&pq->hold_queue, &list);
1786 spin_unlock(&pq->hold_queue.lock);
1787
1788 while (!skb_queue_empty(&list)) {
1789 skb = __skb_dequeue(&list);
1790
1791 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1792 dst_hold(skb_dst(skb)->path);
1793 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1794 &fl, skb->sk, 0);
1795 if (IS_ERR(dst)) {
1796 dev_put(skb->dev);
1797 kfree_skb(skb);
1798 continue;
1799 }
1800
1801 nf_reset(skb);
1802 skb_dst_drop(skb);
1803 skb_dst_set(skb, dst);
1804
1805 dev = skb->dev;
1806 err = dst_output(skb);
1807 dev_put(dev);
1808 }
1809
1810 return;
1811
1812purge_queue:
1813 pq->timeout = 0;
1814 xfrm_queue_purge(&pq->hold_queue);
1815}
1816
1817static int xdst_queue_output(struct sk_buff *skb)
1818{
1819 unsigned long sched_next;
1820 struct dst_entry *dst = skb_dst(skb);
1821 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1822 struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
1823
1824 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1825 kfree_skb(skb);
1826 return -EAGAIN;
1827 }
1828
1829 skb_dst_force(skb);
1830 dev_hold(skb->dev);
1831
1832 spin_lock_bh(&pq->hold_queue.lock);
1833
1834 if (!pq->timeout)
1835 pq->timeout = XFRM_QUEUE_TMO_MIN;
1836
1837 sched_next = jiffies + pq->timeout;
1838
1839 if (del_timer(&pq->hold_timer)) {
1840 if (time_before(pq->hold_timer.expires, sched_next))
1841 sched_next = pq->hold_timer.expires;
1842 }
1843
1844 __skb_queue_tail(&pq->hold_queue, skb);
1845 mod_timer(&pq->hold_timer, sched_next);
1846
1847 spin_unlock_bh(&pq->hold_queue.lock);
1848
1849 return 0;
1850}
1851
1852static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1853 struct dst_entry *dst,
1854 const struct flowi *fl,
1855 int num_xfrms,
1856 u16 family)
1857{
1858 int err;
1859 struct net_device *dev;
1860 struct dst_entry *dst1;
1861 struct xfrm_dst *xdst;
1862
1863 xdst = xfrm_alloc_dst(net, family);
1864 if (IS_ERR(xdst))
1865 return xdst;
1866
1867 if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
1868 (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
1869 return xdst;
1870
1871 dst1 = &xdst->u.dst;
1872 dst_hold(dst);
1873 xdst->route = dst;
1874
1875 dst_copy_metrics(dst1, dst);
1876
1877 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1878 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1879 dst1->lastuse = jiffies;
1880
1881 dst1->input = dst_discard;
1882 dst1->output = xdst_queue_output;
1883
1884 dst_hold(dst);
1885 dst1->child = dst;
1886 dst1->path = dst;
1887
1888 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1889
1890 err = -ENODEV;
1891 dev = dst->dev;
1892 if (!dev)
1893 goto free_dst;
1894
1895 err = xfrm_fill_dst(xdst, dev, fl);
1896 if (err)
1897 goto free_dst;
1898
1899out:
1900 return xdst;
1901
1902free_dst:
1903 dst_release(dst1);
1904 xdst = ERR_PTR(err);
1905 goto out;
1906}
1907
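
Taken together, xdst_queue_output(), xfrm_policy_queue_process() and xfrm_create_dummy_bundle() add a hold queue for packets that match a policy whose states are not resolved yet: a dummy bundle flagged DST_XFRM_QUEUE parks up to XFRM_MAX_QUEUE_LEN skbs on polq.hold_queue, and the hold timer retries resolution with the timeout doubling from XFRM_QUEUE_TMO_MIN until it reaches XFRM_QUEUE_TMO_MAX, at which point the queue is purged. A compact userspace sketch of just that backoff schedule (HZ value illustrative):

#include <stdio.h>

#define HZ	100
#define TMO_MIN	(HZ / 10)	/* XFRM_QUEUE_TMO_MIN */
#define TMO_MAX	(60 * HZ)	/* XFRM_QUEUE_TMO_MAX */

int main(void)
{
	unsigned timeout = TMO_MIN;

	/* each timer expiry: still unresolved -> double and rearm */
	while (timeout < TMO_MAX) {
		printf("rearm timer, next try in %u ticks\n", timeout);
		timeout <<= 1;		/* pq->timeout = pq->timeout << 1 */
	}
	printf("give up, purge hold queue\n");
	return 0;
}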
1676static struct flow_cache_object * 1908static struct flow_cache_object *
1677xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1909xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1678 struct flow_cache_object *oldflo, void *ctx) 1910 struct flow_cache_object *oldflo, void *ctx)
@@ -1751,7 +1983,7 @@ make_dummy_bundle:
1751 /* We found policies, but there are no bundles to instantiate: 1983 /* We found policies, but there are no bundles to instantiate:
1752 * either because the policy blocks, has no transformations or 1984 * either because the policy blocks, has no transformations or
1753 * we could not build a template (no xfrm_states). */ 1985 * we could not build a template (no xfrm_states). */
1754 xdst = xfrm_alloc_dst(net, family); 1986 xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
1755 if (IS_ERR(xdst)) { 1987 if (IS_ERR(xdst)) {
1756 xfrm_pols_put(pols, num_pols); 1988 xfrm_pols_put(pols, num_pols);
1757 return ERR_CAST(xdst); 1989 return ERR_CAST(xdst);
@@ -2359,6 +2591,9 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
2359 (dst->dev && !netif_running(dst->dev))) 2591 (dst->dev && !netif_running(dst->dev)))
2360 return 0; 2592 return 0;
2361 2593
2594 if (dst->flags & DST_XFRM_QUEUE)
2595 return 1;
2596
2362 last = NULL; 2597 last = NULL;
2363 2598
2364 do { 2599 do {
@@ -2656,7 +2891,7 @@ static void xfrm_policy_fini(struct net *net)
2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2891 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2657 2892
2658 htab = &net->xfrm.policy_bydst[dir]; 2893 htab = &net->xfrm.policy_bydst[dir];
2659 sz = (htab->hmask + 1); 2894 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2660 WARN_ON(!hlist_empty(htab->table)); 2895 WARN_ON(!hlist_empty(htab->table));
2661 xfrm_hash_free(htab->table, sz); 2896 xfrm_hash_free(htab->table, sz);
2662 } 2897 }
@@ -2786,10 +3021,10 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2786{ 3021{
2787 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 3022 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2788 if (sel_tgt->family == sel_cmp->family && 3023 if (sel_tgt->family == sel_cmp->family &&
2789 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, 3024 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
2790 sel_cmp->family) == 0 && 3025 sel_cmp->family) &&
2791 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, 3026 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
2792 sel_cmp->family) == 0 && 3027 sel_cmp->family) &&
2793 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 3028 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2794 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 3029 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2795 return true; 3030 return true;
@@ -2847,10 +3082,10 @@ static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tm
2847 switch (t->mode) { 3082 switch (t->mode) {
2848 case XFRM_MODE_TUNNEL: 3083 case XFRM_MODE_TUNNEL:
2849 case XFRM_MODE_BEET: 3084 case XFRM_MODE_BEET:
2850 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, 3085 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
2851 m->old_family) == 0 && 3086 m->old_family) &&
2852 xfrm_addr_cmp(&t->saddr, &m->old_saddr, 3087 xfrm_addr_equal(&t->saddr, &m->old_saddr,
2853 m->old_family) == 0) { 3088 m->old_family)) {
2854 match = 1; 3089 match = 1;
2855 } 3090 }
2856 break; 3091 break;
@@ -2916,10 +3151,10 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
2916 return -EINVAL; 3151 return -EINVAL;
2917 3152
2918 for (i = 0; i < num_migrate; i++) { 3153 for (i = 0; i < num_migrate; i++) {
2919 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, 3154 if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
2920 m[i].old_family) == 0) && 3155 m[i].old_family) &&
2921 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, 3156 xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
2922 m[i].old_family) == 0)) 3157 m[i].old_family))
2923 return -EINVAL; 3158 return -EINVAL;
2924 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 3159 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2925 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 3160 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index d0a1af8ed584..c721b0d9ab8b 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -43,6 +43,7 @@ static const struct snmp_mib xfrm_mib_list[] = {
43 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD), 43 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
44 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR), 44 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
45 SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR), 45 SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
46 SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID),
46 SNMP_MIB_SENTINEL 47 SNMP_MIB_SENTINEL
47}; 48};
48 49
@@ -73,13 +74,13 @@ static const struct file_operations xfrm_statistics_seq_fops = {
73 74
74int __net_init xfrm_proc_init(struct net *net) 75int __net_init xfrm_proc_init(struct net *net)
75{ 76{
76 if (!proc_net_fops_create(net, "xfrm_stat", S_IRUGO, 77 if (!proc_create("xfrm_stat", S_IRUGO, net->proc_net,
77 &xfrm_statistics_seq_fops)) 78 &xfrm_statistics_seq_fops))
78 return -ENOMEM; 79 return -ENOMEM;
79 return 0; 80 return 0;
80} 81}
81 82
82void xfrm_proc_fini(struct net *net) 83void xfrm_proc_fini(struct net *net)
83{ 84{
84 proc_net_remove(net, "xfrm_stat"); 85 remove_proc_entry("xfrm_stat", net->proc_net);
85} 86}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 765f6fe951eb..35754cc8a9e5 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
242 u32 diff; 242 u32 diff;
243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
244 u32 seq = ntohl(net_seq); 244 u32 seq = ntohl(net_seq);
245 u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 245 u32 pos;
246 246
247 if (!replay_esn->replay_window) 247 if (!replay_esn->replay_window)
248 return; 248 return;
249 249
250 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
251
250 if (seq > replay_esn->seq) { 252 if (seq > replay_esn->seq) {
251 diff = seq - replay_esn->seq; 253 diff = seq - replay_esn->seq;
252 254
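
The reordering in xfrm_replay_advance_bmp() is a correctness fix, not a cleanup: pos is (replay_esn->seq - 1) % replay_esn->replay_window, so computing it before the replay_window check divides by zero whenever ESN is configured with a zero-length window. Moving the computation below the early return closes that. A minimal reproduction of the hazard:

#include <stdio.h>

static unsigned advance(unsigned seq, unsigned window)
{
	/* buggy order: unsigned pos = (seq - 1) % window;  -- traps if 0 */
	if (!window)
		return 0;		/* nothing to track, bail out first */
	return (seq - 1) % window;	/* safe: window != 0 here */
}

int main(void)
{
	printf("%u\n", advance(5, 0));	/* div-by-zero before the fix */
	printf("%u\n", advance(5, 3));	/* (5 - 1) % 3 == 1 */
	return 0;
}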
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 3459692092ec..ae01bdbcb294 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -158,8 +158,8 @@ out_unlock:
158 mutex_unlock(&hash_resize_mutex); 158 mutex_unlock(&hash_resize_mutex);
159} 159}
160 160
161static DEFINE_RWLOCK(xfrm_state_afinfo_lock); 161static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
162static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; 162static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
163 163
164static DEFINE_SPINLOCK(xfrm_state_gc_lock); 164static DEFINE_SPINLOCK(xfrm_state_gc_lock);
165 165
@@ -168,58 +168,45 @@ int __xfrm_state_delete(struct xfrm_state *x);
168int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 168int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
169void km_state_expired(struct xfrm_state *x, int hard, u32 portid); 169void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
170 170
171static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family) 171static DEFINE_SPINLOCK(xfrm_type_lock);
172{
173 struct xfrm_state_afinfo *afinfo;
174 if (unlikely(family >= NPROTO))
175 return NULL;
176 write_lock_bh(&xfrm_state_afinfo_lock);
177 afinfo = xfrm_state_afinfo[family];
178 if (unlikely(!afinfo))
179 write_unlock_bh(&xfrm_state_afinfo_lock);
180 return afinfo;
181}
182
183static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
184 __releases(xfrm_state_afinfo_lock)
185{
186 write_unlock_bh(&xfrm_state_afinfo_lock);
187}
188
189int xfrm_register_type(const struct xfrm_type *type, unsigned short family) 172int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
190{ 173{
191 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 174 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
192 const struct xfrm_type **typemap; 175 const struct xfrm_type **typemap;
193 int err = 0; 176 int err = 0;
194 177
195 if (unlikely(afinfo == NULL)) 178 if (unlikely(afinfo == NULL))
196 return -EAFNOSUPPORT; 179 return -EAFNOSUPPORT;
197 typemap = afinfo->type_map; 180 typemap = afinfo->type_map;
181 spin_lock_bh(&xfrm_type_lock);
198 182
199 if (likely(typemap[type->proto] == NULL)) 183 if (likely(typemap[type->proto] == NULL))
200 typemap[type->proto] = type; 184 typemap[type->proto] = type;
201 else 185 else
202 err = -EEXIST; 186 err = -EEXIST;
203 xfrm_state_unlock_afinfo(afinfo); 187 spin_unlock_bh(&xfrm_type_lock);
188 xfrm_state_put_afinfo(afinfo);
204 return err; 189 return err;
205} 190}
206EXPORT_SYMBOL(xfrm_register_type); 191EXPORT_SYMBOL(xfrm_register_type);
207 192
208int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family) 193int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
209{ 194{
210 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 195 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
211 const struct xfrm_type **typemap; 196 const struct xfrm_type **typemap;
212 int err = 0; 197 int err = 0;
213 198
214 if (unlikely(afinfo == NULL)) 199 if (unlikely(afinfo == NULL))
215 return -EAFNOSUPPORT; 200 return -EAFNOSUPPORT;
216 typemap = afinfo->type_map; 201 typemap = afinfo->type_map;
202 spin_lock_bh(&xfrm_type_lock);
217 203
218 if (unlikely(typemap[type->proto] != type)) 204 if (unlikely(typemap[type->proto] != type))
219 err = -ENOENT; 205 err = -ENOENT;
220 else 206 else
221 typemap[type->proto] = NULL; 207 typemap[type->proto] = NULL;
222 xfrm_state_unlock_afinfo(afinfo); 208 spin_unlock_bh(&xfrm_type_lock);
209 xfrm_state_put_afinfo(afinfo);
223 return err; 210 return err;
224} 211}
225EXPORT_SYMBOL(xfrm_unregister_type); 212EXPORT_SYMBOL(xfrm_unregister_type);
@@ -256,6 +243,7 @@ static void xfrm_put_type(const struct xfrm_type *type)
256 module_put(type->owner); 243 module_put(type->owner);
257} 244}
258 245
246static DEFINE_SPINLOCK(xfrm_mode_lock);
259int xfrm_register_mode(struct xfrm_mode *mode, int family) 247int xfrm_register_mode(struct xfrm_mode *mode, int family)
260{ 248{
261 struct xfrm_state_afinfo *afinfo; 249 struct xfrm_state_afinfo *afinfo;
@@ -265,12 +253,13 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
265 if (unlikely(mode->encap >= XFRM_MODE_MAX)) 253 if (unlikely(mode->encap >= XFRM_MODE_MAX))
266 return -EINVAL; 254 return -EINVAL;
267 255
268 afinfo = xfrm_state_lock_afinfo(family); 256 afinfo = xfrm_state_get_afinfo(family);
269 if (unlikely(afinfo == NULL)) 257 if (unlikely(afinfo == NULL))
270 return -EAFNOSUPPORT; 258 return -EAFNOSUPPORT;
271 259
272 err = -EEXIST; 260 err = -EEXIST;
273 modemap = afinfo->mode_map; 261 modemap = afinfo->mode_map;
262 spin_lock_bh(&xfrm_mode_lock);
274 if (modemap[mode->encap]) 263 if (modemap[mode->encap])
275 goto out; 264 goto out;
276 265
@@ -283,7 +272,8 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
283 err = 0; 272 err = 0;
284 273
285out: 274out:
286 xfrm_state_unlock_afinfo(afinfo); 275 spin_unlock_bh(&xfrm_mode_lock);
276 xfrm_state_put_afinfo(afinfo);
287 return err; 277 return err;
288} 278}
289EXPORT_SYMBOL(xfrm_register_mode); 279EXPORT_SYMBOL(xfrm_register_mode);
@@ -297,19 +287,21 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
297 if (unlikely(mode->encap >= XFRM_MODE_MAX)) 287 if (unlikely(mode->encap >= XFRM_MODE_MAX))
298 return -EINVAL; 288 return -EINVAL;
299 289
300 afinfo = xfrm_state_lock_afinfo(family); 290 afinfo = xfrm_state_get_afinfo(family);
301 if (unlikely(afinfo == NULL)) 291 if (unlikely(afinfo == NULL))
302 return -EAFNOSUPPORT; 292 return -EAFNOSUPPORT;
303 293
304 err = -ENOENT; 294 err = -ENOENT;
305 modemap = afinfo->mode_map; 295 modemap = afinfo->mode_map;
296 spin_lock_bh(&xfrm_mode_lock);
306 if (likely(modemap[mode->encap] == mode)) { 297 if (likely(modemap[mode->encap] == mode)) {
307 modemap[mode->encap] = NULL; 298 modemap[mode->encap] = NULL;
308 module_put(mode->afinfo->owner); 299 module_put(mode->afinfo->owner);
309 err = 0; 300 err = 0;
310 } 301 }
311 302
312 xfrm_state_unlock_afinfo(afinfo); 303 spin_unlock_bh(&xfrm_mode_lock);
304 xfrm_state_put_afinfo(afinfo);
313 return err; 305 return err;
314} 306}
315EXPORT_SYMBOL(xfrm_unregister_mode); 307EXPORT_SYMBOL(xfrm_unregister_mode);
@@ -699,7 +691,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
699 if (x->props.family != family || 691 if (x->props.family != family ||
700 x->id.spi != spi || 692 x->id.spi != spi ||
701 x->id.proto != proto || 693 x->id.proto != proto ||
702 xfrm_addr_cmp(&x->id.daddr, daddr, family)) 694 !xfrm_addr_equal(&x->id.daddr, daddr, family))
703 continue; 695 continue;
704 696
705 if ((mark & x->mark.m) != x->mark.v) 697 if ((mark & x->mark.m) != x->mark.v)
@@ -723,8 +715,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
723 hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { 715 hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
724 if (x->props.family != family || 716 if (x->props.family != family ||
725 x->id.proto != proto || 717 x->id.proto != proto ||
726 xfrm_addr_cmp(&x->id.daddr, daddr, family) || 718 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
727 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 719 !xfrm_addr_equal(&x->props.saddr, saddr, family))
728 continue; 720 continue;
729 721
730 if ((mark & x->mark.m) != x->mark.v) 722 if ((mark & x->mark.m) != x->mark.v)
@@ -989,8 +981,8 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
989 if (x->props.family == family && 981 if (x->props.family == family &&
990 x->props.reqid == reqid && 982 x->props.reqid == reqid &&
991 (mark & x->mark.m) == x->mark.v && 983 (mark & x->mark.m) == x->mark.v &&
992 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 984 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
993 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 985 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
994 x->genid++; 986 x->genid++;
995 } 987 }
996} 988}
@@ -1024,8 +1016,8 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
1024 x->id.spi != 0 || 1016 x->id.spi != 0 ||
1025 x->id.proto != proto || 1017 x->id.proto != proto ||
1026 (mark & x->mark.m) != x->mark.v || 1018 (mark & x->mark.m) != x->mark.v ||
1027 xfrm_addr_cmp(&x->id.daddr, daddr, family) || 1019 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1028 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 1020 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1029 continue; 1021 continue;
1030 1022
1031 xfrm_state_hold(x); 1023 xfrm_state_hold(x);
@@ -1108,7 +1100,7 @@ int xfrm_state_add(struct xfrm_state *x)
1108 if (use_spi && x->km.seq) { 1100 if (use_spi && x->km.seq) {
1109 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq); 1101 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1110 if (x1 && ((x1->id.proto != x->id.proto) || 1102 if (x1 && ((x1->id.proto != x->id.proto) ||
1111 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1103 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1112 to_put = x1; 1104 to_put = x1;
1113 x1 = NULL; 1105 x1 = NULL;
1114 } 1106 }
@@ -1234,10 +1226,10 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1234 continue; 1226 continue;
1235 if (m->reqid && x->props.reqid != m->reqid) 1227 if (m->reqid && x->props.reqid != m->reqid)
1236 continue; 1228 continue;
1237 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, 1229 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1238 m->old_family) || 1230 m->old_family) ||
1239 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, 1231 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1240 m->old_family)) 1232 m->old_family))
1241 continue; 1233 continue;
1242 xfrm_state_hold(x); 1234 xfrm_state_hold(x);
1243 return x; 1235 return x;
@@ -1249,10 +1241,10 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1249 if (x->props.mode != m->mode || 1241 if (x->props.mode != m->mode ||
1250 x->id.proto != m->proto) 1242 x->id.proto != m->proto)
1251 continue; 1243 continue;
1252 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, 1244 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1253 m->old_family) || 1245 m->old_family) ||
1254 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, 1246 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1255 m->old_family)) 1247 m->old_family))
1256 continue; 1248 continue;
1257 xfrm_state_hold(x); 1249 xfrm_state_hold(x);
1258 return x; 1250 return x;
@@ -1277,7 +1269,7 @@ struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1277 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); 1269 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1278 1270
1279 /* add state */ 1271 /* add state */
1280 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) { 1272 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1281 /* a care is needed when the destination address of the 1273 /* a care is needed when the destination address of the
1282 state is to be updated as it is a part of triplet */ 1274 state is to be updated as it is a part of triplet */
1283 xfrm_state_insert(xc); 1275 xfrm_state_insert(xc);
@@ -1370,9 +1362,6 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1370 if (!x->curlft.use_time) 1362 if (!x->curlft.use_time)
1371 x->curlft.use_time = get_seconds(); 1363 x->curlft.use_time = get_seconds();
1372 1364
1373 if (x->km.state != XFRM_STATE_VALID)
1374 return -EINVAL;
1375
1376 if (x->curlft.bytes >= x->lft.hard_byte_limit || 1365 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1377 x->curlft.packets >= x->lft.hard_packet_limit) { 1366 x->curlft.packets >= x->lft.hard_packet_limit) {
1378 x->km.state = XFRM_STATE_EXPIRED; 1367 x->km.state = XFRM_STATE_EXPIRED;
@@ -1648,27 +1637,26 @@ static void xfrm_replay_timer_handler(unsigned long data)
1648} 1637}
1649 1638
1650static LIST_HEAD(xfrm_km_list); 1639static LIST_HEAD(xfrm_km_list);
1651static DEFINE_RWLOCK(xfrm_km_lock);
1652 1640
1653void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 1641void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1654{ 1642{
1655 struct xfrm_mgr *km; 1643 struct xfrm_mgr *km;
1656 1644
1657 read_lock(&xfrm_km_lock); 1645 rcu_read_lock();
1658 list_for_each_entry(km, &xfrm_km_list, list) 1646 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1659 if (km->notify_policy) 1647 if (km->notify_policy)
1660 km->notify_policy(xp, dir, c); 1648 km->notify_policy(xp, dir, c);
1661 read_unlock(&xfrm_km_lock); 1649 rcu_read_unlock();
1662} 1650}
1663 1651
1664void km_state_notify(struct xfrm_state *x, const struct km_event *c) 1652void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1665{ 1653{
1666 struct xfrm_mgr *km; 1654 struct xfrm_mgr *km;
1667 read_lock(&xfrm_km_lock); 1655 rcu_read_lock();
1668 list_for_each_entry(km, &xfrm_km_list, list) 1656 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1669 if (km->notify) 1657 if (km->notify)
1670 km->notify(x, c); 1658 km->notify(x, c);
1671 read_unlock(&xfrm_km_lock); 1659 rcu_read_unlock();
1672} 1660}
1673 1661
1674EXPORT_SYMBOL(km_policy_notify); 1662EXPORT_SYMBOL(km_policy_notify);
@@ -1698,13 +1686,13 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1698 int err = -EINVAL, acqret; 1686 int err = -EINVAL, acqret;
1699 struct xfrm_mgr *km; 1687 struct xfrm_mgr *km;
1700 1688
1701 read_lock(&xfrm_km_lock); 1689 rcu_read_lock();
1702 list_for_each_entry(km, &xfrm_km_list, list) { 1690 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1703 acqret = km->acquire(x, t, pol); 1691 acqret = km->acquire(x, t, pol);
1704 if (!acqret) 1692 if (!acqret)
1705 err = acqret; 1693 err = acqret;
1706 } 1694 }
1707 read_unlock(&xfrm_km_lock); 1695 rcu_read_unlock();
1708 return err; 1696 return err;
1709} 1697}
1710EXPORT_SYMBOL(km_query); 1698EXPORT_SYMBOL(km_query);
@@ -1714,14 +1702,14 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1714 int err = -EINVAL; 1702 int err = -EINVAL;
1715 struct xfrm_mgr *km; 1703 struct xfrm_mgr *km;
1716 1704
1717 read_lock(&xfrm_km_lock); 1705 rcu_read_lock();
1718 list_for_each_entry(km, &xfrm_km_list, list) { 1706 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1719 if (km->new_mapping) 1707 if (km->new_mapping)
1720 err = km->new_mapping(x, ipaddr, sport); 1708 err = km->new_mapping(x, ipaddr, sport);
1721 if (!err) 1709 if (!err)
1722 break; 1710 break;
1723 } 1711 }
1724 read_unlock(&xfrm_km_lock); 1712 rcu_read_unlock();
1725 return err; 1713 return err;
1726} 1714}
1727EXPORT_SYMBOL(km_new_mapping); 1715EXPORT_SYMBOL(km_new_mapping);
@@ -1750,15 +1738,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1750 int ret; 1738 int ret;
1751 struct xfrm_mgr *km; 1739 struct xfrm_mgr *km;
1752 1740
1753 read_lock(&xfrm_km_lock); 1741 rcu_read_lock();
1754 list_for_each_entry(km, &xfrm_km_list, list) { 1742 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1755 if (km->migrate) { 1743 if (km->migrate) {
1756 ret = km->migrate(sel, dir, type, m, num_migrate, k); 1744 ret = km->migrate(sel, dir, type, m, num_migrate, k);
1757 if (!ret) 1745 if (!ret)
1758 err = ret; 1746 err = ret;
1759 } 1747 }
1760 } 1748 }
1761 read_unlock(&xfrm_km_lock); 1749 rcu_read_unlock();
1762 return err; 1750 return err;
1763} 1751}
1764EXPORT_SYMBOL(km_migrate); 1752EXPORT_SYMBOL(km_migrate);
@@ -1770,15 +1758,15 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
1770 int ret; 1758 int ret;
1771 struct xfrm_mgr *km; 1759 struct xfrm_mgr *km;
1772 1760
1773 read_lock(&xfrm_km_lock); 1761 rcu_read_lock();
1774 list_for_each_entry(km, &xfrm_km_list, list) { 1762 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1775 if (km->report) { 1763 if (km->report) {
1776 ret = km->report(net, proto, sel, addr); 1764 ret = km->report(net, proto, sel, addr);
1777 if (!ret) 1765 if (!ret)
1778 err = ret; 1766 err = ret;
1779 } 1767 }
1780 } 1768 }
1781 read_unlock(&xfrm_km_lock); 1769 rcu_read_unlock();
1782 return err; 1770 return err;
1783} 1771}
1784EXPORT_SYMBOL(km_report); 1772EXPORT_SYMBOL(km_report);
@@ -1802,14 +1790,14 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
1802 goto out; 1790 goto out;
1803 1791
1804 err = -EINVAL; 1792 err = -EINVAL;
1805 read_lock(&xfrm_km_lock); 1793 rcu_read_lock();
1806 list_for_each_entry(km, &xfrm_km_list, list) { 1794 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1807 pol = km->compile_policy(sk, optname, data, 1795 pol = km->compile_policy(sk, optname, data,
1808 optlen, &err); 1796 optlen, &err);
1809 if (err >= 0) 1797 if (err >= 0)
1810 break; 1798 break;
1811 } 1799 }
1812 read_unlock(&xfrm_km_lock); 1800 rcu_read_unlock();
1813 1801
1814 if (err >= 0) { 1802 if (err >= 0) {
1815 xfrm_sk_policy_insert(sk, err, pol); 1803 xfrm_sk_policy_insert(sk, err, pol);
@@ -1823,20 +1811,23 @@ out:
1823} 1811}
1824EXPORT_SYMBOL(xfrm_user_policy); 1812EXPORT_SYMBOL(xfrm_user_policy);
1825 1813
1814static DEFINE_SPINLOCK(xfrm_km_lock);
1815
1826int xfrm_register_km(struct xfrm_mgr *km) 1816int xfrm_register_km(struct xfrm_mgr *km)
1827{ 1817{
1828 write_lock_bh(&xfrm_km_lock); 1818 spin_lock_bh(&xfrm_km_lock);
1829 list_add_tail(&km->list, &xfrm_km_list); 1819 list_add_tail_rcu(&km->list, &xfrm_km_list);
1830 write_unlock_bh(&xfrm_km_lock); 1820 spin_unlock_bh(&xfrm_km_lock);
1831 return 0; 1821 return 0;
1832} 1822}
1833EXPORT_SYMBOL(xfrm_register_km); 1823EXPORT_SYMBOL(xfrm_register_km);
1834 1824
1835int xfrm_unregister_km(struct xfrm_mgr *km) 1825int xfrm_unregister_km(struct xfrm_mgr *km)
1836{ 1826{
1837 write_lock_bh(&xfrm_km_lock); 1827 spin_lock_bh(&xfrm_km_lock);
1838 list_del(&km->list); 1828 list_del_rcu(&km->list);
1839 write_unlock_bh(&xfrm_km_lock); 1829 spin_unlock_bh(&xfrm_km_lock);
1830 synchronize_rcu();
1840 return 0; 1831 return 0;
1841} 1832}
1842EXPORT_SYMBOL(xfrm_unregister_km); 1833EXPORT_SYMBOL(xfrm_unregister_km);
@@ -1848,12 +1839,12 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1848 return -EINVAL; 1839 return -EINVAL;
1849 if (unlikely(afinfo->family >= NPROTO)) 1840 if (unlikely(afinfo->family >= NPROTO))
1850 return -EAFNOSUPPORT; 1841 return -EAFNOSUPPORT;
1851 write_lock_bh(&xfrm_state_afinfo_lock); 1842 spin_lock_bh(&xfrm_state_afinfo_lock);
1852 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) 1843 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1853 err = -ENOBUFS; 1844 err = -ENOBUFS;
1854 else 1845 else
1855 xfrm_state_afinfo[afinfo->family] = afinfo; 1846 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
1856 write_unlock_bh(&xfrm_state_afinfo_lock); 1847 spin_unlock_bh(&xfrm_state_afinfo_lock);
1857 return err; 1848 return err;
1858} 1849}
1859EXPORT_SYMBOL(xfrm_state_register_afinfo); 1850EXPORT_SYMBOL(xfrm_state_register_afinfo);
@@ -1865,14 +1856,15 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1865 return -EINVAL; 1856 return -EINVAL;
1866 if (unlikely(afinfo->family >= NPROTO)) 1857 if (unlikely(afinfo->family >= NPROTO))
1867 return -EAFNOSUPPORT; 1858 return -EAFNOSUPPORT;
1868 write_lock_bh(&xfrm_state_afinfo_lock); 1859 spin_lock_bh(&xfrm_state_afinfo_lock);
1869 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { 1860 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1870 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) 1861 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1871 err = -EINVAL; 1862 err = -EINVAL;
1872 else 1863 else
1873 xfrm_state_afinfo[afinfo->family] = NULL; 1864 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
1874 } 1865 }
1875 write_unlock_bh(&xfrm_state_afinfo_lock); 1866 spin_unlock_bh(&xfrm_state_afinfo_lock);
1867 synchronize_rcu();
1876 return err; 1868 return err;
1877} 1869}
1878EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1870EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
@@ -1882,17 +1874,16 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1882 struct xfrm_state_afinfo *afinfo; 1874 struct xfrm_state_afinfo *afinfo;
1883 if (unlikely(family >= NPROTO)) 1875 if (unlikely(family >= NPROTO))
1884 return NULL; 1876 return NULL;
1885 read_lock(&xfrm_state_afinfo_lock); 1877 rcu_read_lock();
1886 afinfo = xfrm_state_afinfo[family]; 1878 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
1887 if (unlikely(!afinfo)) 1879 if (unlikely(!afinfo))
1888 read_unlock(&xfrm_state_afinfo_lock); 1880 rcu_read_unlock();
1889 return afinfo; 1881 return afinfo;
1890} 1882}
1891 1883
1892static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1884static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1893 __releases(xfrm_state_afinfo_lock)
1894{ 1885{
1895 read_unlock(&xfrm_state_afinfo_lock); 1886 rcu_read_unlock();
1896} 1887}
1897 1888
1898/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ 1889/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index eb872b2e366e..fbd9e6cd0fd7 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1112,7 +1112,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1112 mark = xfrm_mark_get(attrs, &m); 1112 mark = xfrm_mark_get(attrs, &m);
1113 if (p->info.seq) { 1113 if (p->info.seq) {
1114 x = xfrm_find_acq_byseq(net, mark, p->info.seq); 1114 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1115 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 1115 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
1116 xfrm_state_put(x); 1116 xfrm_state_put(x);
1117 x = NULL; 1117 x = NULL;
1118 } 1118 }